diff --git a/.gitignore b/.gitignore index 116206f2a..a447040b5 100755 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,12 @@ !thirdparty/ +.idea +.vscode/* *.Po *.a -*.doxyfile *.dat* *.doxyfile *.o +*sql.output .cproject .gitignore.swp .history @@ -12,6 +14,10 @@ .settings /test Build +CLAIMS.config +CLAIMS.creator* +CLAIMS.files +CLAIMS.includes Config.cpp Debug.h Default @@ -23,6 +29,7 @@ autom4te.cache/ bin/ build claims.log +claims_log/ claimsserver client compile @@ -38,6 +45,7 @@ config.status config.sub configure depcomp +doc include/ install-sh install/ @@ -45,40 +53,18 @@ libtool log ltmain.sh missing -stamp-h1 -Makefile.in -*.Po -Debug.h -install/ -*.a -*.o -Makefile -config.h -config.h.in -config.log -configure -claims.log -doc -conf -conf/config -build -Build -*.dat* -include/ -.history -compile -thirdparty/ -ar-lib -缺省值 -CLAIMS.config -CLAIMS.creator* -CLAIMS.files -CLAIMS.includes -*sql.output -*.doxyfile sbin/.claimssserver.pid +sbin/2-claims-conf/ +sbin/2-claims-conf/claims_log/ sbin/2-claims-conf/config-* +sbin/2-claims-conf/test_for_DI_GetTuple sbin/claims-test/claims_log/ sbin/claims-test/test_for_DI_GetTuple +sbin/claims-test/testresult/ sbin/claims-test/testresult/*.result +sbin/claims_log/ sbin/logs/ +stamp-h1 +test_for_DI_GetTuple +thirdparty/ +缺省值 diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 000000000..97251711a --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,2483 @@ +cmake_minimum_required(VERSION 3.3) +project(CLAIMS) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + +set(SOURCE_FILES + build/bin/claims_log/claimsserver.ERROR + build/bin/claims_log/claimsserver.INFO + build/bin/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151023-132102.32394 + build/bin/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151012-172423.11861 + 
build/bin/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151023-132102.32394 + build/bin/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151023-132102.32394 + build/bin/claims_log/claimsserver.WARNING + build/bin/claims_log/test.ERROR + build/bin/claims_log/test.INFO + build/bin/claims_log/test.localhost.localdomain.fish.log.ERROR.20151023-132244.32681 + build/bin/claims_log/test.localhost.localdomain.fish.log.INFO.20151012-172429.12126 + build/bin/claims_log/test.localhost.localdomain.fish.log.INFO.20151023-132244.32681 + build/bin/claims_log/test.localhost.localdomain.fish.log.WARNING.20151023-132244.32681 + build/bin/claims_log/test.WARNING + build/bin/claimsserver + build/bin/client + build/bin/parser + build/bin/test + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/Buffer.o + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/Expanded_iterators_test.o + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/in_iterator_test.o + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/iterator_test.o + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/libtest.a + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/Makefile + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/Project_test.o + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/projectionScan.o + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/Sort_test.o + build/BlockStreamIterator/ParallelBlockStreamIterator/Test/TopN_test.o + build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamAggregationIterator.o + build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamCombinedIterator.o + build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamExpander.o + build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamInIterator.o + build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamJoinIterator.o + 
build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamNestLoopJoinIterator.o + build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamProjectIterator.o + build/BlockStreamIterator/ParallelBlockStreamIterator/BlockStreamSortIterator.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamBuffer.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamExchangeEpoll.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamExchangeLowerEfficient.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamExchangeLowerMaterialized.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamFilter.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamHdfsScan.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamProjectionScan.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamRandomDiskAccess.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamRandomMemAccess.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamSingleColumnScan.o + build/BlockStreamIterator/ParallelBlockStreamIterator/ExpandableBlockStreamSingleColumnScanDisk.o + build/BlockStreamIterator/ParallelBlockStreamIterator/libparallelblockstreamiterator.a + build/BlockStreamIterator/ParallelBlockStreamIterator/Makefile + build/BlockStreamIterator/Serialization/libserialization.a + build/BlockStreamIterator/Serialization/Makefile + build/BlockStreamIterator/Serialization/RegisterDerivedClass.o + build/BlockStreamIterator/BlockStreamExchangeBase.o + build/BlockStreamIterator/BlockStreamExchangeLowerBase.o + build/BlockStreamIterator/BlockStreamFilter.o + build/BlockStreamIterator/BlockStreamIteratorBase.o + build/BlockStreamIterator/BlockStreamLimit.o + build/BlockStreamIterator/BlockStreamPerformanceMonitorTop.o + 
build/BlockStreamIterator/BlockStreamPerformanceTest.o + build/BlockStreamIterator/BlockStreamPrint.o + build/BlockStreamIterator/BlockStreamRandomMemAccess.o + build/BlockStreamIterator/BlockStreamResultCollector.o + build/BlockStreamIterator/BlockStreamSingleColumnScan.o + build/BlockStreamIterator/ExpandableBlockStreamIteratorBase.o + build/BlockStreamIterator/libblockstreamiterator.a + build/BlockStreamIterator/Makefile + build/Catalog/stat/Analyzer.gcno + build/Catalog/stat/Analyzer.o + build/Catalog/stat/AttributeStatistics.o + build/Catalog/stat/Estimation.gcno + build/Catalog/stat/Estimation.o + build/Catalog/stat/libstat.a + build/Catalog/stat/Makefile + build/Catalog/stat/Statistic.o + build/Catalog/stat/StatManager.o + build/Catalog/stat/TableStatistic.o + build/Catalog/Test/libtest.a + build/Catalog/Test/Makefile + build/Catalog/Catalog.gcda + build/Catalog/Catalog.gcno + build/Catalog/Catalog.o + build/Catalog/Column.o + build/Catalog/libcatalog.a + build/Catalog/Makefile + build/Catalog/Partitioner.gcda + build/Catalog/Partitioner.gcno + build/Catalog/Partitioner.o + build/Catalog/ProjectionBinding.gcda + build/Catalog/ProjectionBinding.gcno + build/Catalog/ProjectionBinding.o + build/Catalog/table.gcda + build/Catalog/table.gcno + build/Catalog/table.o + build/catalog/stat/Analyzer.gcno + build/catalog/stat/Analyzer.o + build/catalog/stat/AttributeStatistics.gcda + build/catalog/stat/AttributeStatistics.gcno + build/catalog/stat/AttributeStatistics.o + build/catalog/stat/Estimation.gcno + build/catalog/stat/Estimation.o + build/catalog/stat/libstat.a + build/catalog/stat/Makefile + build/catalog/stat/Statistic.gcno + build/catalog/stat/Statistic.o + build/catalog/stat/StatManager.gcda + build/catalog/stat/StatManager.gcno + build/catalog/stat/StatManager.o + build/catalog/stat/TableStatistic.gcda + build/catalog/stat/TableStatistic.gcno + build/catalog/stat/TableStatistic.o + build/catalog/Test/Makefile + build/catalog/catalog.gcda + 
build/catalog/catalog.gcno + build/catalog/catalog.o + build/catalog/column.gcda + build/catalog/column.gcno + build/catalog/column.o + build/catalog/libcatalog.a + build/catalog/Makefile + build/catalog/partitioner.gcda + build/catalog/partitioner.gcno + build/catalog/partitioner.o + build/catalog/projection.gcda + build/catalog/projection.gcno + build/catalog/projection.o + build/catalog/projection_binding.gcda + build/catalog/projection_binding.gcno + build/catalog/projection_binding.o + build/catalog/table.gcda + build/catalog/table.gcno + build/catalog/table.o + build/claims_log/claimsserver.ERROR + build/claims_log/claimsserver.INFO + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151110-101519.19134 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151110-110622.27820 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-142921.22011 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-153340.27058 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-200645.12465 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-201001.12761 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-201113.13202 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-201146.13496 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-201607.13810 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-201644.14084 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-202323.14551 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151115-204728.16335 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151116-210520.15502 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151116-211651.16894 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-152748.2760 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-154930.5393 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-155015.5694 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-155641.6147 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-160015.7660 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-161032.9349 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-203106.12960 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-204500.13615 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-204622.13913 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-204757.14325 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151117-204831.14623 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-093243.7367 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-143530.2924 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-144522.4045 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-145156.5143 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-145308.5434 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-145524.6544 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-161859.14475 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-163721.17414 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-164246.17821 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-214758.4924 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-215838.6311 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-220202.7253 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-223107.10263 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151118-224817.13928 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151119-202212.1963 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151119-202236.2286 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-103823.4341 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-122325.10936 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-123020.11984 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-123256.13067 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-123425.13384 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-123634.13681 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-134708.20304 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-134818.20592 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-135928.23393 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-140055.23757 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-201701.22523 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-201821.22897 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-202700.26645 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-202816.27345 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-202946.27719 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-203102.28175 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-203208.28538 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-205330.2116 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-205553.3544 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-205857.5902 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-210057.6385 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151120-210701.6907 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-193846.19461 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-194935.20105 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-195418.21082 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-195722.21850 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-203927.28231 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-204046.28427 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-205952.32503 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-210059.451 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-212103.3600 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-212756.4573 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151123-213434.6399 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151124-102814.18457 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-164628.7973 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-164813.8027 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-165224.8525 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-165335.8802 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-165641.9117 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-170314.11673 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-172820.14417 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-195101.25686 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-212954.31790 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-223238.10705 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-223252.10976 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151126-223400.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151127-172028.16659 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151127-172450.17759 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151127-172904.20120 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151128-094945.18639 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151128-095257.19101 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151128-095417.19392 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151128-095628.20464 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151128-101713.24068 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151128-103424.24825 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151128-104454.27905 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151130-161459.3337 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151130-161847.3597 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151130-184020.12594 + build/claims_log/claimsserver.localhost.localdomain.fish.log.ERROR.20151130-184124.22891 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151110-101508.19134 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151110-110616.27820 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151112-205707.24677 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151112-211419.26147 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151113-213158.31861 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-142239.21206 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-142738.22011 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-152951.26631 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-153318.27058 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-200622.12465 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-200654.12761 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-201032.13202 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-201124.13496 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-201548.13810 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-201613.14084 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-202249.14551 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-204037.16335 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151115-205644.17402 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151116-143145.18785 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151116-210508.15237 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151116-210520.15502 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151116-211651.16894 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-152605.2466 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-152719.2760 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-154818.5393 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-154959.5694 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-155331.6147 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-155958.7660 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-160343.8245 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-161027.9349 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-162246.10558 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-162611.12268 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-202948.12960 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-204442.13615 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-204538.13913 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-204754.14325 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151117-204828.14623 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-093142.7367 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-134831.27390 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-135816.29036 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-140037.29987 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-141251.31325 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-141705.31724 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-142935.1838 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-143425.2924 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-144409.4045 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-145124.5143 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-145222.5434 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-145457.5777 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-145524.6544 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-161724.14475 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-162628.16077 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-163239.17414 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-163732.17821 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-214609.4924 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-215757.6311 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-215844.7253 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-220413.8044 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-222747.10263 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-223158.11451 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-223454.11839 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151118-224641.13928 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151119-202151.1963 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151119-202220.2286 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151119-205017.10713 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-103811.4341 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-122257.10936 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-122650.11984 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-123054.13067 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-123338.13384 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-123437.13681 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-123648.14133 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-124833.14742 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-125022.15144 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-132137.17434 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-132347.17726 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-133523.19437 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-134628.20304 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-134725.20592 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-135252.21214 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-135908.23393 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-135945.23757 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-140134.24323 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-140152.24636 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-140243.24973 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-140503.25270 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-142219.26810 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-143146.29454 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-193734.17236 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-201454.22523 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-201719.22897 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-202529.25820 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-202700.26645 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-202807.27345 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-202838.27719 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-203024.28175 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-203143.28538 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-205129.942 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-205316.2116 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-205522.3544 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-205851.5902 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-210002.6385 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-210138.6907 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-210707.7338 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-211210.7908 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-211717.8999 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-212411.10347 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-213033.12954 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-214023.14494 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151120-214616.15894 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-193231.19461 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-194906.20105 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-195412.21082 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-195526.21850 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-203927.28231 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-203950.28427 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-205904.32503 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-210049.451 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-211634.3600 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-212602.4573 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151123-213420.6399 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151124-102733.18457 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-164628.7973 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-164813.8027 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-165224.8525 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-165307.8802 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-165437.9117 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-170306.11673 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-172808.14417 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-194336.24730 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-194947.25686 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-210101.31790 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223238.10705 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223252.10976 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223301.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223750.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223815.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223841.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223907.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-223935.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224002.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224029.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224057.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224125.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224153.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224222.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224250.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224319.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224347.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224415.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224443.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224512.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224540.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224608.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224637.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224705.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224733.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224802.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224831.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224859.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224928.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-224956.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225025.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225054.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225122.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225150.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225218.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225247.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225315.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225344.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225413.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225440.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225508.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225535.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225603.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225631.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225658.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225726.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225754.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225821.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225849.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225917.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-225944.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230012.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230040.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230107.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230135.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230202.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230230.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230258.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230325.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230353.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230421.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230448.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230516.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230544.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230611.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230639.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230707.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230734.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230802.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230829.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230857.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230925.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-230952.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231020.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231048.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231115.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231143.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231210.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231238.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231306.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231333.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231401.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231429.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231456.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231524.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231551.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231619.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231647.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231714.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231742.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231809.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231837.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231904.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231932.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-231959.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232027.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232054.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232122.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232149.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232217.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232244.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232312.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232340.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232407.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232435.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232503.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232530.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232558.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232626.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232653.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232721.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232748.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232816.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232844.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232911.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-232939.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233007.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233035.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233102.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233130.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233157.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233225.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233253.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233320.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233348.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233416.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233442.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233508.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233535.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233601.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233628.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233654.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233721.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233747.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233813.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233840.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233907.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-233933.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234000.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234036.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234103.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234129.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234155.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234222.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234249.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234315.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234342.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234409.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234437.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234504.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234532.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234600.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234626.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234652.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234721.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234749.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234815.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234842.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234909.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-234935.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235004.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235030.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235056.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235125.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235152.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235220.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235248.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235316.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235343.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235410.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235437.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235508.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235536.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235602.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235628.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235655.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235722.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235748.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235817.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235843.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235914.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151126-235940.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000010.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000038.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000107.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000133.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000159.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000227.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000253.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000319.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000346.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000412.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000439.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000505.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000531.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000559.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000626.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000652.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000719.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000746.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000815.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000841.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000908.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-000935.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001000.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001028.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001055.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001121.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001147.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001214.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001240.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001306.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001332.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001403.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001430.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001456.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001525.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001552.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001618.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001645.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001711.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001738.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001805.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001831.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001858.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001925.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-001951.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002020.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002047.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002113.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002139.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002205.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002232.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002258.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002325.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002352.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002420.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002447.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002515.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002541.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002608.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002634.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002700.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002727.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002754.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002823.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002850.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002916.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-002943.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003010.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003036.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003103.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003130.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003157.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003224.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003250.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003318.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003344.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003410.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003436.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003503.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003529.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003556.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003623.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003650.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003717.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003743.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003810.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003837.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003904.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003930.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-003956.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004026.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004053.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004119.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004146.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004212.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004239.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004305.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004331.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004357.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004424.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004450.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004516.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004543.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004610.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004637.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004703.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004730.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004756.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004822.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004848.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004914.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-004941.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005009.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005035.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005103.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005129.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005156.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005222.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005249.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005315.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005342.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005408.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005435.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005502.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005529.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005556.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005623.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005649.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005716.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005742.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005812.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005838.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005904.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005931.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-005958.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010025.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010051.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010118.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010145.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010212.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010238.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010306.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010333.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010359.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010426.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010452.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010519.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010546.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010612.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010639.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010706.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010732.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010800.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010826.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010853.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010920.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-010947.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011015.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011042.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011121.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011149.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011217.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011244.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011311.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011337.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011404.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011431.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011458.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011524.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011552.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011619.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011645.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011712.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011740.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011807.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011833.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011900.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011928.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-011954.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012022.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012049.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012116.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012144.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012211.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012238.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012305.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012331.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012357.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012424.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012450.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012521.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012548.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012615.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012642.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012709.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012736.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012806.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012832.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012859.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012926.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-012952.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013022.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013049.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013115.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013142.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013210.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013236.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013305.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013332.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013358.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013425.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013451.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013518.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013545.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013612.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013639.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013705.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013732.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013759.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013825.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013852.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013919.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-013946.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014014.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014041.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014107.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014133.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014201.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014227.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014254.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014321.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014348.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014414.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014441.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014508.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014535.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014601.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014628.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014655.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014722.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014748.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014815.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014842.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014909.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-014935.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015004.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015031.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015058.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015124.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015151.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015218.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015245.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015312.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015338.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015405.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015432.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015459.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015528.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015554.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015621.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015648.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015715.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015742.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015811.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015838.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015905.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015931.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-015959.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020027.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020054.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020121.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020147.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020214.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020240.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020308.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020335.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020401.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020428.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020455.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020522.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020548.11246 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020615.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020642.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020709.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020739.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020806.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-020833.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-021957.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-110030.25767 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-171945.16659 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-172423.17759 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-172836.20120 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-173230.20577 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151127-175410.22507 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-094859.18639 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-095232.19101 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-095335.19392 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-095523.20167 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-095603.20464 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-095639.20751 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-100742.22531 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-101324.23166 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-101707.24068 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-102236.24543 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-102300.24825 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-103652.26567 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-104101.27006 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151128-104440.27905 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151130-161458.3337 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151130-161847.3597 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151130-164928.12594 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151130-184057.22891 + build/claims_log/claimsserver.localhost.localdomain.fish.log.INFO.20151201-094150.29480 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151110-101519.19134 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151110-110622.27820 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-142921.22011 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-153340.27058 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-200645.12465 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-200801.12761 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-201113.13202 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-201146.13496 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-201607.13810 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-201644.14084 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-202323.14551 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151115-204728.16335 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151116-145243.18785 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151116-210520.15502 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151116-211651.16894 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-152748.2760 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-154826.5393 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-155015.5694 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-155641.6147 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-160011.7660 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-161032.9349 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-162252.10558 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-162619.12268 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-203032.12960 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-204451.13615 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-204546.13913 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-204757.14325 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151117-204831.14623 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-093209.7367 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-134844.27390 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-140055.29987 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-141300.31325 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-141900.31724 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-142942.1838 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-143445.2924 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-144423.4045 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-145136.5143 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-145231.5434 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-145524.6544 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-161830.14475 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-162642.16077 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-163307.17414 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-164212.17821 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-214627.4924 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-215805.6311 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-215857.7253 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-220430.8044 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-222757.10263 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-223212.11451 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-223507.11839 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151118-224649.13928 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151119-202202.1963 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151119-202230.2286 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151119-205125.10713 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-103823.4341 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-122314.10936 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-122659.11984 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-123108.13067 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-123346.13384 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-123444.13681 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-123705.14133 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-124911.14742 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-130227.15144 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-132233.17434 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-132358.17726 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-133533.19437 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-134641.20304 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-134736.20592 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-135305.21214 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-135922.23393 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-140055.23757 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-140142.24323 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-140201.24636 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-140252.24973 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-140528.25270 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-142229.26810 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-143159.29454 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-194950.17236 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-201504.22523 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-201739.22897 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-202700.26645 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-202816.27345 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-202853.27719 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-203037.28175 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-203208.28538 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-205323.2116 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-205530.3544 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-205857.5902 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-210010.6385 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-210147.6907 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-210713.7338 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-211221.7908 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-211732.8999 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-212415.10347 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-213039.12954 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-214026.14494 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151120-214634.15894 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-193250.19461 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-194924.20105 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-195418.21082 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-195533.21850 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-203927.28231 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-204046.28427 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-205952.32503 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-210059.451 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-211644.3600 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-212609.4573 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151123-213429.6399 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151124-102742.18457 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-164628.7973 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-164813.8027 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-165224.8525 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-165313.8802 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-165458.9117 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-170314.11673 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-172820.14417 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-195027.25686 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-210119.31790 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-223238.10705 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-223252.10976 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151126-223317.11246 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151127-171954.16659 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151127-172426.17759 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151127-172840.20120 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151127-173424.20577 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151127-175430.22507 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-094910.18639 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-095238.19101 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-095338.19392 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-095530.20167 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-095612.20464 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-095639.20751 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-101333.23166 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-101713.24068 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-102236.24543 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-102424.24825 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-103658.26567 + 
build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-104109.27006 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151128-104447.27905 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151130-161459.3337 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151130-161847.3597 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151130-164951.12594 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151130-184115.22891 + build/claims_log/claimsserver.localhost.localdomain.fish.log.WARNING.20151201-094207.29480 + build/claims_log/claimsserver.WARNING + build/claims_log/client.ERROR + build/claims_log/client.INFO + build/claims_log/client.localhost.localdomain.fish.log.ERROR.20151126-212954.32180 + build/claims_log/client.localhost.localdomain.fish.log.ERROR.20151130-152843.28171 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151110-101519.19403 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151110-110622.28086 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151110-143347.9070 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151116-213509.19273 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-152656.2741 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-152735.3036 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-152748.3042 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-154826.5659 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-155015.6072 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-155641.6413 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-160011.8033 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-160356.8523 + 
build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-161032.9615 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-162252.10834 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-162619.12535 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-204451.13894 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-204546.14184 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-204757.14606 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151117-204831.14889 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-093408.7685 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-134844.27656 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-135153.27754 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-140055.30256 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-140447.30420 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-140526.30457 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-141300.31597 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-141716.31993 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-142942.2115 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-143438.3195 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-144423.4321 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-145136.5411 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-145231.5700 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-145530.6810 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-161830.14750 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-162642.16409 + 
build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-163307.17680 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-164212.18300 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-214627.5191 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-215805.6577 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-215857.7522 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-220430.8314 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-222757.10532 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-223212.11770 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-223507.12122 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151118-224649.14196 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151119-202202.2251 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151119-202230.2555 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151119-205100.11006 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-104006.4627 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-122314.11231 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-122659.12251 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-123108.13338 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-123346.13652 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-123444.13947 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-123705.14417 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-124847.15015 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-125036.15418 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-132233.17704 + 
build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-132358.17995 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-133533.19705 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-134641.20580 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-134736.20865 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-135305.21490 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-135922.23662 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-140055.24132 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-140142.24594 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-140201.24904 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-140252.25242 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-140528.25540 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-142229.27076 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-143159.29722 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-194950.17510 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-201504.22790 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-201739.23166 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-202816.27611 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-202853.27989 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-203037.28445 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-203208.28807 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-205323.2405 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-205530.3813 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-205857.6169 + 
build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-210010.6651 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-210147.7179 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-210713.7608 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-211221.8178 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-211732.9266 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-212415.10617 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-213039.13223 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-214026.14764 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151120-214634.16165 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-193250.19733 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-194924.20385 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-195418.21352 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-195533.22121 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-204046.28693 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-205952.310 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-210059.755 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-211644.3871 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-212609.4844 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151123-213429.6668 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151124-102742.18723 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-164934.8356 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-165236.8793 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-165313.9079 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-165449.9383 + 
build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-170314.11955 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-172820.14694 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-195447.26144 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-210119.32180 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-223317.11526 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-223430.11561 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-223520.11580 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-223824.11637 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151126-224923.11942 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151127-172603.18748 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151127-173424.20854 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151127-175003.22279 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151127-175430.22774 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-094910.18915 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-095238.19367 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-095338.19666 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-095530.20436 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-095612.20734 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-095645.21021 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-101333.23439 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-101713.24334 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-101727.24343 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-102424.25092 + 
build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-103658.26842 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-104109.27275 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151128-104447.28171 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151130-164951.12909 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151130-184115.23174 + build/claims_log/client.localhost.localdomain.fish.log.INFO.20151201-094207.29746 + build/claims_log/client.localhost.localdomain.fish.log.WARNING.20151126-212954.32180 + build/claims_log/client.localhost.localdomain.fish.log.WARNING.20151130-152843.28171 + build/claims_log/client.WARNING + build/claims_log/test.ERROR + build/claims_log/test.INFO + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151120-103823.4607 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151126-194348.24997 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151126-194954.25957 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151126-195027.26080 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151126-223327.11534 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-171954.16925 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-172154.17067 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-172426.18025 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-172840.20386 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-173145.20547 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-174702.21539 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-174806.21572 + build/claims_log/test.localhost.localdomain.fish.log.ERROR.20151127-174922.21980 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151117-203032.13292 + 
build/claims_log/test.localhost.localdomain.fish.log.INFO.20151118-093209.7641 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151120-103823.4607 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151126-194348.24997 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151126-194954.25957 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151126-195027.26080 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151126-223327.11534 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-171954.16925 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-172154.17067 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-172426.18025 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-172840.20386 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-173145.20547 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-174702.21539 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-174806.21572 + build/claims_log/test.localhost.localdomain.fish.log.INFO.20151127-174922.21980 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151120-103823.4607 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151126-194348.24997 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151126-194954.25957 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151126-195027.26080 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151126-223327.11534 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-171954.16925 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-172154.17067 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-172426.18025 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-172840.20386 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-173145.20547 + 
build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-174702.21539 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-174806.21572 + build/claims_log/test.localhost.localdomain.fish.log.WARNING.20151127-174922.21980 + build/claims_log/test.WARNING + build/Client/json/libjson.a + build/Client/json/Makefile + build/Client/Test/libtest.a + build/Client/Test/Makefile + build/Client/ClaimsServer.gcda + build/Client/ClaimsServer.gcno + build/Client/ClaimsServer.o + build/Client/Client.gcda + build/Client/Client.gcno + build/Client/Client.o + build/Client/ClientResponse.gcno + build/Client/ClientResponse.o + build/Client/jsoncpp.gcda + build/Client/jsoncpp.gcno + build/Client/jsoncpp.o + build/Client/libclient.a + build/Client/Makefile + build/codegen/CodeGenerator.gcda + build/codegen/CodeGenerator.gcno + build/codegen/CodeGenerator.o + build/codegen/CompareFunctonGenerator.gcda + build/codegen/CompareFunctonGenerator.gcno + build/codegen/CompareFunctonGenerator.o + build/codegen/ExpressionGenerator.gcda + build/codegen/ExpressionGenerator.gcno + build/codegen/ExpressionGenerator.o + build/codegen/libcodegen.a + build/codegen/Makefile + build/common/Block/Block.gcda + build/common/Block/Block.gcno + build/common/Block/Block.o + build/common/Block/BlockContainer.gcda + build/common/Block/BlockContainer.gcno + build/common/Block/BlockContainer.o + build/common/Block/BlockStream.gcda + build/common/Block/BlockStream.gcno + build/common/Block/BlockStream.o + build/common/Block/BlockStreamBuffer.gcda + build/common/Block/BlockStreamBuffer.gcno + build/common/Block/BlockStreamBuffer.o + build/common/Block/DynamicBlockBuffer.gcda + build/common/Block/DynamicBlockBuffer.gcno + build/common/Block/DynamicBlockBuffer.o + build/common/Block/libblock.a + build/common/Block/Makefile + build/common/Block/MonitorableBuffer.gcda + build/common/Block/MonitorableBuffer.gcno + build/common/Block/MonitorableBuffer.o + 
build/common/Block/PartitionedBlockBuffer.gcda + build/common/Block/PartitionedBlockBuffer.gcno + build/common/Block/PartitionedBlockBuffer.o + build/common/Block/PartitionedBlockContainer.gcda + build/common/Block/PartitionedBlockContainer.gcno + build/common/Block/PartitionedBlockContainer.o + build/common/Block/ResultSet.gcda + build/common/Block/ResultSet.gcno + build/common/Block/ResultSet.o + build/common/Expression/execfunc.gcda + build/common/Expression/execfunc.gcno + build/common/Expression/execfunc.o + build/common/Expression/initquery.gcda + build/common/Expression/initquery.gcno + build/common/Expression/initquery.o + build/common/Expression/libexpression.a + build/common/Expression/Makefile + build/common/Expression/qnode.gcda + build/common/Expression/qnode.gcno + build/common/Expression/qnode.o + build/common/Expression/queryfunc.gcda + build/common/Expression/queryfunc.gcno + build/common/Expression/queryfunc.o + build/common/expression/data_type_oper.gcda + build/common/expression/data_type_oper.gcno + build/common/expression/data_type_oper.o + build/common/expression/expr_binary.gcda + build/common/expression/expr_binary.gcno + build/common/expression/expr_binary.o + build/common/expression/expr_case_when.gcda + build/common/expression/expr_case_when.gcno + build/common/expression/expr_case_when.o + build/common/expression/expr_column.gcda + build/common/expression/expr_column.gcno + build/common/expression/expr_column.o + build/common/expression/expr_const.gcda + build/common/expression/expr_const.gcno + build/common/expression/expr_const.o + build/common/expression/expr_date.gcda + build/common/expression/expr_date.gcno + build/common/expression/expr_date.o + build/common/expression/expr_in.gcda + build/common/expression/expr_in.gcno + build/common/expression/expr_in.o + build/common/expression/expr_node.gcda + build/common/expression/expr_node.gcno + build/common/expression/expr_node.o + build/common/expression/expr_ternary.gcda + 
build/common/expression/expr_ternary.gcno + build/common/expression/expr_ternary.o + build/common/expression/expr_type_cast.gcda + build/common/expression/expr_type_cast.gcno + build/common/expression/expr_type_cast.o + build/common/expression/expr_unary.gcda + build/common/expression/expr_unary.gcno + build/common/expression/expr_unary.o + build/common/expression/libexpression.a + build/common/expression/Makefile + build/common/expression/type_conversion_matrix.gcda + build/common/expression/type_conversion_matrix.gcno + build/common/expression/type_conversion_matrix.o + build/common/file_handle/test/libtest.a + build/common/file_handle/test/Makefile + build/common/file_handle/disk_file_handle_imp.gcda + build/common/file_handle/disk_file_handle_imp.gcno + build/common/file_handle/disk_file_handle_imp.o + build/common/file_handle/file_handle_imp.gcno + build/common/file_handle/file_handle_imp.o + build/common/file_handle/hdfs_file_handle_imp.gcda + build/common/file_handle/hdfs_file_handle_imp.gcno + build/common/file_handle/hdfs_file_handle_imp.o + build/common/file_handle/libfilehandle.a + build/common/file_handle/Makefile + build/common/log/lib_logging.a + build/common/log/liblog.a + build/common/log/logging.gcda + build/common/log/logging.gcno + build/common/log/logging.o + build/common/log/Makefile + build/common/Schema/Test/libtest.a + build/common/Schema/Test/Makefile + build/common/Schema/libschema.a + build/common/Schema/Makefile + build/common/Schema/Schema.gcda + build/common/Schema/Schema.gcno + build/common/Schema/Schema.o + build/common/Schema/SchemaFix.gcda + build/common/Schema/SchemaFix.gcno + build/common/Schema/SchemaFix.o + build/common/Schema/SchemaVar.gcno + build/common/Schema/SchemaVar.o + build/common/Schema/TupleConvertor.gcda + build/common/Schema/TupleConvertor.gcno + build/common/Schema/TupleConvertor.o + build/common/serialization/libserialization.a + build/common/serialization/Makefile + 
build/common/serialization/RegisterDerivedClass.gcda + build/common/serialization/RegisterDerivedClass.gcno + build/common/serialization/RegisterDerivedClass.o + build/common/types/Test/libtest.a + build/common/types/Test/Makefile + build/common/types/ttmath/libttmath.a + build/common/types/ttmath/Makefile + build/common/types/libtypes.a + build/common/types/Makefile + build/common/types/NValue.gcda + build/common/types/NValue.gcno + build/common/types/NValue.o + build/common/AttributeComparator.gcda + build/common/AttributeComparator.gcno + build/common/AttributeComparator.o + build/common/Comparator.gcda + build/common/Comparator.gcno + build/common/Comparator.o + build/common/data_type.gcda + build/common/data_type.gcno + build/common/data_type.o + build/common/error_no.gcda + build/common/error_no.gcno + build/common/error_no.o + build/common/ExpandedThreadTracker.gcda + build/common/ExpandedThreadTracker.gcno + build/common/ExpandedThreadTracker.o + build/common/hash.gcda + build/common/hash.gcno + build/common/hash.o + build/common/hashtable.gcda + build/common/hashtable.gcno + build/common/hashtable.o + build/common/ids.gcda + build/common/ids.gcno + build/common/ids.o + build/common/InsertOptimizedHashTable.gcno + build/common/InsertOptimizedHashTable.o + build/common/libcommon.a + build/common/Logging.gcda + build/common/Logging.gcno + build/common/Logging.o + build/common/Makefile + build/common/Mapping.gcno + build/common/Mapping.o + build/common/Message.gcda + build/common/Message.gcno + build/common/Message.o + build/common/TimeOutReceiver.gcda + build/common/TimeOutReceiver.gcno + build/common/TimeOutReceiver.o + build/common/TypeCast.gcda + build/common/TypeCast.gcno + build/common/TypeCast.o + build/common/TypePromotionMap.gcda + build/common/TypePromotionMap.gcno + build/common/TypePromotionMap.o + build/Daemon/Test/libtest.a + build/Daemon/Test/Makefile + build/Daemon/Daemon.gcda + build/Daemon/Daemon.gcno + build/Daemon/Daemon.o + 
build/Daemon/Executing.gcno + build/Daemon/Executing.o + build/Daemon/libdaemon.a + build/Daemon/Makefile + build/Executor/Test/libtest.a + build/Executor/Test/Makefile + build/Executor/AdaptiveEndPoint.gcda + build/Executor/AdaptiveEndPoint.gcno + build/Executor/AdaptiveEndPoint.o + build/Executor/Coordinator.gcda + build/Executor/Coordinator.gcno + build/Executor/Coordinator.o + build/Executor/exchange_tracker.gcda + build/Executor/exchange_tracker.gcno + build/Executor/exchange_tracker.o + build/Executor/expander_tracker.gcda + build/Executor/expander_tracker.gcno + build/Executor/expander_tracker.o + build/Executor/IteratorExecutorMaster.gcda + build/Executor/IteratorExecutorMaster.gcno + build/Executor/IteratorExecutorMaster.o + build/Executor/IteratorExecutorSlave.gcda + build/Executor/IteratorExecutorSlave.gcno + build/Executor/IteratorExecutorSlave.o + build/Executor/libexecutor.a + build/Executor/Makefile + build/Executor/PortManager.gcda + build/Executor/PortManager.gcno + build/Executor/PortManager.o + build/include/Config.h + build/include/configure.h + build/include/Debug.h + build/include/Environment.h + build/include/IDsGenerator.h + build/IndexManager/Test/libtest.a + build/IndexManager/Test/Makefile + build/IndexManager/CSBIndexBuilding.gcda + build/IndexManager/CSBIndexBuilding.gcno + build/IndexManager/CSBIndexBuilding.o + build/IndexManager/CSBPlusTree.gcda + build/IndexManager/CSBPlusTree.gcno + build/IndexManager/CSBPlusTree.o + build/IndexManager/IndexManager.gcda + build/IndexManager/IndexManager.gcno + build/IndexManager/IndexManager.o + build/IndexManager/IndexScanIterator.gcda + build/IndexManager/IndexScanIterator.gcno + build/IndexManager/IndexScanIterator.o + build/IndexManager/libindexmanager.a + build/IndexManager/LogicalCSBIndexBuilding.gcno + build/IndexManager/LogicalCSBIndexBuilding.o + build/IndexManager/LogicalIndexFilter.gcno + build/IndexManager/LogicalIndexFilter.o + build/IndexManager/LogicalIndexScan.gcno + 
build/IndexManager/LogicalIndexScan.o + build/IndexManager/Makefile + build/Loader/Test/libtest.a + build/Loader/Test/Makefile + build/Loader/Hdfsconnector.gcda + build/Loader/Hdfsconnector.gcno + build/Loader/Hdfsconnector.o + build/Loader/Hdfsloader.gcda + build/Loader/Hdfsloader.gcno + build/Loader/Hdfsloader.o + build/Loader/libloader.a + build/Loader/LocalDiskConnector.gcda + build/Loader/LocalDiskConnector.gcno + build/Loader/LocalDiskConnector.o + build/Loader/Makefile + build/loader/test/data_injector_test.gcno + build/loader/test/data_injector_test.o + build/loader/test/libtest.a + build/loader/test/Makefile + build/loader/test/single_file_connector_test.gcno + build/loader/test/single_file_connector_test.o + build/loader/test/table_file_connector_test.gcno + build/loader/test/table_file_connector_test.o + build/loader/data_injector.gcda + build/loader/data_injector.gcno + build/loader/data_injector.o + build/loader/libloader.a + build/loader/Makefile + build/loader/single_file_connector.gcno + build/loader/single_file_connector.o + build/loader/table_file_connector.gcda + build/loader/table_file_connector.gcno + build/loader/table_file_connector.o + build/loader/validity.gcda + build/loader/validity.gcno + build/loader/validity.o + build/logical_operator/liblogicalqueryplan.a + build/logical_operator/logical_aggregation.gcda + build/logical_operator/logical_aggregation.gcno + build/logical_operator/logical_aggregation.o + build/logical_operator/logical_cross_join.gcda + build/logical_operator/logical_cross_join.gcno + build/logical_operator/logical_cross_join.o + build/logical_operator/logical_delete_filter.gcda + build/logical_operator/logical_delete_filter.gcno + build/logical_operator/logical_delete_filter.o + build/logical_operator/logical_equal_join.gcda + build/logical_operator/logical_equal_join.gcno + build/logical_operator/logical_equal_join.o + build/logical_operator/logical_filter.gcda + build/logical_operator/logical_filter.gcno + 
build/logical_operator/logical_filter.o + build/logical_operator/logical_limit.gcda + build/logical_operator/logical_limit.gcno + build/logical_operator/logical_limit.o + build/logical_operator/logical_operator.gcda + build/logical_operator/logical_operator.gcno + build/logical_operator/logical_operator.o + build/logical_operator/logical_project.gcda + build/logical_operator/logical_project.gcno + build/logical_operator/logical_project.o + build/logical_operator/logical_query_plan_root.gcda + build/logical_operator/logical_query_plan_root.gcno + build/logical_operator/logical_query_plan_root.o + build/logical_operator/logical_scan.gcda + build/logical_operator/logical_scan.gcno + build/logical_operator/logical_scan.o + build/logical_operator/logical_sort.gcda + build/logical_operator/logical_sort.gcno + build/logical_operator/logical_sort.o + build/logical_operator/logical_subquery.gcda + build/logical_operator/logical_subquery.gcno + build/logical_operator/logical_subquery.o + build/logical_operator/Makefile + build/logical_operator/plan_context.gcda + build/logical_operator/plan_context.gcno + build/logical_operator/plan_context.o + build/logical_operator/plan_partition_info.gcda + build/logical_operator/plan_partition_info.gcno + build/logical_operator/plan_partition_info.o + build/logical_operator/plan_partitioner.gcda + build/logical_operator/plan_partitioner.gcno + build/logical_operator/plan_partitioner.o + build/logical_operator/Requirement.gcda + build/logical_operator/Requirement.gcno + build/logical_operator/Requirement.o + build/logical_query_plan/liblogicalqueryplan.a + build/logical_query_plan/logical_aggregation.o + build/logical_query_plan/logical_cross_join.o + build/logical_query_plan/logical_equal_join.o + build/logical_query_plan/logical_filter.o + build/logical_query_plan/logical_limit.o + build/logical_query_plan/logical_operator.o + build/logical_query_plan/logical_project.o + build/logical_query_plan/logical_query_plan_root.o + 
build/logical_query_plan/logical_scan.o + build/logical_query_plan/logical_sort.o + build/logical_query_plan/Makefile + build/logical_query_plan/plan_context.o + build/logical_query_plan/plan_partition_info.o + build/logical_query_plan/plan_partitioner.o + build/logical_query_plan/Requirement.o + build/LogicalQueryPlan/Test/Aggregation_test.o + build/LogicalQueryPlan/Test/getOptimalQueryPlan.o + build/LogicalQueryPlan/Test/libtest.a + build/LogicalQueryPlan/Test/Makefile + build/LogicalQueryPlan/Test/query_optmization_based_on_statisitic_test.o + build/LogicalQueryPlan/Test/ResultCollect_test.o + build/LogicalQueryPlan/Test/testcrossjoin.o + build/LogicalQueryPlan/Test/testGenerateIteratorTree.o + build/LogicalQueryPlan/Test/testGetDataflow.o + build/LogicalQueryPlan/Test/testIn.o + build/LogicalQueryPlan/Test/testProject.o + build/LogicalQueryPlan/Test/testProject_wl.o + build/LogicalQueryPlan/Test/testSort.o + build/LogicalQueryPlan/Aggregation.o + build/LogicalQueryPlan/Buffer.o + build/LogicalQueryPlan/CrossJoin.o + build/LogicalQueryPlan/Dataflow.o + build/LogicalQueryPlan/DataflowPartition.o + build/LogicalQueryPlan/DataflowPartitionDescriptor.o + build/LogicalQueryPlan/equal_join.o + build/LogicalQueryPlan/Filter.o + build/LogicalQueryPlan/liblogicalqueryplan.a + build/LogicalQueryPlan/logical_sort.o + build/LogicalQueryPlan/LogicalOperator.o + build/LogicalQueryPlan/LogicalQueryPlanRoot.o + build/LogicalQueryPlan/Makefile + build/LogicalQueryPlan/Project.o + build/LogicalQueryPlan/Requirement.o + build/LogicalQueryPlan/Scan.o + build/Parsetree/Test/libtest.a + build/Parsetree/Test/Makefile + build/Parsetree/ExecuteLogicalQueryPlan.gcno + build/Parsetree/ExecuteLogicalQueryPlan.o + build/Parsetree/function.gcda + build/Parsetree/function.gcno + build/Parsetree/function.o + build/Parsetree/libparsetree.a + build/Parsetree/Makefile + build/Parsetree/parsetree2logicalplan.gcno + build/Parsetree/parsetree2logicalplan.o + build/Parsetree/preprocess.gcno + 
build/Parsetree/preprocess.o + build/Parsetree/runparsetree.gcno + build/Parsetree/runparsetree.o + build/Parsetree/semantic_analysis.gcno + build/Parsetree/semantic_analysis.o + build/Parsetree/StreamBuffer.o + build/Parsetree/wc2tb.gcno + build/Parsetree/wc2tb.o + build/physical_operator/combine_tuple.gcda + build/physical_operator/combine_tuple.gcno + build/physical_operator/combine_tuple.o + build/physical_operator/exchange_merger.gcda + build/physical_operator/exchange_merger.gcno + build/physical_operator/exchange_merger.o + build/physical_operator/exchange_sender.gcda + build/physical_operator/exchange_sender.gcno + build/physical_operator/exchange_sender.o + build/physical_operator/exchange_sender_materialized.gcda + build/physical_operator/exchange_sender_materialized.gcno + build/physical_operator/exchange_sender_materialized.o + build/physical_operator/exchange_sender_pipeline.gcda + build/physical_operator/exchange_sender_pipeline.gcno + build/physical_operator/exchange_sender_pipeline.o + build/physical_operator/expander.gcda + build/physical_operator/expander.gcno + build/physical_operator/expander.o + build/physical_operator/in_operator.gcda + build/physical_operator/in_operator.gcno + build/physical_operator/in_operator.o + build/physical_operator/libphysicalqueryplan.a + build/physical_operator/Makefile + build/physical_operator/performance_monitor.gcda + build/physical_operator/performance_monitor.gcno + build/physical_operator/performance_monitor.o + build/physical_operator/physical_aggregation.gcda + build/physical_operator/physical_aggregation.gcno + build/physical_operator/physical_aggregation.o + build/physical_operator/physical_delete_filter.gcda + build/physical_operator/physical_delete_filter.gcno + build/physical_operator/physical_delete_filter.o + build/physical_operator/physical_filter.gcda + build/physical_operator/physical_filter.gcno + build/physical_operator/physical_filter.o + build/physical_operator/physical_hash_join.gcda + 
build/physical_operator/physical_hash_join.gcno + build/physical_operator/physical_hash_join.o + build/physical_operator/physical_limit.gcda + build/physical_operator/physical_limit.gcno + build/physical_operator/physical_limit.o + build/physical_operator/physical_nest_loop_join.gcda + build/physical_operator/physical_nest_loop_join.gcno + build/physical_operator/physical_nest_loop_join.o + build/physical_operator/physical_operator.gcda + build/physical_operator/physical_operator.gcno + build/physical_operator/physical_operator.o + build/physical_operator/physical_operator_base.gcda + build/physical_operator/physical_operator_base.gcno + build/physical_operator/physical_operator_base.o + build/physical_operator/physical_project.gcda + build/physical_operator/physical_project.gcno + build/physical_operator/physical_project.o + build/physical_operator/physical_projection_scan.gcda + build/physical_operator/physical_projection_scan.gcno + build/physical_operator/physical_projection_scan.o + build/physical_operator/physical_sort.gcda + build/physical_operator/physical_sort.gcno + build/physical_operator/physical_sort.o + build/physical_operator/result_collector.gcda + build/physical_operator/result_collector.gcno + build/physical_operator/result_collector.o + build/physical_operator/result_printer.gcda + build/physical_operator/result_printer.gcno + build/physical_operator/result_printer.o + build/physical_query_plan/BlockStreamAggregationIterator.o + build/physical_query_plan/BlockStreamCombinedIterator.o + build/physical_query_plan/BlockStreamExchangeLowerBase.o + build/physical_query_plan/BlockStreamExpander.o + build/physical_query_plan/BlockStreamInIterator.o + build/physical_query_plan/BlockStreamIteratorBase.o + build/physical_query_plan/BlockStreamJoinIterator.o + build/physical_query_plan/BlockStreamLimit.o + build/physical_query_plan/BlockStreamNestLoopJoinIterator.o + build/physical_query_plan/BlockStreamPerformanceMonitorTop.o + 
build/physical_query_plan/BlockStreamPrint.o + build/physical_query_plan/BlockStreamProjectIterator.o + build/physical_query_plan/BlockStreamResultCollector.o + build/physical_query_plan/BlockStreamSortIterator.o + build/physical_query_plan/ExpandableBlockStreamBuffer.o + build/physical_query_plan/ExpandableBlockStreamExchangeEpoll.o + build/physical_query_plan/ExpandableBlockStreamExchangeLowerEfficient.o + build/physical_query_plan/ExpandableBlockStreamExchangeLowerMaterialized.o + build/physical_query_plan/ExpandableBlockStreamFilter.o + build/physical_query_plan/ExpandableBlockStreamHdfsScan.o + build/physical_query_plan/ExpandableBlockStreamProjectionScan.o + build/physical_query_plan/ExpandableBlockStreamRandomDiskAccess.o + build/physical_query_plan/ExpandableBlockStreamRandomMemAccess.o + build/physical_query_plan/ExpandableBlockStreamSingleColumnScan.o + build/physical_query_plan/ExpandableBlockStreamSingleColumnScanDisk.o + build/physical_query_plan/libphysicalqueryplan.a + build/physical_query_plan/Makefile + build/physical_query_plan/physical_operator.o + build/Resource/BufferManager.gcda + build/Resource/BufferManager.gcno + build/Resource/BufferManager.o + build/Resource/CPUResource.gcda + build/Resource/CPUResource.gcno + build/Resource/CPUResource.o + build/Resource/libresouce.a + build/Resource/Makefile + build/Resource/NodeTracker.gcda + build/Resource/NodeTracker.gcno + build/Resource/NodeTracker.o + build/Resource/ResourceInfo.gcda + build/Resource/ResourceInfo.gcno + build/Resource/ResourceInfo.o + build/Resource/ResourceManagerMaster.gcda + build/Resource/ResourceManagerMaster.gcno + build/Resource/ResourceManagerMaster.o + build/Resource/ResourceManagerSlave.gcda + build/Resource/ResourceManagerSlave.gcno + build/Resource/ResourceManagerSlave.o + build/sql_parser/ast_node/ast_create_stmt.gcda + build/sql_parser/ast_node/ast_create_stmt.gcno + build/sql_parser/ast_node/ast_create_stmt.o + build/sql_parser/ast_node/ast_delete_stmt.gcda + 
build/sql_parser/ast_node/ast_delete_stmt.gcno + build/sql_parser/ast_node/ast_delete_stmt.o + build/sql_parser/ast_node/ast_drop_stmt.gcda + build/sql_parser/ast_node/ast_drop_stmt.gcno + build/sql_parser/ast_node/ast_drop_stmt.o + build/sql_parser/ast_node/ast_expr_node.gcda + build/sql_parser/ast_node/ast_expr_node.gcno + build/sql_parser/ast_node/ast_expr_node.o + build/sql_parser/ast_node/ast_insert_stmt.gcda + build/sql_parser/ast_node/ast_insert_stmt.gcno + build/sql_parser/ast_node/ast_insert_stmt.o + build/sql_parser/ast_node/ast_load_stmt.gcda + build/sql_parser/ast_node/ast_load_stmt.gcno + build/sql_parser/ast_node/ast_load_stmt.o + build/sql_parser/ast_node/ast_node.gcda + build/sql_parser/ast_node/ast_node.gcno + build/sql_parser/ast_node/ast_node.o + build/sql_parser/ast_node/ast_select_stmt.gcda + build/sql_parser/ast_node/ast_select_stmt.gcno + build/sql_parser/ast_node/ast_select_stmt.o + build/sql_parser/ast_node/ast_show_stmt.gcda + build/sql_parser/ast_node/ast_show_stmt.gcno + build/sql_parser/ast_node/ast_show_stmt.o + build/sql_parser/ast_node/libast_node.a + build/sql_parser/ast_node/Makefile + build/sql_parser/main/main.o + build/sql_parser/main/Makefile + build/sql_parser/main/parser + build/sql_parser/parser/lex.yy.gcda + build/sql_parser/parser/lex.yy.gcno + build/sql_parser/parser/lex.yy.o + build/sql_parser/parser/libparser.a + build/sql_parser/parser/Makefile + build/sql_parser/parser/parser.gcda + build/sql_parser/parser/parser.gcno + build/sql_parser/parser/parser.o + build/sql_parser/parser/sql.tab.gcda + build/sql_parser/parser/sql.tab.gcno + build/sql_parser/parser/sql.tab.o + build/sql_parser/Test/libparser_test.a + build/sql_parser/Test/Makefile + build/sql_parser/Test/test_new_sql.gcno + build/sql_parser/Test/test_new_sql.o + build/sql_parser/Makefile + build/stmt_handler/create_projection_exec.gcda + build/stmt_handler/create_projection_exec.gcno + build/stmt_handler/create_projection_exec.o + 
build/stmt_handler/create_table_exec.gcda + build/stmt_handler/create_table_exec.gcno + build/stmt_handler/create_table_exec.o + build/stmt_handler/delete_stmt_exec.gcda + build/stmt_handler/delete_stmt_exec.gcno + build/stmt_handler/delete_stmt_exec.o + build/stmt_handler/drop_table_exec.gcda + build/stmt_handler/drop_table_exec.gcno + build/stmt_handler/drop_table_exec.o + build/stmt_handler/insert_exec.gcda + build/stmt_handler/insert_exec.gcno + build/stmt_handler/insert_exec.o + build/stmt_handler/libstmthandler.a + build/stmt_handler/load_exec.gcda + build/stmt_handler/load_exec.gcno + build/stmt_handler/load_exec.o + build/stmt_handler/Makefile + build/stmt_handler/select_exec.gcda + build/stmt_handler/select_exec.gcno + build/stmt_handler/select_exec.o + build/stmt_handler/show_exec.gcda + build/stmt_handler/show_exec.gcno + build/stmt_handler/show_exec.o + build/stmt_handler/stmt_exec.gcda + build/stmt_handler/stmt_exec.gcno + build/stmt_handler/stmt_exec.o + build/stmt_handler/stmt_handler.gcda + build/stmt_handler/stmt_handler.gcno + build/stmt_handler/stmt_handler.o + build/storage/AllBlockInfo.gcda + build/storage/AllBlockInfo.gcno + build/storage/AllBlockInfo.o + build/storage/BlanceMatcher.gcda + build/storage/BlanceMatcher.gcno + build/storage/BlanceMatcher.o + build/storage/BlockManager.gcda + build/storage/BlockManager.gcno + build/storage/BlockManager.o + build/storage/BlockManagerId.gcda + build/storage/BlockManagerId.gcno + build/storage/BlockManagerId.o + build/storage/BlockManagerMaster.gcda + build/storage/BlockManagerMaster.gcno + build/storage/BlockManagerMaster.o + build/storage/BlockMessage.gcno + build/storage/BlockMessage.o + build/storage/BlockStore.gcno + build/storage/BlockStore.o + build/storage/ChunkStorage.gcda + build/storage/ChunkStorage.gcno + build/storage/ChunkStorage.o + build/storage/DiskStore.gcda + build/storage/DiskStore.gcno + build/storage/DiskStore.o + build/storage/libstorage.a + build/storage/Makefile + 
build/storage/MemoryStore.gcda + build/storage/MemoryStore.gcno + build/storage/MemoryStore.o + build/storage/PartitionReaderIterator.gcno + build/storage/PartitionReaderIterator.o + build/storage/PartitionStorage.gcda + build/storage/PartitionStorage.gcno + build/storage/PartitionStorage.o + build/Test/common/libcommon.a + build/Test/common/Makefile + build/Test/TestSuit/libtestsuit.a + build/Test/TestSuit/Makefile + build/Test/utility/libutility.a + build/Test/utility/Makefile + build/Test/gtest_main.gcno + build/Test/gtest_main.o + build/Test/libtest.a + build/Test/Makefile + build/utility/command_line.gcno + build/utility/command_line.o + build/utility/data_distribution_generator.gcno + build/utility/data_distribution_generator.o + build/utility/ExpandabilityShrinkability.gcda + build/utility/ExpandabilityShrinkability.gcno + build/utility/ExpandabilityShrinkability.o + build/utility/libutility.a + build/utility/Makefile + build/utility/string_process.gcda + build/utility/string_process.gcno + build/utility/string_process.o + build/utility/ThreadPool.gcda + build/utility/ThreadPool.gcno + build/utility/ThreadPool.o + build/utility/ThreadSafe.gcda + build/utility/ThreadSafe.gcno + build/utility/ThreadSafe.o + build/catalogData.dat + build/claimsserver + build/client + build/Client.gcno + build/Client.o + build/Config.gcda + build/Config.gcno + build/config.h + build/config.log + build/Config.o + build/config.status + build/Environment.gcda + build/Environment.gcno + build/Environment.o + build/gtest_main.gcda + build/gtest_main.gcno + build/gtest_main.o + build/IDsGenerator.gcda + build/IDsGenerator.gcno + build/IDsGenerator.o + build/libtool + build/Makefile + build/Server.gcno + build/Server.o + build/stamp-h1 + build/test + build/test_for_DI_GetTuple + catalog/stat/Analyzer.cpp + catalog/stat/Analyzer.h + catalog/stat/AttributeStatistics.cpp + catalog/stat/AttributeStatistics.h + catalog/stat/Estimation.cpp + catalog/stat/Estimation.h + 
catalog/stat/Makefile.am + catalog/stat/Makefile.in + catalog/stat/Statistic.cpp + catalog/stat/Statistic.h + catalog/stat/StatManager.cpp + catalog/stat/StatManager.h + catalog/stat/TableStatistic.cpp + catalog/stat/TableStatistic.h + catalog/Test/Makefile.am + catalog/Test/Makefile.in + catalog/Test/Partitioner_test.cpp + catalog/Test/statistic_manager_test.cpp + catalog/attribute.h + catalog/catalog.cpp + catalog/catalog.h + catalog/column.cpp + catalog/column.h + catalog/Makefile.am + catalog/Makefile.in + catalog/partitioner.cpp + catalog/partitioner.h + catalog/projection.cpp + catalog/projection.h + catalog/projection_binding.cpp + catalog/projection_binding.h + catalog/table.cpp + catalog/table.h + catalog/unordered_map.hpp + Client/json/json-forwards.h + Client/json/json.h + Client/json/Makefile.am + Client/json/Makefile.in + Client/Test/Makefile.am + Client/Test/Makefile.in + Client/Test/TestSeverClient.cpp + Client/ClaimsServer.cpp + Client/ClaimsServer.h + Client/Client.cpp + Client/Client.h + Client/ClientResponse.cpp + Client/ClientResponse.h + Client/jsoncpp.cpp + Client/Makefile.am + Client/Makefile.in + codegen/codegen_test.h + codegen/CodeGenerator.cpp + codegen/CodeGenerator.h + codegen/CompareFunctonGenerator.cpp + codegen/CompareFunctonGenerator.h + codegen/ExpressionGenerator.cpp + codegen/ExpressionGenerator.h + codegen/generating_expr.cpp + codegen/main.cpp + codegen/Makefile.am + codegen/Makefile.in + common/Block/Block.cpp + common/Block/Block.h + common/Block/BlockContainer.cpp + common/Block/BlockContainer.h + common/Block/BlockStream.cpp + common/Block/BlockStream.h + common/Block/BlockStreamBuffer.cpp + common/Block/BlockStreamBuffer.h + common/Block/DynamicBlockBuffer.cpp + common/Block/DynamicBlockBuffer.h + common/Block/Makefile.am + common/Block/Makefile.in + common/Block/MonitorableBuffer.cpp + common/Block/MonitorableBuffer.h + common/Block/PartitionedBlockBuffer.cpp + common/Block/PartitionedBlockBuffer.h + 
common/Block/PartitionedBlockContainer.cpp + common/Block/PartitionedBlockContainer.h + common/Block/ResultSet.cpp + common/Block/ResultSet.h + common/Expression/execfunc.cpp + common/Expression/execfunc.h + common/Expression/expression_test.cpp + common/Expression/expression_test.h + common/Expression/initquery.cpp + common/Expression/initquery.h + common/Expression/Makefile.am + common/Expression/Makefile.in + common/Expression/qnode.cpp + common/Expression/qnode.h + common/Expression/queryfunc.cpp + common/Expression/queryfunc.h + common/Expression/sql_node_struct.h + common/expression/data_type_oper.cpp + common/expression/data_type_oper.h + common/expression/expr_binary.cpp + common/expression/expr_binary.h + common/expression/expr_case_when.cpp + common/expression/expr_case_when.h + common/expression/expr_column.cpp + common/expression/expr_column.h + common/expression/expr_const.cpp + common/expression/expr_const.h + common/expression/expr_date.cpp + common/expression/expr_date.h + common/expression/expr_in.cpp + common/expression/expr_in.h + common/expression/expr_node.cpp + common/expression/expr_node.h + common/expression/expr_ternary.cpp + common/expression/expr_ternary.h + common/expression/expr_type_cast.cpp + common/expression/expr_type_cast.h + common/expression/expr_unary.cpp + common/expression/expr_unary.h + common/expression/Makefile.am + common/expression/Makefile.in + common/expression/type_conversion_matrix.cpp + common/expression/type_conversion_matrix.h + common/file_handle/test/disk_file_handle_imp_test.h + common/file_handle/test/hdfs_file_handle_imp_test.h + common/file_handle/test/Makefile.am + common/file_handle/test/Makefile.in + common/file_handle/disk_file_handle_imp.cpp + common/file_handle/disk_file_handle_imp.h + common/file_handle/file_handle_imp.cpp + common/file_handle/file_handle_imp.h + common/file_handle/file_handle_imp_factory.h + common/file_handle/hdfs_file_handle_imp.cpp + common/file_handle/hdfs_file_handle_imp.h + 
common/file_handle/Makefile.am + common/file_handle/Makefile.in + common/log/logging.cpp + common/log/logging.h + common/log/Makefile.am + common/log/Makefile.in + common/Schema/Test/generateVariableData_test.cpp + common/Schema/Test/Makefile.am + common/Schema/Test/Makefile.in + common/Schema/Test/SubTuple_test.cpp + common/Schema/Test/test_suits.cpp + common/Schema/Test/VariableSchema_test.cpp + common/Schema/Makefile.am + common/Schema/Makefile.in + common/Schema/Schema.cpp + common/Schema/Schema.h + common/Schema/SchemaFix.cpp + common/Schema/SchemaFix.h + common/Schema/SchemaVar.cpp + common/Schema/SchemaVar.h + common/Schema/TupleConvertor.cpp + common/Schema/TupleConvertor.h + common/serialization/Makefile.am + common/serialization/Makefile.in + common/serialization/RegisterDerivedClass.cpp + common/serialization/RegisterDerivedClass.h + common/test/operate_test.h + common/types/Test/data_type_test.h + common/types/Test/Makefile.am + common/types/Test/Makefile.in + common/types/ttmath/Makefile.am + common/types/ttmath/Makefile.in + common/types/ttmath/ttmathint.h + common/types/ttmath/ttmathmisc.h + common/types/ttmath/ttmathtypes.h + common/types/ttmath/ttmathuint.h + common/types/ttmath/ttmathuint_noasm.h + common/types/ttmath/ttmathuint_x86.h + common/types/ttmath/ttmathuint_x86_64.h + common/types/ExportSerializeIo.h + common/types/Makefile.am + common/types/Makefile.in + common/types/NValue.cpp + common/types/NValue.hpp + common/types/value_defs.h + common/AttributeComparator.cpp + common/AttributeComparator.h + common/Comparator.cpp + common/Comparator.h + common/data_type.cpp + common/data_type.h + common/error_define.h + common/error_no.cpp + common/error_no.h + common/ExpandedThreadTracker.cpp + common/ExpandedThreadTracker.h + common/hash.cpp + common/hash.h + common/hashtable.cpp + common/hashtable.h + common/ids.cpp + common/ids.h + common/InsertOptimizedHashTable.cpp + common/InsertOptimizedHashTable.h + common/Logging.cpp + common/Logging.h + 
common/Makefile.am + common/Makefile.in + common/Mapping.cpp + common/Mapping.h + common/memory_handle.h + common/Message.cpp + common/Message.h + common/partition_functions.h + common/rename.h + common/TimeOutReceiver.cpp + common/TimeOutReceiver.h + common/TypeCast.cpp + common/TypeCast.h + common/TypePromotionMap.cpp + common/TypePromotionMap.h + conf/config + conf/config.template + conf/imdb.conf.template + Daemon/Test/Makefile.in + Daemon/Daemon.cpp + Daemon/Daemon.h + Daemon/Executing.cpp + Daemon/Executing.h + Daemon/Makefile.am + Daemon/Makefile.in + Executor/Test/Executor.cpp + Executor/Test/Makefile.am + Executor/Test/Makefile.in + Executor/AdaptiveEndPoint.cpp + Executor/AdaptiveEndPoint.h + Executor/Coordinator.cpp + Executor/Coordinator.h + Executor/exchange_tracker.cpp + Executor/exchange_tracker.h + Executor/expander_tracker.cpp + Executor/expander_tracker.h + Executor/IteratorExecutorMaster.cpp + Executor/IteratorExecutorMaster.h + Executor/IteratorExecutorSlave.cpp + Executor/IteratorExecutorSlave.h + Executor/Makefile.am + Executor/Makefile.in + Executor/PortManager.cpp + Executor/PortManager.h + IndexManager/Test/Makefile.am + IndexManager/Test/Makefile.in + IndexManager/Test/test_csb_classes.cpp + IndexManager/Test/test_CSB_index_building.cpp + IndexManager/Test/test_csb_indexing.cpp + IndexManager/Test/test_index_manager.cpp + IndexManager/Test/test_index_scan_iterator.cpp + IndexManager/Test/test_IndexManager_serialize.cpp + IndexManager/Test/test_logical_csb_index_building.cpp + IndexManager/CSBIndexBuilding.cpp + IndexManager/CSBIndexBuilding.h + IndexManager/CSBPlusTree.cpp + IndexManager/CSBPlusTree.h + IndexManager/IndexManager.cpp + IndexManager/IndexManager.h + IndexManager/IndexScanIterator.cpp + IndexManager/IndexScanIterator.h + IndexManager/LogicalCSBIndexBuilding.cpp + IndexManager/LogicalCSBIndexBuilding.h + IndexManager/LogicalIndexFilter.cpp + IndexManager/LogicalIndexFilter.h + IndexManager/LogicalIndexScan.cpp + 
IndexManager/LogicalIndexScan.h + IndexManager/Makefile.am + IndexManager/Makefile.in + loader/test/data_injector_test.cpp + loader/test/data_injector_test.h + loader/test/Makefile.am + loader/test/Makefile.in + loader/test/single_file_connector_test.cpp + loader/test/single_file_connector_test.h + loader/test/table_file_connector_test.cpp + loader/test/table_file_connector_test.h + loader/test/test_load.cpp + loader/data_injector.cpp + loader/data_injector.h + loader/file_connector.h + loader/Makefile.am + loader/Makefile.in + loader/single_file_connector.cpp + loader/single_file_connector.h + loader/table_file_connector.cpp + loader/table_file_connector.h + loader/validity.cpp + loader/validity.h + logical_operator/logical_aggregation.cpp + logical_operator/logical_aggregation.h + logical_operator/logical_cross_join.cpp + logical_operator/logical_cross_join.h + logical_operator/logical_delete_filter.cpp + logical_operator/logical_delete_filter.h + logical_operator/logical_equal_join.cpp + logical_operator/logical_equal_join.h + logical_operator/logical_filter.cpp + logical_operator/logical_filter.h + logical_operator/logical_limit.cpp + logical_operator/logical_limit.h + logical_operator/logical_operator.cpp + logical_operator/logical_operator.h + logical_operator/logical_project.cpp + logical_operator/logical_project.h + logical_operator/logical_query_plan_root.cpp + logical_operator/logical_query_plan_root.h + logical_operator/logical_scan.cpp + logical_operator/logical_scan.h + logical_operator/logical_sort.cpp + logical_operator/logical_sort.h + logical_operator/logical_subquery.cpp + logical_operator/logical_subquery.h + logical_operator/Makefile.am + logical_operator/Makefile.in + logical_operator/plan_context.cpp + logical_operator/plan_context.h + logical_operator/plan_partition_info.cpp + logical_operator/plan_partition_info.h + logical_operator/plan_partitioner.cpp + logical_operator/plan_partitioner.h + logical_operator/Requirement.cpp + 
logical_operator/Requirement.h + Parsetree/Test/Makefile.in + Parsetree/Makefile.in + physical_operator/test/delete_filter_test.cpp + physical_operator/test/delete_filter_test.h + physical_operator/test/fake_table_scan.cpp + physical_operator/test/fake_table_scan.h + physical_operator/combine_tuple.cpp + physical_operator/combine_tuple.h + physical_operator/exchange_merger.cpp + physical_operator/exchange_merger.h + physical_operator/exchange_sender.cpp + physical_operator/exchange_sender.h + physical_operator/exchange_sender_materialized.cpp + physical_operator/exchange_sender_materialized.h + physical_operator/exchange_sender_pipeline.cpp + physical_operator/exchange_sender_pipeline.h + physical_operator/expander.cpp + physical_operator/expander.h + physical_operator/in_operator.cpp + physical_operator/in_operator.h + physical_operator/Makefile.am + physical_operator/Makefile.in + physical_operator/performance_monitor.cpp + physical_operator/performance_monitor.h + physical_operator/physical_aggregation.cpp + physical_operator/physical_aggregation.h + physical_operator/physical_delete_filter.cpp + physical_operator/physical_delete_filter.h + physical_operator/physical_filter.cpp + physical_operator/physical_filter.h + physical_operator/physical_hash_join.cpp + physical_operator/physical_hash_join.h + physical_operator/physical_limit.cpp + physical_operator/physical_limit.h + physical_operator/physical_nest_loop_join.cpp + physical_operator/physical_nest_loop_join.h + physical_operator/physical_operator.cpp + physical_operator/physical_operator.h + physical_operator/physical_operator_base.cpp + physical_operator/physical_operator_base.h + physical_operator/physical_project.cpp + physical_operator/physical_project.h + physical_operator/physical_projection_scan.cpp + physical_operator/physical_projection_scan.h + physical_operator/physical_sort.cpp + physical_operator/physical_sort.h + physical_operator/result_collector.cpp + physical_operator/result_collector.h + 
physical_operator/result_printer.cpp + physical_operator/result_printer.h + Resource/BufferManager.cpp + Resource/BufferManager.h + Resource/CPUResource.cpp + Resource/CPUResource.h + Resource/Makefile.am + Resource/Makefile.in + Resource/NodeTracker.cpp + Resource/NodeTracker.h + Resource/ResourceInfo.cpp + Resource/ResourceInfo.h + Resource/ResourceManagerMaster.cpp + Resource/ResourceManagerMaster.h + Resource/ResourceManagerSlave.cpp + Resource/ResourceManagerSlave.h + Resource/ThreadManager.h + sbin/2-claims-conf/cluster-deploy.config + sbin/2-claims-conf/generate-config.sh + sbin/2-claims-conf/load-config.sh + sbin/claims-test/testcase/ddl_tpch_sf100_1partition.test + sbin/claims-test/testcase/ddl_tpch_sf100_4partition.test + sbin/claims-test/testcase/ddl_tpch_sf100_8partition.test + sbin/claims-test/testcase/ddl_tpch_sf10_1partition.test + sbin/claims-test/testcase/ddl_tpch_sf10_4partition.test + sbin/claims-test/testcase/ddl_tpch_sf10_8partition.test + sbin/claims-test/testcase/ddl_tpch_sf1_1partition.test + sbin/claims-test/testcase/ddl_tpch_sf1_4partition.test + sbin/claims-test/testcase/ddl_tpch_sf1_8partition.test + sbin/claims-test/testcase/decimal.test + sbin/claims-test/testcase/load.test + sbin/claims-test/testcase/load_tpch_sf1_1p.test + sbin/claims-test/testresult/ddl_tpch_sf10_1partition-1.result + sbin/claims-test/testresult/ddl_tpch_sf10_4partition-1.result + sbin/claims-test/testresult/ddl_tpch_sf10_8partition-1.result + sbin/claims-test/testresult/ddl_tpch_sf1_1partition-1.result + sbin/claims-test/testresult/ddl_tpch_sf1_4partition-1.result + sbin/claims-test/testresult/ddl_tpch_sf1_8partition-1.result + sbin/claims-test/claimstest.sh + sbin/claims-test/monitor-gtest.sh + sbin/claims-test/monitor-test.sh + sbin/slave-scripts/start-slave.sh + sbin/slave-scripts/stop-slave.sh + sbin/1-compile.sh + sbin/3-deploy.sh + sbin/4-stop-all.sh + sbin/5-start-all.sh + sql_parser/ast_node/ast_create_stmt.cpp + sql_parser/ast_node/ast_create_stmt.h + 
sql_parser/ast_node/ast_delete_stmt.cpp + sql_parser/ast_node/ast_delete_stmt.h + sql_parser/ast_node/ast_drop_stmt.cpp + sql_parser/ast_node/ast_drop_stmt.h + sql_parser/ast_node/ast_expr_node.cpp + sql_parser/ast_node/ast_expr_node.h + sql_parser/ast_node/ast_insert_stmt.cpp + sql_parser/ast_node/ast_insert_stmt.h + sql_parser/ast_node/ast_load_stmt.cpp + sql_parser/ast_node/ast_load_stmt.h + sql_parser/ast_node/ast_node.cpp + sql_parser/ast_node/ast_node.h + sql_parser/ast_node/ast_select_stmt.cpp + sql_parser/ast_node/ast_select_stmt.h + sql_parser/ast_node/ast_show_stmt.cpp + sql_parser/ast_node/ast_show_stmt.h + sql_parser/ast_node/Makefile.am + sql_parser/ast_node/Makefile.in + sql_parser/parser/lex.yy.cpp + sql_parser/parser/Makefile.am + sql_parser/parser/Makefile.in + sql_parser/parser/parser.cpp + sql_parser/parser/parser.h + sql_parser/parser/sql.l + sql_parser/parser/sql.lex.h + sql_parser/parser/sql.tab.cpp + sql_parser/parser/sql.tab.hpp + sql_parser/parser/sql.ypp + sql_parser/Test/Makefile.am + sql_parser/Test/Makefile.in + sql_parser/Test/test_new_sql.cpp + sql_parser/Makefile.am + sql_parser/Makefile.in + stmt_handler/create_projection_exec.cpp + stmt_handler/create_projection_exec.h + stmt_handler/create_table_exec.cpp + stmt_handler/create_table_exec.h + stmt_handler/delete_stmt_exec.cpp + stmt_handler/delete_stmt_exec.h + stmt_handler/drop_table_exec.cpp + stmt_handler/drop_table_exec.h + stmt_handler/insert_exec.cpp + stmt_handler/insert_exec.h + stmt_handler/load_exec.cpp + stmt_handler/load_exec.h + stmt_handler/Makefile.am + stmt_handler/Makefile.in + stmt_handler/select_exec.cpp + stmt_handler/select_exec.h + stmt_handler/show_exec.cpp + stmt_handler/show_exec.h + stmt_handler/stmt_exec.cpp + stmt_handler/stmt_exec.h + stmt_handler/stmt_handler.cpp + stmt_handler/stmt_handler.h + storage/AllBlockInfo.cpp + storage/AllBlockInfo.h + storage/BlanceMatcher.cpp + storage/BlanceMatcher.h + storage/BlockManager.cpp + storage/BlockManager.h + 
storage/BlockManagerId.cpp + storage/BlockManagerId.h + storage/BlockManagerMaster.cpp + storage/BlockManagerMaster.h + storage/BlockMessage.cpp + storage/BlockMessage.h + storage/BlockStore.cpp + storage/BlockStore.h + storage/ChunkStorage.cpp + storage/ChunkStorage.h + storage/DiskStore.cpp + storage/DiskStore.h + storage/Makefile.am + storage/Makefile.in + storage/MemoryStore.cpp + storage/MemoryStore.h + storage/PartitionReaderIterator.cpp + storage/PartitionReaderIterator.h + storage/PartitionStorage.cpp + storage/PartitionStorage.h + storage/StorageLevel.h + Test/common/errno_test.h + Test/common/insert_optimized_hash_table_test.h + Test/common/issue27.cpp + Test/common/issue27_sort.cpp + Test/common/issue27ing.cpp + Test/common/Makefile.am + Test/common/Makefile.in + Test/common/TestNew.cpp + Test/common/TestTPC_schame.cpp + Test/iterator/elastic_iterator_model_test.h + Test/Parsetree/sql_parser_test.h + Test/Resource/CPUResourceManagerTest.h + Test/TestSuit/filter_test.h + Test/TestSuit/hash_table_test.h + Test/TestSuit/in_segment_scalability_test.cpp + Test/TestSuit/Makefile.am + Test/TestSuit/Makefile.in + Test/TestSuit/sql_query_test.h + Test/TestSuit/tpc_h_test.cpp + Test/utility/generator_test.h + Test/utility/Makefile.am + Test/utility/Makefile.in + Test/block_stream_iterator_serilaization_test.cpp + Test/block_stream_test.cpp + Test/Environment_test.cpp + Test/gtest.h + Test/gtest_main.cpp + Test/Makefile.am + Test/Makefile.in + Test/memory_leak_test.h + Test/MultiToMulti.cpp + Test/set_up_environment.h + Test/test_adaptiveendpoint.cpp + Test/test_coordinator.cpp + Test/test_scdong.cpp + Test/TestMain.cpp + utility/atomics.h + utility/command_line.cpp + utility/command_line.h + utility/CpuScheduler.h + utility/data_distribution_generator.cpp + utility/data_distribution_generator.h + utility/ExpandabilityShrinkability.cpp + utility/ExpandabilityShrinkability.h + utility/generator.h + utility/lock.h + utility/lock_guard.h + utility/Makefile.am + 
utility/Makefile.in + utility/maths.h + utility/print_tool.h + utility/rdtsc.h + utility/string_process.cpp + utility/string_process.h + utility/test_tool.h + utility/ThreadPool.cpp + utility/ThreadPool.h + utility/ThreadSafe.cpp + utility/ThreadSafe.h + utility/Timer.h + utility/warmup.h + aclocal.m4 + ar-lib + Authors + build.sh + CLAIMS.doxyfile + Client.cpp + Config.cpp + config.guess + Config.h + config.h.in + config.sub + configure + configure.ac + configure.h + Debug.h + depcomp + Environment.cpp + Environment.h + IDsGenerator.cpp + IDsGenerator.h + install-sh + ltmain.sh + Makefile.am + Makefile.in + missing + README + README.md + Server.cpp + startup.cpp + startup.h) + +add_executable(CLAIMS ${SOURCE_FILES}) \ No newline at end of file diff --git a/Client.cpp b/Client.cpp index a48231a6a..215c91aa3 100644 --- a/Client.cpp +++ b/Client.cpp @@ -51,7 +51,10 @@ void submit_command(Client& client, std::string& command) { printf("%s", message.c_str()); break; case Client::error: - printf("%s", message.c_str()); + printf( + "\e[0;31m" + "%s\033[0m\n", + message.c_str()); break; default: assert(false); @@ -73,7 +76,10 @@ void submit_command_repeated(Client& client, std::string& command, printf("%s", message.c_str()); break; case Client::error: - printf("%s", message.c_str()); + printf( + "\e[0;31m" + "%s\033[0m\n", + message.c_str()); break; default: assert(false); @@ -81,7 +87,34 @@ void submit_command_repeated(Client& client, std::string& command, } } } +void PrintUsage() { + cout << "Welcome to use CLAIMS. " << endl; + cout << "Type: " << endl; + cout << "\t help; for usage of CLAIMS." << endl; + cout << "\t copyright; for distribution terms." << endl; + cout << "\t exit; or shutdown; for exiting CLAIMS." << endl; +} +void PrintCopyright() { + cout << "Copyright [2012-2015] DaSE@ECNU " << endl + << " Licensed to the Apache Software Foundation (ASF) under one or more" + << " contributor license agreements. 
See the NOTICE file distributed " + "with" + << " this work for additional information regarding copyright ownership." + << " The ASF licenses this file to You under the Apache License, " + "Version 2.0" + << " (the \" License\"); you may not use this file except in compliance " + "with" + << " the License. You may obtain a copy of the License at" << endl + << " http://www.apache.org/licenses/LICENSE-2.0" << endl + << " Unless required by applicable law or agreed to in writing, software" + << " distributed under the License is distributed on an \" AS IS \" " + "BASIS," + << " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or " + "implied." + << " See the License for the specific language governing permissions and" + << " limitations under the License." << endl; +} int main(int argc, char** argv) { /* Client */ @@ -98,6 +131,7 @@ int main(int argc, char** argv) { claims::common::Logging claims_logging(argv[0]); print_welcome(); + PrintUsage(); Client client; client.connection(argv[1], atoi(argv[2])); @@ -115,6 +149,12 @@ int main(int argc, char** argv) { break; } else if (command.empty()) { continue; + } else if (command == "help;") { + PrintUsage(); + continue; + } else if (command == "copyright;") { + PrintCopyright(); + continue; } submit_command(client, command); diff --git a/Client/ClaimsServer.cpp b/Client/ClaimsServer.cpp index e0f831f1f..0d9fdee9e 100644 --- a/Client/ClaimsServer.cpp +++ b/Client/ClaimsServer.cpp @@ -71,6 +71,7 @@ int ClientListener::receiveRequest(const int fd, const char *cmd) { remote_command rcmd; rcmd.socket_fd = fd; // rcmd.cmd.append(cmd); + cout << cmd << endl; rcmd.cmd = std::string(cmd); Daemon::getInstance()->addRemoteCommand(rcmd); @@ -266,11 +267,10 @@ void *ClientListener::receiveHandler(void *para) { server->removeClient(server->m_clientFds[i]); continue; } - - // cout<<"nread:"<m_clientFds[i], buf, nread); - buf[read_count] = '\0'; // fix a bug + buf[read_count] = '\0'; // fix a bug + int sql_type = buf[0] - 48; 
// '1' - 48 = 1 ClientLogging::log("sql_type is %d", sql_type); if (sql_type <= 9 && sql_type >= 0) { @@ -763,16 +763,20 @@ void *ClientListener::sendHandler(void *para) { } else if (client_type::c == server->client_type_) { // query return true cliRes.setOk("Yes Ok"); - ClientListenerLogging::log( - "to send data response-- status:%d length:%d content:%s fd:%d", - cliRes.status, cliRes.length, cliRes.content.c_str(), result.fd_); + // ClientListenerLogging::log( + // "to send data response-- status:%d length:%d + // content:%s fd:%d", + // cliRes.status, cliRes.length, cliRes.content.c_str(), + // result.fd_); server->write(result.fd_, cliRes); printf("send ok response packet ok\n"); cliRes.setSchema(result.result_->schema_); - ClientListenerLogging::log( - "to send data response-- status:%d length:%d content:%s fd:%d", - cliRes.status, cliRes.length, cliRes.content.c_str(), result.fd_); + // ClientListenerLogging::log( + // "to send data response-- status:%d length:%d + // content:%s fd:%d", + // cliRes.status, cliRes.length, cliRes.content.c_str(), + // result.fd_); server->write(result.fd_, cliRes); printf("send schema response packet ok\n"); @@ -782,9 +786,11 @@ void *ClientListener::sendHandler(void *para) { header.add_header(list[i]); } cliRes.setAttributeName(header); - ClientListenerLogging::log( - "to send data response-- status:%d length:%d content:%s fd:%d", - cliRes.status, cliRes.length, cliRes.content.c_str(), result.fd_); + // ClientListenerLogging::log( + // "to send data response-- status:%d length:%d + // content:%s fd:%d", + // cliRes.status, cliRes.length, cliRes.content.c_str(), + // result.fd_); server->write(result.fd_, cliRes); printf("send head response packet ok\n"); @@ -797,11 +803,13 @@ void *ClientListener::sendHandler(void *para) { while (block = (BlockStreamBase *)it.atomicNextBlock()) { block->serialize(serialzed_block); cliRes.setDataBlock(serialzed_block); - ClientListenerLogging::log( - "to send data response-- status:%d 
length:%d content:%s " - "fd:%d", - cliRes.status, cliRes.length, cliRes.content.c_str(), - result.fd_); + // ClientListenerLogging::log( + // "to send data response-- status:%d length:%d + // content:%s " + // "fd:%d", + // cliRes.status, cliRes.length, + // cliRes.content.c_str(), + // result.fd_); server->write(result.fd_, cliRes); } printf("send data packet ok\n"); @@ -812,6 +820,11 @@ void *ClientListener::sendHandler(void *para) { cliRes.status, cliRes.length, cliRes.content.c_str()); server->write(result.fd_, cliRes); printf("send end response packet ok\n"); + if (NULL != result.result_) { + delete result.result_; + result.result_ = NULL; + // cout << "delete result in memory" << endl; + } } } } else { @@ -828,6 +841,11 @@ void *ClientListener::sendHandler(void *para) { result.warning_.c_str()); server->write(result.fd_, cliRes); printf("send error packet ok\n"); + if (NULL != result.result_) { + delete result.result_; + result.result_ = NULL; + cout << "delete result in memory" << endl; + } } } return NULL; @@ -1035,8 +1053,9 @@ int ClientListener::write(const int fd, const ClientResponse &res) const { // ret = ::write(fd, buffer, length); ret = send(fd, buffer, length, MSG_WAITALL); - LOG(INFO) << "Server: " << res.status << " bytes:" << ret << "\t" - << res.length << "\t" << res.content.c_str() << " is send!" << endl; + // LOG(INFO) << "Server: " << res.status << " bytes:" << ret << "\t" + // << res.length << "\t" << res.content.c_str() << " is send!" 
<< + // endl; // ret = ::write(fd,buffer,length); // if (ret < 0) { // ClientLogging::elog("when send to fd %d, send buffer failed.%s", diff --git a/Client/ClientResponse.h b/Client/ClientResponse.h index 4d692582b..446b2bdcc 100644 --- a/Client/ClientResponse.h +++ b/Client/ClientResponse.h @@ -196,12 +196,14 @@ struct ClientResponse { void* content_start = buffer + sizeof(int) + sizeof(int); memcpy(content_start, content.data(), content.length()); - ClientLogging::log("buffer to send is : %d-%d-%d-%d-%d-%d-%d-%d| %d=%d", - (unsigned int)buffer[0], (unsigned int)buffer[1], - (unsigned int)buffer[2], (unsigned int)buffer[3], - (unsigned int)buffer[4], (unsigned int)buffer[5], - (unsigned int)buffer[6], (unsigned int)buffer[7], status, - length); + // ClientLogging::log("buffer to send is : %d-%d-%d-%d-%d-%d-%d-%d| + // %d=%d", + // (unsigned int)buffer[0], (unsigned int)buffer[1], + // (unsigned int)buffer[2], (unsigned int)buffer[3], + // (unsigned int)buffer[4], (unsigned int)buffer[5], + // (unsigned int)buffer[6], (unsigned int)buffer[7], + // status, + // length); return ret; } diff --git a/Client/Test/Makefile.am b/Client/Test/Makefile.am index 60043081b..0841760e1 100644 --- a/Client/Test/Makefile.am +++ b/Client/Test/Makefile.am @@ -1,11 +1,9 @@ - AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ --I${JAVA_HOME}/include/linux\ --I${THERON_HOME}/Include +-I${JAVA_HOME}/include/linux #应该加什么编译选项 AM_LDFLAGS=-lc -lm -lrt lboost_serialization -lxs @@ -17,10 +15,9 @@ endif LDADD = ../../catalog/libcatalog.a \ ../../common/libcommon.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a \ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.so \ - ${THERON_HOME}/Lib/libtherond.a \ + ${HADOOP_HOME}/lib/native/libhdfs.so\ 
${BOOST_HOME}/stage/lib/libboost_serialization.so \ ${BOOST_HOME}/stage/lib/libboost_serialization.a diff --git a/Config.cpp b/Config.cpp index 9d862da8a..341aa300e 100644 --- a/Config.cpp +++ b/Config.cpp @@ -20,7 +20,7 @@ using namespace std; string gete() { char *p = getenv("CLAIMS_HOME"); stringstream sp; - sp << string(p).c_str() << "/conf/config"; + sp << string(p).c_str() << "conf/config"; return sp.str(); // return "/home/imdb/config/wangli/config"; } @@ -89,6 +89,7 @@ std::string Config::catalog_file; int Config::thread_pool_init_thread_num; int Config::load_thread_num; +int Config::memory_utilization; Config *Config::getInstance() { if (instance_ == 0) { @@ -151,6 +152,8 @@ void Config::initialize() { load_thread_num = getInt("load_thread_num", sysconf(_SC_NPROCESSORS_CONF)); + memory_utilization = getInt("memory_utilization", 100); + #ifdef DEBUG_Config print_configure(); #endif diff --git a/Config.h b/Config.h index 460ff7139..34defbff1 100644 --- a/Config.h +++ b/Config.h @@ -77,6 +77,7 @@ class Config { static bool enable_codegen; static std::string catalog_file; static int thread_pool_init_thread_num; + static int memory_utilization; static int load_thread_num; diff --git a/Daemon/Daemon.h b/Daemon/Daemon.h index ec78029f3..d5257dee0 100755 --- a/Daemon/Daemon.h +++ b/Daemon/Daemon.h @@ -21,6 +21,8 @@ using claims::utility::LockGuard; struct remote_command { + remote_command() {} + remote_command(std::string s, int fd) : cmd(s), socket_fd(fd) {} std::string cmd; int socket_fd; }; diff --git a/Daemon/Makefile.am b/Daemon/Makefile.am index a303bef4c..329dee8b0 100644 --- a/Daemon/Makefile.am +++ b/Daemon/Makefile.am @@ -1,8 +1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS\ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -16,11 +15,10 @@ endif LDADD = ../logical_operator/liblogicalqueryplan.a \ 
../common/Block/libblock.a \ ../utility/libutility.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libdaemon.a libdaemon_a_SOURCES = Daemon.cpp Daemon.h \ diff --git a/Debug.h b/Debug.h index e6f00e0cc..da79c7417 100755 --- a/Debug.h +++ b/Debug.h @@ -10,7 +10,6 @@ #define COOR "/home/claims/config/zhanglei/coor.conf" - /* CONFIG is now specified in Config.cpp //#define CONFIG "/home/claims/config/wangli/config" @@ -23,9 +22,9 @@ /* * block in hdfs and sublock in block * */ -#define BLOCK_SIZE 64*1024 -#define SUCHUNK_SIZE 64*1024 -#define CHUNK_SIZE 64*1024*1024 +#define BLOCK_SIZE (64 * 1024) +#define SUCHUNK_SIZE (64 * 1024) +#define CHUNK_SIZE (64 * 1024 * 1024) #define CHUNK_SIZE_IN_MB 64 #define HEARTBEAT_MESSAGE_LEN 64 #define REGISTER_MESSAGE_LEN 64 @@ -38,5 +37,4 @@ // 磁盘目录 #define DISKDIR "/home/claims/diskdata" - #endif /* DEBUG_H_ */ diff --git a/Environment.cpp b/Environment.cpp index e19763e82..3f01b7e64 100755 --- a/Environment.cpp +++ b/Environment.cpp @@ -6,9 +6,15 @@ */ #include "Environment.h" +#include "caf/all.hpp" +#include +#include +#include "common/Message.h" +#include "exec_tracker/stmt_exec_tracker.h" +#include "exec_tracker/segment_exec_tracker.h" +#include "node_manager/base_node.h" #define GLOG_NO_ABBREVIATED_SEVERITIES #include -#undef GLOG_NO_ABBREVIATED_SEVERITIES #include #include #include @@ -24,6 +30,9 @@ #include "common/expression/expr_type_cast.h" #include "common/expression/type_conversion_matrix.h" +using caf::announce; +using claims::BaseNode; +using claims::catalog::Catalog; using claims::common::InitAggAvgDivide; using claims::common::InitOperatorFunc; using claims::common::InitTypeCastFunc; @@ -31,6 +40,9 @@ using 
claims::common::InitTypeConversionMatrix; //#define DEBUG_MODE #include "catalog/catalog.h" using claims::common::rSuccess; +using claims::NodeAddr; +using claims::NodeSegmentID; +using claims::StmtExecTracker; Environment* Environment::_instance = 0; @@ -44,14 +56,15 @@ Environment::Environment(bool ismaster) : ismaster_(ismaster) { portManager = PortManager::getInstance(); if (ismaster) { - logging_->log("Initializing the Coordinator..."); - initializeCoordinator(); catalog_ = claims::catalog::Catalog::getInstance(); + logging_->log("restore the catalog ..."); if (rSuccess != catalog_->restoreCatalog()) { LOG(ERROR) << "failed to restore catalog" << std::endl; cerr << "ERROR: restore catalog failed" << endl; } } + stmt_exec_tracker_ = new StmtExecTracker(); + seg_exec_tracker_ = new SegmentExecTracker(); if (true == g_thread_pool_used) { logging_->log("Initializing the ThreadPool..."); @@ -59,8 +72,6 @@ Environment::Environment(bool ismaster) : ismaster_(ismaster) { logging_->elog("initialize ThreadPool failed"); } } - logging_->log("Initializing the AdaptiveEndPoint..."); - initializeEndPoint(); /** * TODO: * DO something in AdaptiveEndPoint such that the construction function does @@ -73,8 +84,10 @@ Environment::Environment(bool ismaster) : ismaster_(ismaster) { /*Before initializing Resource Manager, the instance ip and port should be * decided.*/ - + AnnounceCafMessage(); initializeResourceManager(); + // should after above + InitMembership(); initializeStorage(); @@ -101,7 +114,6 @@ Environment::~Environment() { delete logging_; delete portManager; delete catalog_; - delete coordinator; if (ismaster_) { delete iteratorExecutorMaster; delete resourceManagerMaster_; @@ -115,7 +127,6 @@ Environment::~Environment() { delete resourceManagerSlave_; delete blockManager_; delete bufferManager_; - delete endpoint; } Environment* Environment::getInstance(bool ismaster) { if (_instance == 0) { @@ -133,27 +144,21 @@ void Environment::readConfigFile() { 
cfg.readFile(Config::config_file.c_str()); ip = (const char*)cfg.lookup("ip"); } -void Environment::initializeEndPoint() { - // libconfig::Config cfg; - // cfg.readFile("/home/claims/config/wangli/config"); - // std::string endpoint_ip=(const char*)cfg.lookup("ip"); - // std::string endpoint_port=(const char*)cfg.lookup("port"); - std::string endpoint_ip = ip; - int endpoint_port; - if ((endpoint_port = portManager->applyPort()) == -1) { - logging_->elog("The ports in the PortManager is exhausted!"); - } - port = endpoint_port; - logging_->log("Initializing the AdaptiveEndPoint as EndPoint://%s:%d.", - endpoint_ip.c_str(), endpoint_port); - std::ostringstream name, port_str; - port_str << endpoint_port; - name << "EndPoint://" << endpoint_ip << ":" << endpoint_port; - endpoint = - new AdaptiveEndPoint(name.str().c_str(), endpoint_ip, port_str.str()); +void Environment::AnnounceCafMessage() { + announce( + "StorageBudgetMessage", &StorageBudgetMessage::nodeid, + &StorageBudgetMessage::memory_budget, &StorageBudgetMessage::disk_budget); + announce("ProjectionID", &ProjectionID::table_id, + &ProjectionID::projection_off); + announce("PartitionID", &PartitionID::projection_id, + &PartitionID::partition_off); + announce("ExchangeID", &ExchangeID::exchange_id, + &ExchangeID::partition_offset); + announce("BaseNode", &BaseNode::node_id_to_addr_); + announce("NodeSegmentID", &NodeSegmentID::first, + &NodeSegmentID::second); } -void Environment::initializeCoordinator() { coordinator = new Coordinator(); } void Environment::initializeStorage() { if (ismaster_) { blockManagerMaster_ = BlockManagerMaster::getInstance(); @@ -177,7 +182,15 @@ void Environment::initializeResourceManager() { resourceManagerMaster_ = new ResourceManagerMaster(); } resourceManagerSlave_ = new InstanceResourceManager(); - nodeid = resourceManagerSlave_->Register(); + // nodeid = resourceManagerSlave_->Register(); +} +void Environment::InitMembership() { + if (ismaster_) { + master_node_ = 
MasterNode::GetInstance(); + } + slave_node_ = SlaveNode::GetInstance(); + slave_node_->RegisterToMaster(true); + node_id_ = slave_node_->get_node_id(); } void Environment::initializeBufferManager() { bufferManager_ = BufferManager::getInstance(); @@ -187,7 +200,6 @@ void Environment::initializeIndexManager() { indexManager_ = IndexManager::getInstance(); } -AdaptiveEndPoint* Environment::getEndPoint() { return endpoint; } ExchangeTracker* Environment::getExchangeTracker() { return exchangeTracker; } ResourceManagerMaster* Environment::getResourceManagerMaster() { return resourceManagerMaster_; @@ -195,7 +207,7 @@ ResourceManagerMaster* Environment::getResourceManagerMaster() { InstanceResourceManager* Environment::getResourceManagerSlave() { return resourceManagerSlave_; } -NodeID Environment::getNodeID() const { return nodeid; } +NodeID Environment::getNodeID() const { return node_id_; } claims::catalog::Catalog* Environment::getCatalog() const { return catalog_; } void Environment::initializeClientListener() { diff --git a/Environment.h b/Environment.h index 01ca21379..541b5ddcc 100755 --- a/Environment.h +++ b/Environment.h @@ -7,7 +7,6 @@ #ifndef ENVIRONMENT_H_ #define ENVIRONMENT_H_ -#include "Executor/Coordinator.h" #include "Executor/IteratorExecutorMaster.h" #include "Executor/IteratorExecutorSlave.h" #include "storage/BlockManager.h" @@ -15,17 +14,29 @@ #include "Resource/ResourceManagerMaster.h" #include "Resource/ResourceManagerSlave.h" #include "IndexManager/IndexManager.h" -#include "Executor/AdaptiveEndPoint.h" #include "Executor/PortManager.h" #include "common/Logging.h" #include "utility/thread_pool.h" #include "Client/ClaimsServer.h" +#include "exec_tracker/stmt_exec_tracker.h" +#include "exec_tracker/segment_exec_tracker.h" #include "Executor/exchange_tracker.h" #include "Executor/expander_tracker.h" +#include "node_manager/master_node.h" +#include "node_manager/slave_node.h" #include "Resource/BufferManager.h" using claims::catalog::Catalog; -// 
class Catalog; +using claims::MasterNode; +using claims::SegmentExecTracker; +using claims::SlaveNode; +using claims::StmtExecTracker; +class Catalog; +class IteratorExecutorSlave; +class BlockManager; +class ResourceManagerMaster; +class InstanceResourceManager; +class BlockManagerMaster; class Environment { public: @@ -33,20 +44,29 @@ class Environment { static Environment* getInstance(bool ismaster = 0); std::string getIp(); unsigned getPort(); - AdaptiveEndPoint* getEndPoint(); ExchangeTracker* getExchangeTracker(); ResourceManagerMaster* getResourceManagerMaster(); InstanceResourceManager* getResourceManagerSlave(); NodeID getNodeID() const; + void setNodeID(NodeID node_id){ node_id_ = node_id ;} claims::catalog::Catalog* getCatalog() const; ThreadPool* getThreadPool() const; IteratorExecutorSlave* getIteratorExecutorSlave() const; - explicit Environment(bool ismaster = false); + Environment(bool ismaster = false); + MasterNode* get_master_node() { return master_node_; } + SlaveNode* get_slave_node() { return slave_node_; } + BlockManager* get_block_manager() { return blockManager_; } + IteratorExecutorMaster* get_iterator_executor_master() { + return iteratorExecutorMaster; + } + BlockManagerMaster* get_block_manager_master() { return blockManagerMaster_; } + + StmtExecTracker* get_stmt_exec_tracker() { return stmt_exec_tracker_; } + SegmentExecTracker* get_segment_exec_tracker() { return seg_exec_tracker_; } private: + void AnnounceCafMessage(); void readConfigFile(); - void initializeEndPoint(); - void initializeCoordinator(); void initializeStorage(); void initializeResourceManager(); void initializeBufferManager(); @@ -55,12 +75,11 @@ class Environment { void initializeExpressionSystem(); void destoryClientListener(); bool initializeThreadPool(); + void InitMembership(); private: static Environment* _instance; PortManager* portManager; - AdaptiveEndPoint* endpoint; - Coordinator* coordinator; std::string ip; unsigned port; IteratorExecutorSlave* 
iteratorExecutorSlave; @@ -72,7 +91,7 @@ class Environment { InstanceResourceManager* resourceManagerSlave_; Catalog* catalog_; /* the globally unique node id*/ - NodeID nodeid; + NodeID node_id_; BlockManagerMaster* blockManagerMaster_; BlockManager* blockManager_; BufferManager* bufferManager_; @@ -81,7 +100,11 @@ class Environment { ClientListener* listener_; ThreadPool* thread_pool_; + MasterNode* master_node_; + SlaveNode* slave_node_; + StmtExecTracker* stmt_exec_tracker_; + SegmentExecTracker* seg_exec_tracker_; /** * TODO: the master and slave pair, such as ResouceManagerMaster and * ResourceManagerSlave, should have a diff --git a/Executor/AdaptiveEndPoint.cpp b/Executor/AdaptiveEndPoint.cpp deleted file mode 100755 index 11ef6cdba..000000000 --- a/Executor/AdaptiveEndPoint.cpp +++ /dev/null @@ -1,164 +0,0 @@ -/* - * AdaptiveActor.cpp - * - * Created on: Aug 8, 2013 - * Author: wangli - */ -#include -#include -#include -#include -#include -#include -#include "AdaptiveEndPoint.h" -#include "../Debug.h" -#include "../common/Logging.h" -#include "../utility/ThreadSafe.h" -#include "../Config.h" - -#include -#include -AdaptiveEndPoint::AdaptiveEndPoint(const char* name, std::string ip, std::string port) -:Theron::EndPoint(name, ("tcp://"+ip+":"+port).c_str()){ - logging_=new AdaptiveEndPointLogging(); - - logging_->log("The AdaptiveEndPoint is created as %s:%s",ip.c_str(),port.c_str()); - framework=new Theron::Framework(*(EndPoint*)this); - connectionActor=new AdaptiveEndPoint::ConnectionActor(this,("ConnectionActor://"+ip+":"+port).c_str()); - - if(SayHelloToCoordinator(ip,port)==false){ - logging_->elog("Error occurs when saying hello to the coordinator!"); - } - - logging_->log("Get Coordinator EndPoint Port..."); - - int coordinator_endpoint_port; - if((coordinator_endpoint_port=GetCoordinatorEndPointPort())==-1){ - logging_->elog("Error occurs when getting the coordinator EndPoint port"); - } - - logging_->log("Connect to Coordinator EndPoint..."); - 
if(ConnectToCoordinateEndPoint(coordinator_endpoint_port)==false){ - logging_->elog("Error occurs when connecting to the coordinator EndPoint"); - } - - logging_->log("Waiting for the Ready signal from the Coordinator."); - if(WaitForReadySignalFromCoordinator()==false){ - logging_->elog("Error occurs when waiting for the coordinator EndPoint"); - } - FileClose(socket_coor); -} - -AdaptiveEndPoint::~AdaptiveEndPoint() { - // TODO Auto-generated destructor stub -// return; - delete connectionActor; - delete framework; -// this->~EndPoint(); -} - -bool AdaptiveEndPoint::SayHelloToCoordinator(std::string ip,std::string port){ - libconfig::Config cfg; - cfg.readFile(Config::config_file.c_str()); - ip_coor=(const char *)cfg.lookup("coordinator.ip"); - - std::string coord_port=(const char*)cfg.lookup("coordinator.port"); - int recvbytes; - - struct hostent host; - struct sockaddr_in serv_addr; - - if((ThreadSafe::gethostbyname_ts(host,ip_coor.c_str()))==0) - { - logging_->elog("gethostbyname errors!\n"); - assert(false); - return false; - } - if((socket_coor = socket(AF_INET, SOCK_STREAM,0))==-1) - { - logging_->elog("socket create errors!\n"); - assert(false); - return false; - } - - serv_addr.sin_family=AF_INET; - serv_addr.sin_port=htons(atoi(coord_port.c_str())); - serv_addr.sin_addr=*((struct in_addr*)host.h_addr); - bzero(&(serv_addr.sin_zero),8); - int attemps_budget=10; - while(connect(socket_coor,(struct sockaddr *)&serv_addr, sizeof(struct sockaddr))==-1) - { - logging_->elog("Cannot connect to the master! To retry in one second!"); - sleep(1); - attemps_budget--; - if(attemps_budget==0){ - logging_->elog("connection errors when connecting to %s:%s! 
Reason:%s",inet_ntoa(serv_addr.sin_addr),coord_port.c_str(),strerror(errno)); - return false; - } - } - - int port_send=atoi(port.c_str()); - if(send(socket_coor,&port_send,sizeof(int),0)==-1) - { - logging_->elog("Error occurs when sending the hello message to the coordinator!\n"); - assert(false); - return false; - } - return true; -} -int AdaptiveEndPoint::GetCoordinatorEndPointPort(){ - int recvbytes; - int port; - if((recvbytes=recv(socket_coor,&port,sizeof(int),0))==-1){ - logging_->elog("recv error!\n"); - return -1; - } - logging_->log("Get CoordinatorEndPoint is successfully! The Coordinator Theron EndPoint is %s:%d",ip_coor.c_str(),port); - return port; - -} -bool AdaptiveEndPoint::WaitForReadySignalFromCoordinator(){ - int recvbytes; - char signal; - if((recvbytes=recv(socket_coor,&signal,sizeof(char),0))==-1){ - logging_->elog("recv error!\n"); - return false; - } - logging_->log("Join to the EndPoint network successfully!"); - return true; -} -bool AdaptiveEndPoint::ConnectToCoordinateEndPoint(int port){ - std::ostringstream os; - os<<"tcp://"<elog("Check whether network is enabled! 
can't connect ot %s",os.str().c_str()); -// assert(false); - return false; - } - - logging_->log("ConnectToCoordiateEndPoint is successful!"); - return true; -} -bool AdaptiveEndPoint::ConnectToRemoteEndPoint(std::string location){ - return this->Connect(location.c_str()); -} -AdaptiveEndPoint::ConnectionActor::ConnectionActor(AdaptiveEndPoint* AEP, const char* Name) -:Actor(*(AEP->framework),Name),AEP(AEP){ - RegisterHandler(this,&AdaptiveEndPoint::ConnectionActor::ReceiveNodeStatus256); - -} - -void AdaptiveEndPoint::ConnectionActor::ReceiveNodeStatus256(const Message256 &message, const Theron::Address from){ - NodeConnectionMessage NCM=NodeConnectionMessage::deserialize(message); - std::ostringstream str; - str<<"tcp://"+NCM.ip<<":"<ConnectToRemoteEndPoint(str.str().c_str())){ - AEP->logging_->elog("Check whether the Network is enabled!"); - } - Send(0,from); - - - AEP->logging_->log("Successfully connected to the EndPoint of new node through %s",str.str().c_str()); -} diff --git a/Executor/AdaptiveEndPoint.h b/Executor/AdaptiveEndPoint.h deleted file mode 100755 index de3fdd196..000000000 --- a/Executor/AdaptiveEndPoint.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * AdaptiveActor.h - * - * Created on: Aug 8, 2013 - * Author: wangli - */ - -#ifndef ADAPTIVEENDPOINT_H_ -#define ADAPTIVEENDPOINT_H_ -#include -#include -#ifdef DMALLOC -#include "dmalloc.h" -#endif - -#include "../common/Logging.h" -#include "../common/Message.h" -class AdaptiveEndPoint:public Theron::EndPoint { -public: - /* - * According to ip/port of Coordinator read from config, connect to Coordinator by socket - * then exchange my EndPoint port and Coordinator's EndPoint port. - * Connect to Coordinator with Theron depend on two port. 
- */ - AdaptiveEndPoint(const char* name, std::string ip, std::string port); - - virtual ~AdaptiveEndPoint(); -// static AdaptiveEndPoint* getInstance(); -private: - bool SayHelloToCoordinator(std::string ip,std::string port); - int GetCoordinatorEndPointPort(); - bool WaitForReadySignalFromCoordinator(); - bool ConnectToCoordinateEndPoint(int port); - bool ConnectToRemoteEndPoint(std::string location); -protected: - Theron::Framework *framework; - Logging* logging_; -private: - Theron::Actor* connectionActor; - int socket_coor; - std::string ip_coor; - - /** - * The Actor which receive the node states from the coordinator - */ - friend class ConnectionActor; -////////////////////////////////////////////////// - class ConnectionActor:public Theron::Actor{ - public: - ConnectionActor(AdaptiveEndPoint*,const char* Name); - private: - void ReceiveNodeStatus256(const Message256 &message, const Theron::Address from); - private: - AdaptiveEndPoint* AEP; - }; -}; - -#endif /* ADAPTIVEACTOR_H_ */ diff --git a/Executor/Coordinator.cpp b/Executor/Coordinator.cpp deleted file mode 100755 index ff9e7a542..000000000 --- a/Executor/Coordinator.cpp +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Coordinator.cpp - * - * Created on: Aug 8, 2013 - * Author: wangli - */ - -#include "Coordinator.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "../Debug.h" -#include "../common/Message.h" -#include "../Environment.h" -#include "../common/TimeOutReceiver.h" -#include "../Config.h" -Coordinator::Coordinator() { - logging = new CoordinatorLogging(); - /** swap the order of SetupTheTheron and PreparetheSocket to provide more time - * to Theron::EndPoint initialization before other EndPoints connect to it. 
- * - */ - - if (SetupTheTheron() == false) { - logging->elog("Error occurs during the Theron setup!"); - return; - } - if (PrepareTheSocket() == false) { - logging->elog("Error occurs during the socket prepare!"); - return; - } - - if (CreateListeningThread() == false) { - logging->elog("Error occurs during creating listening thread!"); - return; - } - -} - -Coordinator::~Coordinator() { - pthread_cancel(prochaseId); - void *res=0; - while(res!=PTHREAD_CANCELED){ - pthread_join(prochaseId,&res); - } - - FileClose(socket_fd); -// logging->elog("-----for debug: fd %d is closed", socket_fd); -// std::cout<<"in "<<__FILE__<<":"<<__LINE__;printf("-----for debug: fd %d is closed\n", socket_fd); - delete framework; - delete endpoint; -} -bool Coordinator::PrepareTheSocket() { - libconfig::Config cfg; - cfg.readFile(Config::config_file.c_str()); - std::string master_ip = (const char *) cfg.lookup("coordinator.ip"); - std::string master_port = (const char*) cfg.lookup("coordinator.port"); - - struct hostent* host; - struct sockaddr_in my_addr; - - if ((socket_fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) { - logging->elog("socket create errors!\n"); - return false; - } - my_addr.sin_family = AF_INET; - my_addr.sin_port = htons(atoi(master_port.c_str())); - my_addr.sin_addr.s_addr = INADDR_ANY; - - bzero(&(my_addr.sin_zero), 8); - - /* Enable address reuse */ - int on = 1; - setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); - - if (bind(socket_fd, (struct sockaddr *) &my_addr, sizeof(struct sockaddr)) - == -1) { - logging->elog("bind errors!\n"); - return false; - } - - if (listen(socket_fd, 100) == -1) { - logging->elog("listen errors!\n"); - return false; - } - - logging->log("Coordinator: PrepareTheSocket() is successful."); - - return true; -} -bool Coordinator::SetupTheTheron() { - - std::string ip_port; - std::string ip = Environment::getInstance()->getIp(); - std::ostringstream port; - port << PortManager::getInstance()->applyPort(); - EndPointPort = 
port.str(); - ip_port = std::string("tcp://") + ip + ":" + port.str(); - - logging->log("[Coordinator]: Now is initializing the Theron EndPoint as %s",ip_port.c_str()); - - endpoint = new Theron::EndPoint((ip + ":" + port.str()).c_str(), - ip_port.c_str()); - - framework = new Theron::Framework(*endpoint); - framework->SetMaxThreads(1); - - logging->log("[Coordinator]: Now is initializing The CoordinatorActor"); - - return true; - -} -bool Coordinator::CreateListeningThread() { - - const int error = pthread_create(&prochaseId, NULL, ListeningNewNode, this); - - logging->log("[Coordinator]: The listening thread is created!"); - - return error == 0; -} - -void* Coordinator::ListeningNewNode(void *arg) { - - Coordinator* Cthis = (Coordinator*) arg; - while (true) { - socklen_t sin_size = sizeof(struct sockaddr_in); - struct sockaddr_in remote_addr; - int socket_fd_new; - - Cthis->logging->log( - "[Coordinator]: I'm waiting for the new node's connection!"); - - if ((socket_fd_new = accept(Cthis->socket_fd, - (struct sockaddr*) &remote_addr, &sin_size)) == -1) { - perror("accept errors!\n"); - return 0; - } - - Cthis->logging->log( - "[Coordinator]: New Node is connected! Waiting for the port infor.."); - - /*Set the timeout value, which is essential to guarantee the correctness when - * there are more nodes trying to connect the coordinate at the same time. */ - struct timeval timeout = { 1, 0 }; - setsockopt(socket_fd_new, SOL_SOCKET, SO_RCVTIMEO, (char*) &timeout, - sizeof(struct timeval)); - - int recvbytes; - int port; - - if ((recvbytes = recv(socket_fd_new, &port, sizeof(port), MSG_WAITALL)) - == -1) { - std::cout << "New node " << inet_ntoa(remote_addr.sin_addr) - << " has connected, but the receiving the information times out!" 
- << std::endl; - FileClose(socket_fd_new); -// logging->elog("-----for debug: fd %d is closed", socket_fd_new); - continue; - //return false; - } - if (recvbytes != sizeof(int)) { - std::cout << "Information received, but the length is not right!" - << std::endl; - FileClose(socket_fd_new); -// logging->elog("-----for debug: fd %d is closed", socket_fd_new); - continue; - } - - Cthis->logging->log( - "[Coordinator]: The EndPoint of the new node is %s:%d", - inet_ntoa(remote_addr.sin_addr), port); - - if (!Cthis->SendCoordinatorEndPointPort(socket_fd_new)) { - continue; - } - - std::ostringstream ost; - ost << port; - std::string new_node_ip = inet_ntoa(remote_addr.sin_addr); - std::string new_node_port = ost.str(); - - if (!Cthis->endpoint->Connect(("tcp://" + new_node_ip + ":" - + new_node_port).c_str())) { - Cthis->logging->elog( - "Error occurs when the Coordinator EndPoint is connecting to the EndPoint of the new node: " - "tcp://%s:%s", new_node_ip.c_str(), new_node_port.c_str()); - Cthis->logging->log(" tcp://%s:%s", new_node_ip.c_str(), - new_node_port.c_str()); - } - - else - { - Cthis->logging->log( - "[Coordinator]: The Coordinator EndPoint has successfully connected to the EndPoint of the new node!"); - } - - TimeOutReceiver *receiver = new TimeOutReceiver(Cthis->endpoint); - Theron::Catcher resultCatcher; - receiver->RegisterHandler(&resultCatcher, &Theron::Catcher::Push); - const int TimeOut = 1000;//ms - - /** - * TODO: In the current version, the Coordinator will repeatedly send - * message to each NodeConnectionActor until the feedback is received - * which means the target node has conducted new connection based on - * message received. - * However, if the target node is dead, the message will be sent repeatedly - * and infinitely. Additional code is needed to handle the dead node. 
- */ - for (unsigned i = 0; i < Cthis->PeersIpPort.size(); i++) { - NodeConnectionMessage new_NCM(new_node_ip, new_node_port); - NodeConnectionMessage old_NCM(Cthis->PeersIpPort[i].first, - Cthis->PeersIpPort[i].second); - receiver->Reset(); - Cthis->framework->Send(NodeConnectionMessage::serialize(new_NCM), - receiver->GetAddress(), Theron::Address( - ("ConnectionActor://" + old_NCM.ip + ":" - + old_NCM.port).c_str())); - while (receiver->TimeOutWait(1, TimeOut) != 1) { - Cthis->framework->Send( - NodeConnectionMessage::serialize(new_NCM), - receiver->GetAddress(), Theron::Address( - ("ConnectionActor://" + old_NCM.ip + ":" - + old_NCM.port).c_str())); - } - - receiver->Reset(); - - Cthis->framework->Send(NodeConnectionMessage::serialize(old_NCM), - receiver->GetAddress(), Theron::Address( - ("ConnectionActor://" + new_NCM.ip + ":" - + new_NCM.port).c_str())); - while (receiver->TimeOutWait(1, TimeOut) != 1) { - Cthis->framework->Send( - NodeConnectionMessage::serialize(old_NCM), - receiver->GetAddress(), Theron::Address( - ("ConnectionActor://" + new_NCM.ip + ":" - + new_NCM.port).c_str())); - } - } - - Cthis->PeersIpPort.push_back(std::pair( - new_node_ip, new_node_port)); - - Cthis->logging->log( - "[Coordinator]: New node %s;%s is successfully added to the Theron communication network!", - new_node_ip.c_str(), new_node_port.c_str()); - - Cthis->SendReadyNotificationToNewNode(socket_fd_new); - - FileClose(socket_fd_new); -// logging->elog("-----for debug: fd %d is closed", socket_fd_new); - receiver->~TimeOutReceiver(); - } -} - -bool Coordinator::SendReadyNotificationToNewNode(int socket_new_node) { - char signal = 'R'; - if (!send(socket_new_node, &signal, sizeof(char), 0)) { - logging->log( - "Error occurs when sending the Coordinate EndPoint port to the new node!"); - } - - logging->log("[Coordinator]: The 'Ready' signal is sent to the new node!"); - - return true; -} -bool Coordinator::SendCoordinatorEndPointPort(int socket_new_node) { - int port = 
atoi(EndPointPort.c_str()); - - if (!send(socket_new_node, &port, sizeof(int), 0)) { - logging->elog( - "Error occurs when sending the Coordinate EndPoint port to the new node!"); - } - - logging->log( - "[Coordinator]: The Coordinator EndPoint port [%d] is sent to the new node!", - port); - - return true; -} -Coordinator::CoordinateActor::CoordinateActor(Theron::Framework *framework, - const char* Name) : - Actor(*framework, Name) { - -} - diff --git a/Executor/Coordinator.h b/Executor/Coordinator.h deleted file mode 100755 index 930752667..000000000 --- a/Executor/Coordinator.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Coordinator.h - * The Coordinator is the center for Theron connections. - * - * Accept all slaves' registration - * and send all slaves' Ip/Port to all slave, - * helping them to connect each other - * - * Created on: Aug 8, 2013 - * Author: wangli - */ - -#ifndef COORDINATOR_H_ -#define COORDINATOR_H_ - -#include "Theron/Theron.h" -#include -#include -#ifdef DMALLOC -#include "dmalloc.h" -#endif -#include "../common/Logging.h" - -class Coordinator { -public: - Coordinator(); - virtual ~Coordinator(); -private: - bool PrepareTheSocket(); - bool SetupTheTheron(); - bool CreateListeningThread(); - bool SendReadyNotificationToNewNode(int socket_new_node); - - bool SendCoordinatorEndPointPort(int socket_new_node); - static void* ListeningNewNode(void * arg); -private: - int socket_fd; - Theron::EndPoint* endpoint; - Theron::Framework* framework; - std::string EndPointPort; - std::vector > PeersIpPort; - Logging* logging; - pthread_t prochaseId; -/** - * Actor - */ -/////////////////////////////////////////////////////////////////////// -public: - class CoordinateActor:public Theron::Actor{ - public: - CoordinateActor(Theron::Framework *framework, const char* Name); - }; -}; - -#endif /* COORDINATOR_H_ */ diff --git a/Executor/IteratorExecutorMaster.cpp b/Executor/IteratorExecutorMaster.cpp index f86f01902..c23aa613e 100755 --- 
a/Executor/IteratorExecutorMaster.cpp +++ b/Executor/IteratorExecutorMaster.cpp @@ -6,109 +6,64 @@ */ #include "IteratorExecutorMaster.h" +#include +#include #include "../Environment.h" -#include "../common/TimeOutReceiver.h" #include "../utility/rdtsc.h" -IteratorExecutorMaster* IteratorExecutorMaster::_instance=0; +#include "caf/io/all.hpp" +#include "../node_manager/base_node.h" +#include "caf/all.hpp" +#include "../common/memory_handle.h" +using caf::io::remote_actor; +using claims::SendPlanAtom; +IteratorExecutorMaster* IteratorExecutorMaster::_instance = 0; -IteratorExecutorMaster::IteratorExecutorMaster(){ - _instance=this; - endpoint=Environment::getInstance()->getEndPoint(); - framework=new Theron::Framework(*endpoint); - framework->SetMaxThreads(1); -// framework->SetMinThreads(5); - logging_=new IteratorExecutorMasterLogging(); -} +IteratorExecutorMaster::IteratorExecutorMaster() { _instance = this; } -IteratorExecutorMaster::~IteratorExecutorMaster() { - _instance=0; - delete framework; -} +IteratorExecutorMaster::~IteratorExecutorMaster() { _instance = 0; } -IteratorExecutorMaster* IteratorExecutorMaster::getInstance() -{ - if(_instance==0) - { - return new IteratorExecutorMaster(); - } - else - return _instance; +IteratorExecutorMaster* IteratorExecutorMaster::getInstance() { + if (_instance == 0) { + return new IteratorExecutorMaster(); + } else { + return _instance; + } } -bool IteratorExecutorMaster::ExecuteBlockStreamIteratorsOnSites(PhysicalOperatorBase* it, std::vector ip_list){ - - PhysicalQueryPlan im(it); - - Message4K str= PhysicalQueryPlan::serialize4K(im); - - TimeOutReceiver receiver(endpoint); - - Theron::Catcher resultCatcher; - - receiver.RegisterHandler(&resultCatcher, &Theron::Catcher::Push); - for(unsigned slave_filter_id=0;slave_filter_idSend(int(1),receiver.GetAddress(),Theron::Address(ip_port.str().c_str())); - framework->Send(str,receiver.GetAddress(),Theron::Address(ip_port.str().c_str())); - logging_->log("send!\n********** 
please rebounce!\n"); - } - unsigned feedback_count=0; - feedback_count=receiver.TimeOutWait(ip_list.size(),5000); - - if(feedback_countelog("Time out! only received %d feedbacks \n",feedback_count); - return true; - } - - return true; +bool IteratorExecutorMaster::ExecuteBlockStreamIteratorsOnSites( + PhysicalOperatorBase* it, std::vector ip_list) { + assert(false); // shouldn't be here; + return true; } -bool IteratorExecutorMaster::ExecuteBlockStreamIteratorsOnSite(PhysicalOperatorBase* it,NodeID target_id){ - PhysicalQueryPlan* im = new PhysicalQueryPlan(it); - -// GETCURRENTTIME(s); - Message4K str= PhysicalQueryPlan::serialize4K(*im); -// cout<<"Yu debug: serialize message use:"<getIteratorExecutorSlave()->createNewThreadAndRun(new_plan); - logging_->log("The iterator tree has been sent to local slave.\n"); - return true; - } -// TimeOutReceiver receiver(endpoint); -// -// Theron::Catcher resultCatcher; -// receiver.RegisterHandler(&resultCatcher, &Theron::Catcher::Push); - ostringstream actor_name; - - actor_name<<"IteratorExecutorActor://"<Send(str,receiver.GetAddress(),Theron::Address(ip_port.str().c_str())); -// GETCURRENTTIME(t); -// printf("Yu debug:time when to send message: %ld.%ld\n", t.tv_sec, t.tv_usec); - framework->Send(str,Theron::Address(),Theron::Address(actor_name.str().c_str())); - logging_->log("The serialized iterator tree has been sent to %s.\n",actor_name.str().c_str()); -// -// unsigned feedback_count=0; -// feedback_count=receiver.TimeOutWait(1,5000); -// -// if(feedback_count!=1){ -// logging_->elog("Time out! no feedback received! 
\n"); -// return true; -// } -// logging_->log("Received the confirm feedback from %s",ip_port.str().c_str()); - return true; +// send serialized plan string to target +bool IteratorExecutorMaster::ExecuteBlockStreamIteratorsOnSite( + PhysicalOperatorBase* it, NodeID target_id, u_int64_t query_id = 0, + u_int32_t segment_id = 0) { + PhysicalQueryPlan* physical_plan = new PhysicalQueryPlan( + it, target_id, query_id, segment_id, + Environment::getInstance()->get_slave_node()->get_node_id()); + string str = PhysicalQueryPlan::TextSerializePlan(*physical_plan); + caf::scoped_actor self; + LOG(INFO)<<"!!!!!Master send Plan!!!!"<get_master_node()->GetNodeActorFromId( + target_id); + self->send(target_actor, SendPlanAtom::value, str, query_id, segment_id); + } catch (caf::bind_failure& e) { + LOG(ERROR) + << "master sending plan binds port error when connecting remote actor"; + } catch (caf::network_error& e) { + LOG(ERROR) << "master sending plan connect to remote node error due to " + "network error!"; + } + DELETE_PTR(physical_plan); + LOG(INFO) << "master send serialized plan to target slave : " << target_id + << " succeed!" 
<< endl; + return true; } -bool IteratorExecutorMaster::Propogation(const int count,std::string target){ - printf("Master:%d\n",count); - ostringstream ip_port; - - ip_port<<"IteratorExecutorActor://"<Send(count,Theron::Address(),Theron::Address(ip_port.str().c_str())); - sleep(100); +bool IteratorExecutorMaster::Propogation(const int count, std::string target) { + assert(false); // shouldn't be here; + return true; } diff --git a/Executor/IteratorExecutorMaster.h b/Executor/IteratorExecutorMaster.h index 7e0c397b7..7fbec67bb 100755 --- a/Executor/IteratorExecutorMaster.h +++ b/Executor/IteratorExecutorMaster.h @@ -8,8 +8,6 @@ #ifndef ITERATOREXECUTORMASTER_H_ #define ITERATOREXECUTORMASTER_H_ -#include -#include #include #include #include @@ -26,14 +24,13 @@ class IteratorExecutorMaster { bool ExecuteBlockStreamIteratorsOnSites(PhysicalOperatorBase* it, std::vector ip_list); bool ExecuteBlockStreamIteratorsOnSite(PhysicalOperatorBase* it, - NodeID target_ip); + NodeID target_id, + u_int64_t query_id = 0, + u_int32_t segment_id = 0); bool Propogation(const int, std::string target); private: static IteratorExecutorMaster* _instance; - Theron::EndPoint* endpoint; - Theron::Framework* framework; - Logging* logging_; }; #endif /* ITERATOREXECUTORMASTER_H_ */ diff --git a/Executor/IteratorExecutorSlave.cpp b/Executor/IteratorExecutorSlave.cpp index 3b2e33bb6..b229ab66a 100755 --- a/Executor/IteratorExecutorSlave.cpp +++ b/Executor/IteratorExecutorSlave.cpp @@ -6,133 +6,62 @@ */ #include "IteratorExecutorSlave.h" +#include #include "../Environment.h" -#include "../common/Logging.h" #include "../Resource/CPUResource.h" #include "../utility/print_tool.h" -#include - - -IteratorExecutorSlave::IteratorExecutorSlave(){ - - - logging_=new IteratorExecutorSlaveLogging(); - endpoint=Environment::getInstance()->getEndPoint(); - - framework=new Theron::Framework(*endpoint); - framework->SetMaxThreads(1); -// framework->SetMinThreads(5); - logging_->log("Minimum thread is set to be 
%d",framework->GetMinThreads()); - std::ostringstream str; - str<<"IteratorExecutorActor://"<getNodeID(); - execute_iterator_actor=new ExecuteIteratorActor(this,*framework,str.str().c_str()); - logging_->log("Actor created with name: IteratorExecutorActor://%d",Environment::getInstance()->getNodeID()); - -} - -IteratorExecutorSlave::~IteratorExecutorSlave() { - delete execute_iterator_actor; - delete framework; - delete logging_; -} - -IteratorExecutorSlave::ExecuteIteratorActor::ExecuteIteratorActor(IteratorExecutorSlave* ies, Theron::Framework &framework, const char *const slaveID) - :Theron::Actor(framework, slaveID),ies(ies) -{ - RegisterHandler(this, &IteratorExecutorSlave::ExecuteIteratorActor::Handler256); - RegisterHandler(this, &IteratorExecutorSlave::ExecuteIteratorActor::Handler4K); - RegisterHandler(this, &IteratorExecutorSlave::ExecuteIteratorActor::progation); -} - -void IteratorExecutorSlave::ExecuteIteratorActor::Handler256(const Message256 &message,const Theron::Address from) -{ - - ies->logging_->log("New iterator tree received!\n"); - Message256 msg; - Send(msg,from); - PhysicalQueryPlan im=PhysicalQueryPlan::deserialize(message); - printf("_-_\n"); - im.run(); - - ies->logging_->log("iterator tree is successfully executed!"); - +#include "../utility/thread_pool.h" + +#define USE_THREAD_POOL + +IteratorExecutorSlave::IteratorExecutorSlave() {} +IteratorExecutorSlave::~IteratorExecutorSlave() {} +void IteratorExecutorSlave::createNewThreadAndRun(PhysicalQueryPlan* it) { + void** arg = new void* [2]; + arg[0] = it; + arg[1] = this; + +#ifndef USE_THREAD_POOL + pthread_t thread; + int error = pthread_create(&thread, NULL, run_iterator, arg); + if (error != 0) { + LOG(ERROR) << it->get_query_id() << " , " << it->get_segment_id_() + << " IteratorExecutorSlave Failed to create thread"; + return; + } +#else + Environment::getInstance()->getThreadPool()->AddTask(run_iterator, arg); +#endif + // lock_.acquire(); + // busy_thread_list_.insert(thread); + // 
lock_.release(); + + LOG(INFO) << it->get_query_id() << " , " << it->get_segment_id_() + << "A new Running thread is created!"; } -void IteratorExecutorSlave::ExecuteIteratorActor::Handler4K(const Message4K &message,const Theron::Address from) -{ -// ies->logging_->log("New iterator tree received!\n"); -// -// Send(int(0),from); -// ies->logging_->log("Sent the response message to the Receiver!"); -// IteratorMessage im=IteratorMessage::deserialize4K(message); -// -// im.run(); -// ies->logging_->log("iterator tree is successfully executed!"); -// printf("serialized size:%d\n\n\n\n\n\n",message.length); - ies->logging_->log("New iterator tree received!\n"); - - - ies->logging_->log("Sent the response message to the Receiver!"); -// GETCURRENTTIME(s); -// printf("Yu debug:time when received message: %ld.%ld\n", s.tv_sec, s.tv_usec); - PhysicalQueryPlan* runable_iterator_message=new PhysicalQueryPlan(); - -// GETCURRENTTIME(t); - *runable_iterator_message=PhysicalQueryPlan::deserialize4K(message); -// cout<<"Yu debug:deserialize message use :"<createNewThreadAndRun(runable_iterator_message); -// Send(int(0),from); - ies->logging_->log("iterator tree is added to the running queue"); -} -void IteratorExecutorSlave::ExecuteIteratorActor::progation(const int &message,const Theron::Address from){ - printf("Slave:%d\n",message); - sleep(1); - IteratorExecutorMaster::getInstance()->Propogation(message+1,"127.0.0.1"); -} - - -void IteratorExecutorSlave::createNewThreadAndRun(PhysicalQueryPlan* it){ - - pthread_t thread; - void** arg=new void*[2]; - arg[0]=it; - arg[1]=this; - pthread_create(&thread,NULL,run_iterator,arg); - lock_.acquire(); - busy_thread_list_.insert(thread); - lock_.release(); - - logging_->log("A new Running thread is created!"); -} -void* IteratorExecutorSlave::run_iterator(void* arg){ - PhysicalQueryPlan* it=(PhysicalQueryPlan*)(*(void**)arg); - IteratorExecutorSlave* Pthis=(IteratorExecutorSlave*)(*((void**)arg+1)); - -// p_green("A new thread (%lx) is 
created.\n",pthread_self()); -// printf("--------\n Before apply:"); -// CPUResourceManager::getInstance()->print(); -// int core=CPUResourceManager::getInstance()->applyCore(); -// it->run(); -// CPUResourceManager::getInstance()->freeCore(core); -// printf("--------\n After apply:"); - - - executePhysicalQueryPlan(*it); - -// CPUResourceManager::getInstance()->print(); - delete it; - Pthis->logging_->log("A iterator tree is successfully executed!\n"); - assert(Pthis->busy_thread_list_.find(pthread_self())!=Pthis->busy_thread_list_.end()); - Pthis->lock_.acquire(); - Pthis->busy_thread_list_.erase(pthread_self()); - Pthis->lock_.release(); -// p_green("Job in thread (%lx) finished.\n",pthread_self()); - delete[] ((void**)arg); +void* IteratorExecutorSlave::run_iterator(void* arg) { +#ifndef USE_THREAD_POOL + pthread_detach(pthread_self()); +#endif + PhysicalQueryPlan* it = (PhysicalQueryPlan*)(*(void**)arg); + IteratorExecutorSlave* Pthis = (IteratorExecutorSlave*)(*((void**)arg + 1)); + executePhysicalQueryPlan(*it); + it->destory(); + delete it; + LOG(INFO) << "A iterator tree is successfully executed!\n"; + + // Pthis->lock_.acquire(); + // assert(Pthis->busy_thread_list_.find(pthread_self()) != + // Pthis->busy_thread_list_.end()); + // Pthis->busy_thread_list_.erase(pthread_self()); + // Pthis->lock_.release(); + delete[]((void**)arg); } void IteratorExecutorSlave::executePhysicalQueryPlan(PhysicalQueryPlan plan) { -// int core=CPUResourceManager::getInstance()->applyCore(); -// printf("--------\n After apply:"); -// CPUResourceManager::getInstance()->print(); - plan.run(); -// CPUResourceManager::getInstance()->freeCore(core); + // int core=CPUResourceManager::getInstance()->applyCore(); + // printf("--------\n After apply:"); + // CPUResourceManager::getInstance()->print(); + plan.run(); + // CPUResourceManager::getInstance()->freeCore(core); } diff --git a/Executor/IteratorExecutorSlave.h b/Executor/IteratorExecutorSlave.h index 5e80d78b1..0bbd2f8df 100755 
--- a/Executor/IteratorExecutorSlave.h +++ b/Executor/IteratorExecutorSlave.h @@ -7,55 +7,37 @@ #ifndef ITERATOREXECUTORSLAVE_H_ #define ITERATOREXECUTORSLAVE_H_ -#include #include #include #include #include -#ifdef DMALLOC -#include "dmalloc.h" -#endif #include "../common/Message.h" #include "../common/Logging.h" #include "../utility/lock.h" #include "../utility/Timer.h" using namespace std; +class PhysicalQueryPlan; +class IteratorExecutorSlave { + public: + IteratorExecutorSlave(); + virtual ~IteratorExecutorSlave(); + static void executePhysicalQueryPlan(PhysicalQueryPlan plan); -class IteratorExecutorSlave { -public: - friend class ExecuteIteratorActor; - class ExecuteIteratorActor: public Theron::Actor - { - public: - ExecuteIteratorActor(IteratorExecutorSlave *ies,Theron::Framework &framework, const char *const slaveID); - - private: - void Handler256(const Message256 &message,const Theron::Address from); - void Handler4K(const Message4K &message,const Theron::Address from); - void progation(const int &message,const Theron::Address from); - private: - IteratorExecutorSlave* ies; - }; - IteratorExecutorSlave(); - - virtual ~IteratorExecutorSlave(); - static void executePhysicalQueryPlan(PhysicalQueryPlan plan); -public: - void createNewThreadAndRun(PhysicalQueryPlan*); -private: - - static void* run_iterator(void*); -protected: - Logging * logging_; -private: - ExecuteIteratorActor* execute_iterator_actor; - Theron::Framework* framework; - Theron::EndPoint* endpoint; - std::string slave_id; - SpineLock lock_; - std::set busy_thread_list_; + public: + void createNewThreadAndRun(PhysicalQueryPlan*); + + private: + static void* run_iterator(void*); + + protected: + Logging* logging_; + + private: + std::string slave_id; + SpineLock lock_; + std::set busy_thread_list_; }; #endif /* ITERATOREXECUTORSLAVE_H_ */ diff --git a/Executor/Makefile.am b/Executor/Makefile.am index 40d0197a8..f7c40ade0 100644 --- a/Executor/Makefile.am +++ b/Executor/Makefile.am @@ -1,8 
+1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS\ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -17,24 +16,23 @@ LDADD = ../BlockStreamIterator/libblockstreamiterator.a \ ../common/Block/libblock.a \ ../common/libcommon.a \ ../utility/libutility.a \ - ${THERON_HOME}/Lib/libtherond.a \ + ${CAF_HOME}/build/lib/libcaf_core.so \ + ${CAF_HOME}/build/lib/libcaf_io.so \ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libexecutor.a -libexecutor_a_SOURCES = \ - AdaptiveEndPoint.cpp AdaptiveEndPoint.h \ - Coordinator.cpp Coordinator.cppnew \ - Coordinator.h exchange_tracker.cpp \ +libexecutor_a_SOURCES = exchange_tracker.cpp \ exchange_tracker.h expander_tracker.cpp \ expander_tracker.h IteratorExecutorMaster.cpp \ IteratorExecutorMaster.h IteratorExecutorSlave.cpp \ IteratorExecutorSlave.h PortManager.cpp \ - PortManager.h - -libexecutor_a_LIBADD = ${THERON_HOME}/Lib/libtherond.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so + PortManager.h +libexecutor_a_LIBADD = \ + ${CAF_HOME}/build/lib/libcaf_core.so \ + ${CAF_HOME}/build/lib/libcaf_io.so \ + ${BOOST_HOME}/stage/lib/libboost_serialization.a \ + ${BOOST_HOME}/stage/lib/libboost_serialization.so SUBDIRS = Test DIST_SUBDIRS = Test diff --git a/Executor/Test/Makefile.am b/Executor/Test/Makefile.am index c6b373783..0b244379e 100644 --- a/Executor/Test/Makefile.am +++ b/Executor/Test/Makefile.am @@ -2,8 +2,7 @@ AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -15,11 +14,10 @@ AM_LDFLAGS+=-ltcmalloc endif LDADD = ../libexecutor.a \ - 
${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libtest.a libtest_a_SOURCES = diff --git a/Executor/exchange_tracker.cpp b/Executor/exchange_tracker.cpp index f5dd3e867..f19f3f0f8 100755 --- a/Executor/exchange_tracker.cpp +++ b/Executor/exchange_tracker.cpp @@ -5,138 +5,117 @@ * Author: wangli */ +#include "./exchange_tracker.h" + +#include #include #include -#include "../common/Logging.h" #include "../Environment.h" -#include "../common/TimeOutReceiver.h" #include "../utility/rdtsc.h" #include "../common/ids.h" -#include - -#include "exchange_tracker.h" -ExchangeTracker::ExchangeTracker() { - endpoint=Environment::getInstance()->getEndPoint(); - framework=new Theron::Framework(*endpoint); - framework->SetMaxThreads(1); - logging_=new ExchangeTrackerLogging(); - std::ostringstream name; - name<<"ExchangeTrackerActor://"<getNodeID(); - actor=new ExchangeTrackerActor(this,framework,name.str().c_str()); - logging_->log("%s created!",name.str().c_str()); - - +#include "../node_manager/base_node.h" +#include "caf/all.hpp" +#include "caf/io/all.hpp" +#include "caf/response_handle.hpp" +using caf::actor; +using caf::after; +using caf::event_based_actor; +using caf::io::remote_actor; +using caf::response_handle; + +using claims::AskExchAtom; +using claims::OkAtom; +ExchangeTracker::ExchangeTracker() {} + +ExchangeTracker::~ExchangeTracker() {} +bool ExchangeTracker::RegisterExchange(ExchangeID id, std::string port) { + lock_.acquire(); + if (id_to_port.find(id) != id_to_port.end()) { + LOG(ERROR) << "RegisterExchange fails because the exchange id has already " + "existed."; + lock_.release(); + return false; + } + id_to_port[id] = port; + LOG(INFO) << "New exchange with id= " << 
id.exchange_id << " (port = " << port + << ")is successfully registered!"; + lock_.release(); + return true; } - -ExchangeTracker::~ExchangeTracker() { - delete logging_; - delete actor; - delete framework; - // TODO Auto-generated destructor stub +void ExchangeTracker::LogoutExchange(const ExchangeID& id) { + lock_.acquire(); + boost::unordered_map::const_iterator it = + id_to_port.find(id); + assert(it != id_to_port.cend()); + id_to_port.erase(it); + lock_.release(); + LOG(INFO) << "Exchange with id=(" << id.exchange_id << " , " + << id.partition_offset << " ) is logged out!"; } -bool ExchangeTracker::RegisterExchange(ExchangeID id, std::string port){ - lock_.acquire(); - if(id_to_port.find(id)!=id_to_port.end()){ - logging_->log("RegisterExchange fails because the exchange id has already existed."); - lock_.release(); - return false; - } - id_to_port[id]=port; - logging_->log("New exchange with id=%d (port %s)is successfully registered!",id.exchange_id,port.c_str()); - lock_.release(); - return true; -} -void ExchangeTracker::LogoutExchange(const ExchangeID &id){ - lock_.acquire(); - boost::unordered_map ::const_iterator it=id_to_port.find(id); -// if(it==id_to_port.cend()){ -// printf("Print:\n"); -// this->printAllExchangeId(); -// printf("Printed!\n"); -// } - assert(it!=id_to_port.cend()); - id_to_port.erase(it); - lock_.release(); - logging_->log("Exchange with id=(%d,%d) is logged out!",id.exchange_id,id.partition_offset); -} - -bool ExchangeTracker::AskForSocketConnectionInfo(ExchangeID exchange_id,NodeID target_id,NodeAddress & node_addr){ - unsigned long long int step1,step2; -// return 17002; - step1=curtick(); - step2=curtick(); - TimeOutReceiver* receiver=new TimeOutReceiver(endpoint); - Theron::Catcher ResultCatcher; - receiver->RegisterHandler(&ResultCatcher,&Theron::Catcher::Push); - std::ostringstream str; - str<<"ExchangeTrackerActor://"<Send(exchange_id,receiver->GetAddress(),Theron::Address(str.str().c_str())); - unsigned Timeout=30000; 
//timeout in millisecond - -// if(receiver->TimeOutWait(1,Timeout)==0){ -// /** -// * TODO: In current version, the request is only tried once. In the future, -// * the request should be sent repeatedly until the reply is received or the -// * times of timeouts exceeds some threshold. -// */ -// logging_->elog("Timeout when asking node[%s] for the connection info, the request Exchange ID is %u",target_ip.c_str(),exchange_id); -// assert(false); -// return 0; -// } - - -// receiver.Wait(1); - - Message256 feedback; - Theron::Address from; -// ResultCatcher.Pop(feedback,from); - NodeRegisterMessage received("0",0); -// printf("OOOOOOOOOOOO step 1:%4.4f\n",getSecond(step1)); - while(!ResultCatcher.Pop(received,from)); - node_addr.ip=received.get_ip(); - std::ostringstream str1; - str1<log("Receive Socket connection info from <%s>, content: %s:%s",from.AsString(),NCM.ip.c_str(),NCM.port.c_str()); -// receiver.~Receiver(); -// return atoi(NCM.port.c_str()); -// printf("OOOOOOOOOOOO step 2:%4.4f\n",getSecond(step2)); - receiver->~TimeOutReceiver(); - return received.ip!=0; +bool ExchangeTracker::AskForSocketConnectionInfo(const ExchangeID& exchange_id, + const NodeID& target_id, + NodeAddress& node_addr, + actor& target_actor) { + caf::scoped_actor self; + node_addr.ip = "0"; + node_addr.port = "0"; + int try_times = 0; + while (try_times < 3) { + try { + LOG(INFO)<<"ask exch Atom to "<sync_send(target_actor, AskExchAtom::value, exchange_id).await( + /// should add overtime! + [&](OkAtom, const string& ip, const string& port) { + node_addr.ip = ip; + node_addr.port = port; + try_times = 100; + LOG(INFO)<<"ip ~~~:"<> + [&]() { + ++try_times; + LOG(WARNING) << "asking exchange connection info, but timeout " + "5s!!! 
times= " << try_times << endl; + } + + ); + } catch (caf::network_error& e) { + PLOG(ERROR) << "master socket related errors occur when asking for socke " + "conn info " << endl; + assert(false); + return false; + } + } + return node_addr.ip != "0"; } - -ExchangeTracker::ExchangeTrackerActor::ExchangeTrackerActor(ExchangeTracker* et,Theron::Framework* framework,const char* Name) -:et(et),Actor(*framework,Name){ - RegisterHandler(this,&ExchangeTracker::ExchangeTrackerActor::AskForConnectionInfo); +bool ExchangeTracker::AskForSocketConnectionInfo(const ExchangeID& exchange_id, + const NodeID& target_id, + NodeAddress& node_addr) { + auto target_actor = + Environment::getInstance()->get_slave_node()->GetNodeActorFromId( + target_id); + + return AskForSocketConnectionInfo(exchange_id, target_id, node_addr, + target_actor); } - -void ExchangeTracker::ExchangeTrackerActor::AskForConnectionInfo(const ExchangeID &exchange_id, const Theron::Address from){ - et->logging_->log("%s is asking for the socket connecton info!",from.AsString()); - et->lock_.acquire(); - NodeRegisterMessage node_addr("0",0); - if(et->id_to_port.find(exchange_id)!=et->id_to_port.cend()){ - -// NodeConnectionMessage myNCM(Environment::getInstance()->getIp(),et->id_to_port[exchange_id]); - node_addr.set_ip(Environment::getInstance()->getIp()); - node_addr.port=atoi(et->id_to_port[exchange_id].c_str()); - Send(node_addr,from); -// Send(NodeConnectionMessage::serialize(myNCM),from); - et->logging_->log("The ask is answered!"); - } - else{ - -// Send(NodeConnectionMessage::serialize(NodeConnectionMessage("0","0")),from); - Send(node_addr,from); - et->logging_->log("No exchange matched for %lld!",exchange_id.exchange_id); - } - et->lock_.release(); - +NodeAddress ExchangeTracker::GetExchAddr(ExchangeID exch_id) { + lock_.acquire(); + NodeAddress ret; + if (id_to_port.find(exch_id) != id_to_port.cend()) { + ret.ip = Environment::getInstance()->getIp(); + ret.port = id_to_port[exch_id]; + } else { + ret.ip = 
"0"; + ret.port = "0"; + } + lock_.release(); + return ret; } - void ExchangeTracker::printAllExchangeId() const { - for(boost::unordered_map::const_iterator it=id_to_port.cbegin();it!=id_to_port.cend();it++){ - printf("(%ld,%ld) --->%s\n",it->first.exchange_id,it->first.partition_offset,it->second.c_str()); - } + for (boost::unordered_map::const_iterator it = + id_to_port.cbegin(); + it != id_to_port.cend(); it++) { + printf("(%ld,%ld) --->%s\n", it->first.exchange_id, + it->first.partition_offset, it->second.c_str()); + } } diff --git a/Executor/exchange_tracker.h b/Executor/exchange_tracker.h index 5802d8198..ca1e91fdf 100755 --- a/Executor/exchange_tracker.h +++ b/Executor/exchange_tracker.h @@ -12,49 +12,36 @@ #ifndef EXCHANGETRACKER_H_ #define EXCHANGETRACKER_H_ -#include #include -#include -#ifdef DMALLOC -#include "dmalloc.h" -#endif - -#include "../common/Logging.h" +#include #include "../utility/lock.h" #include "../common/ids.h" -class ExchangeTracker { -public: - ExchangeTracker(); - virtual ~ExchangeTracker(); - bool RegisterExchange(ExchangeID exchange_id, std::string port); - void LogoutExchange(const ExchangeID &exchange_id); - bool AskForSocketConnectionInfo(ExchangeID exchange_id,NodeID target_id, NodeAddress & node_addr); - void printAllExchangeId()const; -private: - Theron::EndPoint* endpoint; - Theron::Framework* framework; - Theron::Actor* actor; - boost::unordered_map id_to_port; - Logging* logging_; - Lock lock_; - +#include "caf/all.hpp" - ///////////////////////////////////////////////////////////// - /** - * RegisterActor - */ - friend class RegisterActor; - class ExchangeTrackerActor:public Theron::Actor{ - public: - ExchangeTrackerActor(ExchangeTracker* et,Theron::Framework* framework, const char* Name); - private: - void AskForConnectionInfo(const ExchangeID &exchange_id, const Theron::Address from); +using caf::actor; - private: - ExchangeTracker* et; - - - }; +/* + * maintain pair information of exchange, and provide interface for + 
* other exchange asking connection port + */ +class ExchangeTracker { + public: + ExchangeTracker(); + virtual ~ExchangeTracker(); + bool RegisterExchange(ExchangeID exchange_id, std::string port); + void LogoutExchange(const ExchangeID& exchange_id); + bool AskForSocketConnectionInfo(const ExchangeID& exchange_id, + const NodeID& target_id, + NodeAddress& node_addr); + bool AskForSocketConnectionInfo(const ExchangeID& exchange_id, + const NodeID& target_id, + NodeAddress& node_addr, actor& target_actor); + void printAllExchangeId() const; + NodeAddress GetExchAddr(ExchangeID exch_id); + + private: + boost::unordered_map id_to_port; + Lock lock_; }; #endif /* EXCHANGETRACKER_H_ */ diff --git a/Executor/expander_tracker.cpp b/Executor/expander_tracker.cpp index 05fcc027d..aa028719a 100644 --- a/Executor/expander_tracker.cpp +++ b/Executor/expander_tracker.cpp @@ -13,6 +13,7 @@ #include "../common/ids.h" #include "expander_tracker.h" +#include "../common/memory_handle.h" #define DECISION_SHRINK 0 #define DECISION_EXPAND 1 #define DECISION_KEEP 2 @@ -42,6 +43,7 @@ ExpanderTracker::~ExpanderTracker() { pthread_cancel(monitor_thread_id_); instance_ = 0; delete log_; + expander_id_to_status_.clear(); } ExpanderTracker* ExpanderTracker::getInstance() { @@ -179,8 +181,7 @@ ExpanderID ExpanderTracker::registerNewExpander( ExpanderID expander_id; lock_.acquire(); expander_id = IDsGenerator::getInstance()->getUniqueExpanderID(); - ExpanderStatus* es = new ExpanderStatus(expand_shrink); - expander_id_to_status_[expander_id] = es; + expander_id_to_status_[expander_id] = new ExpanderStatus(expand_shrink); expander_id_to_status_[expander_id]->addNewEndpoint( LocalStageEndPoint(stage_desc, "Expander", buffer)); expander_id_to_expand_shrink_[expander_id] = expand_shrink; @@ -197,11 +198,13 @@ void ExpanderTracker::unregisterExpander(ExpanderID expander_id) { it != thread_id_to_expander_id_.end(); it++) { // assert(it->second!=expander_id); } - // delete 
expander_id_to_status_[expander_id]; + auto es = expander_id_to_status_.find(expander_id)->second; expander_id_to_status_.erase(expander_id); + LOG(INFO) << "erased expander id:" << expander_id << " from expander_id_to_status_" << std::endl; expander_id_to_expand_shrink_.erase(expander_id); + DELETE_PTR(es); lock_.release(); } @@ -216,7 +219,7 @@ void ExpanderTracker::ExpanderStatus::addNewEndpoint( // return; // } // //if the endpoint is exchange or state_stage_start, then the segment - //might step into a new local stage. + // might step into a new local stage. // switch(new_end_point.type){ // case endpoint_state_stage_start:{ // assert(!pending_endpoints.empty()); @@ -245,7 +248,7 @@ void ExpanderTracker::ExpanderStatus::addNewEndpoint( if (new_end_point.type == stage_desc) { pending_endpoints.push(new_end_point); // printf("=======stage - //desc:%s\n",new_end_point.end_point_name.c_str()); + // desc:%s\n",new_end_point.end_point_name.c_str()); } else { /*new_end_point.type==stage_end*/ LocalStageEndPoint top = pending_endpoints.top(); @@ -327,16 +330,17 @@ int ExpanderTracker::decideExpandingOrShrinking( * correctness of the elastic iterator model. */ // { - // int ret=rand()%2;// overwrite the decide with a random seed to test - //the correctness of shrinkage and expansion. + // int ret=rand()%2;// overwrite the decide with a random seed to + // test + // the correctness of shrinkage and expansion. 
// // if(ret==DECISION_EXPAND){ // return - //expandeIfNotExceedTheMaxDegreeOfParallelism(current_degree_of_parallelism); + // expandeIfNotExceedTheMaxDegreeOfParallelism(current_degree_of_parallelism); // } // if(ret==DECISION_SHRINK){ // return - //shrinkIfNotExceedTheMinDegreeOfParallelims(current_degree_of_parallelism); + // shrinkIfNotExceedTheMinDegreeOfParallelims(current_degree_of_parallelism); // } // return ret; // } @@ -500,7 +504,7 @@ void* ExpanderTracker::monitoringThread(void* arg) { continue; } // Pthis->printStatus(); - boost::unordered_map::iterator it = + std::unordered_map::iterator it = Pthis->expander_id_to_status_.begin(); for (int tmp = 0; tmp < cur; tmp++) it++; ExpanderID id = it->first; @@ -508,16 +512,16 @@ void* ExpanderTracker::monitoringThread(void* arg) { assert(!Pthis->expander_id_to_expand_shrink_.empty()); bool print = true; // bool - //print=it->second.current_stage.dataflow_src_.end_point_name==std::string("Exchange"); + // print=it->second.current_stage.dataflow_src_.end_point_name==std::string("Exchange"); // print=print&(it->second.current_stage.dataflow_desc_.end_point_name.find("Aggregation")!=-1);// //----> Agg // bool - //print=(it->second.current_stage.dataflow_desc_.end_point_name.find("join")!=-1); + // print=(it->second.current_stage.dataflow_desc_.end_point_name.find("join")!=-1); //// ---> Join // print=print&(it->second.current_stage.dataflow_src_.end_point_name.find("Scan")!=-1); //// Scan ---> // printf("return=%d %d - //print=%d--------------\n",it->second.current_stage.dataflow_src_.end_point_name.find("Aggregation"),it->second.current_stage.dataflow_desc_.end_point_name.find("Aggregation"),print); + // print=%d--------------\n",it->second.current_stage.dataflow_src_.end_point_name.find("Aggregation"),it->second.current_stage.dataflow_desc_.end_point_name.find("Aggregation"),print); // bool print=true; // printf("\n"); SWITCHER(print, Pthis->log_->log("--------%d---------", id)) @@ -609,7 +613,7 @@ void 
ExpanderTracker::printStatus() { } printf("\n"); printf("ExpanderID : ExpanderStatus*\n"); - for (boost::unordered_map::iterator it = + for (std::unordered_map::iterator it = expander_id_to_status_.begin(); it != expander_id_to_status_.end(); it++) { printf("(%ld,%llx) ", it->first, it->second); @@ -634,10 +638,16 @@ void ExpanderTracker::printStatus() { } bool ExpanderTracker::trackExpander(ExpanderID id) const { + lock_.acquire(); if (expander_id_to_expand_shrink_.find(id) != - expander_id_to_expand_shrink_.end()) + expander_id_to_expand_shrink_.end()) { + lock_.release(); return true; - if (expander_id_to_status_.find(id) != expander_id_to_status_.end()) + } + if (expander_id_to_status_.find(id) != expander_id_to_status_.end()) { + lock_.release(); return true; + } + lock_.release(); return false; } diff --git a/Executor/expander_tracker.h b/Executor/expander_tracker.h index 75c592161..fca90ce1a 100644 --- a/Executor/expander_tracker.h +++ b/Executor/expander_tracker.h @@ -9,8 +9,9 @@ #define EXPANDERTRACKER_H_ #include -#include +//#include #include +#include #include #ifdef DMALLOC #include "dmalloc.h" @@ -23,205 +24,205 @@ #include "../common/ExpandedThreadTracker.h" typedef pthread_t expanded_thread_id; - -enum endpoint_type{stage_src,stage_desc}; - /** - * Local local_stage endpoint refers to the start or the end of a stage within a segment. - * It could be either exchange, state, or expander. 
- */ -struct LocalStageEndPoint{ - LocalStageEndPoint(endpoint_type tp,std::string name="Not Given",MonitorableBuffer* buffer_handle=0) - :type(tp),monitorable_buffer(buffer_handle),end_point_name(name){ - - } - LocalStageEndPoint():monitorable_buffer(0),end_point_name("Initial"),type(stage_src){ - } - LocalStageEndPoint(const LocalStageEndPoint& r){ - this->type=r.type; - this->monitorable_buffer=r.monitorable_buffer; - this->end_point_name=r.end_point_name; - } - endpoint_type type; - MonitorableBuffer* monitorable_buffer; - std::string end_point_name; +enum endpoint_type { stage_src, stage_desc }; +/** + * Local local_stage endpoint refers to the start or the end of a stage within a + * segment. + * It could be either exchange, state, or expander. + */ +struct LocalStageEndPoint { + LocalStageEndPoint(endpoint_type tp, std::string name = "Not Given", + MonitorableBuffer* buffer_handle = 0) + : type(tp), monitorable_buffer(buffer_handle), end_point_name(name) {} + LocalStageEndPoint() + : monitorable_buffer(0), end_point_name("Initial"), type(stage_src) {} + LocalStageEndPoint(const LocalStageEndPoint& r) { + this->type = r.type; + this->monitorable_buffer = r.monitorable_buffer; + this->end_point_name = r.end_point_name; + } + endpoint_type type; + MonitorableBuffer* monitorable_buffer; + std::string end_point_name; }; -//typedef std::pair local_stage; - -struct local_stage{ - enum type{from_buffer, buffer_to_buffer, to_buffer,no_buffer,incomplete }; - local_stage():type_(incomplete){ - - } - local_stage(const local_stage &r){ - this->type_=r.type_; - this->dataflow_src_=r.dataflow_src_; - this->dataflow_desc_=r.dataflow_desc_; - } -// operator=(const local_stage &r){ -// -// } - local_stage(LocalStageEndPoint start,LocalStageEndPoint end) - :dataflow_src_(start),dataflow_desc_(end){ - bool start_buffer=dataflow_src_.monitorable_buffer!=0; - bool end_buffer=dataflow_desc_.monitorable_buffer!=0; - if(start_buffer){ - if(end_buffer){ - type_=buffer_to_buffer; - } 
- else{ - type_=from_buffer; - } - } - else{ - if(end_buffer){ - type_=to_buffer; - } - else{ - type_=no_buffer; - } - } - } - LocalStageEndPoint dataflow_src_; - LocalStageEndPoint dataflow_desc_; - type type_; - std::string get_type_name(type tp)const{ - switch(tp){ - case from_buffer:{ - return std::string("from_buffer"); - } - case buffer_to_buffer:{ - return std::string("buffer_to_buffer"); - } - case to_buffer:{ - return std::string("to_buffer"); - } - case no_buffer:{ - return std::string("no_buffer"); - } - default:{ - return std::string("invalid type!"); - } - } -// return std::string(); - } - void print(){ - if(type_==incomplete){ - printf("Incomplete!\n"); - return; - } - printf("%s----->%s, type: %s\n",dataflow_src_.end_point_name.c_str(),dataflow_desc_.end_point_name.c_str(),get_type_name(type_).c_str()); - } - +// typedef std::pair local_stage; + +struct local_stage { + enum type { from_buffer, buffer_to_buffer, to_buffer, no_buffer, incomplete }; + local_stage() : type_(incomplete) {} + local_stage(const local_stage& r) { + this->type_ = r.type_; + this->dataflow_src_ = r.dataflow_src_; + this->dataflow_desc_ = r.dataflow_desc_; + } + // operator=(const local_stage &r){ + // + // } + local_stage(LocalStageEndPoint start, LocalStageEndPoint end) + : dataflow_src_(start), dataflow_desc_(end) { + bool start_buffer = dataflow_src_.monitorable_buffer != 0; + bool end_buffer = dataflow_desc_.monitorable_buffer != 0; + if (start_buffer) { + if (end_buffer) { + type_ = buffer_to_buffer; + } else { + type_ = from_buffer; + } + } else { + if (end_buffer) { + type_ = to_buffer; + } else { + type_ = no_buffer; + } + } + } + LocalStageEndPoint dataflow_src_; + LocalStageEndPoint dataflow_desc_; + type type_; + std::string get_type_name(type tp) const { + switch (tp) { + case from_buffer: { + return std::string("from_buffer"); + } + case buffer_to_buffer: { + return std::string("buffer_to_buffer"); + } + case to_buffer: { + return std::string("to_buffer"); + } + 
case no_buffer: { + return std::string("no_buffer"); + } + default: { return std::string("invalid type!"); } + } + // return std::string(); + } + void print() { + if (type_ == incomplete) { + printf("Incomplete!\n"); + return; + } + printf("%s----->%s, type: %s\n", dataflow_src_.end_point_name.c_str(), + dataflow_desc_.end_point_name.c_str(), get_type_name(type_).c_str()); + } }; - class ExpanderTracker { - - - enum segment_status {seg_no_producing, seg_normal_producing, seg_over_producing,seg_under_producing}; - - struct ExpandedThreadStatus{ - bool call_back_; - - }; - - /* - * This structure maintains the status of current expander in terms of running stage. - */ - struct ExpanderStatus{ - ExpanderStatus(ExpandabilityShrinkability* expand_shrink):perf_info(expand_shrink){ - - } -// ExpanderStatus(){}; - ~ExpanderStatus(); - PerformanceInfo perf_info; - local_stage current_stage; - std::stack pending_endpoints; - void addNewEndpoint(LocalStageEndPoint); - Lock lock; - }; - -public: - static ExpanderTracker* getInstance(); - virtual ~ExpanderTracker(); - /* - * Call this method when a new expanded thread is created, and the - * expander tracker will maintain a status of this thread. - */ - bool registerNewExpandedThreadStatus(expanded_thread_id id,ExpanderID exp_id); - - /* - * Call this method just before a expanded thread finishes its work - * such that the expander tracker could delete its status. - */ - bool deleteExpandedThreadStatus(expanded_thread_id id); - - /* - * Check whether an expanded thread specified by expanded_thread_id - * has callback request. Return false when the given expanded thread - * id is not in the list. - */ - bool isExpandedThreadCallBack(expanded_thread_id id); - - /* - * Call this method if you want to callback the expanded thread specified - * by the expanded thread id. Return false if the thread id does not exists - * in expander tracker or the thread has be called back. 
- */ - bool callbackExpandedThread(expanded_thread_id id); - - - bool addNewStageEndpoint(expanded_thread_id,LocalStageEndPoint); - - PerformanceInfo* getPerformanceInfo(expanded_thread_id); - - ExpanderID registerNewExpander(MonitorableBuffer* buffer,ExpandabilityShrinkability* expand_shrink); - void unregisterExpander(ExpanderID expander_id); - - /* - * return true if ExpanderTrack has any record regarding to the - * expander with specified id. - */ - bool trackExpander(ExpanderID id)const; - - static segment_status getSegmentStatus(local_stage&); - -private: - ExpanderTracker(); - static void* monitoringThread(void* arg); - - /* - * The access of current_stage might cause bug if thread-safe is not concerned. - */ - int decideExpandingOrShrinking(local_stage& current_stage,unsigned int current_degree_of_parallelism,bool print=true); - - int expandeIfNotExceedTheMaxDegreeOfParallelism(int current_degree_of_parallelism)const; - int shrinkIfNotExceedTheMinDegreeOfParallelims(int current_degree_of_parallelism)const; - - void printStatus(); -private: - static ExpanderTracker* instance_; - - Lock lock_; - - - boost::unordered_map thread_id_to_expander_id_; - - boost::unordered_map expander_id_to_status_; - - boost::unordered_map expander_id_to_expand_shrink_; - - /* - * A unordered map from expanded thread id to expanded thread status - */ -public://for debug, this should be private! - std::map id_to_status_; - - Logging* log_; - - pthread_t monitor_thread_id_; - + enum segment_status { + seg_no_producing, + seg_normal_producing, + seg_over_producing, + seg_under_producing + }; + + struct ExpandedThreadStatus { + bool call_back_; + }; + + /* + * This structure maintains the status of current expander in terms of running + * stage. 
+ */ + struct ExpanderStatus { + ExpanderStatus(ExpandabilityShrinkability* expand_shrink) + : perf_info(expand_shrink) {} + // ExpanderStatus(){}; + ~ExpanderStatus(); + PerformanceInfo perf_info; + local_stage current_stage; + std::stack pending_endpoints; + void addNewEndpoint(LocalStageEndPoint); + Lock lock; + }; + + public: + static ExpanderTracker* getInstance(); + virtual ~ExpanderTracker(); + /* + * Call this method when a new expanded thread is created, and the + * expander tracker will maintain a status of this thread. + */ + bool registerNewExpandedThreadStatus(expanded_thread_id id, + ExpanderID exp_id); + + /* + * Call this method just before a expanded thread finishes its work + * such that the expander tracker could delete its status. + */ + bool deleteExpandedThreadStatus(expanded_thread_id id); + + /* + * Check whether an expanded thread specified by expanded_thread_id + * has callback request. Return false when the given expanded thread + * id is not in the list. + */ + bool isExpandedThreadCallBack(expanded_thread_id id); + + /* + * Call this method if you want to callback the expanded thread specified + * by the expanded thread id. Return false if the thread id does not exists + * in expander tracker or the thread has be called back. + */ + bool callbackExpandedThread(expanded_thread_id id); + + bool addNewStageEndpoint(expanded_thread_id, LocalStageEndPoint); + + PerformanceInfo* getPerformanceInfo(expanded_thread_id); + + ExpanderID registerNewExpander(MonitorableBuffer* buffer, + ExpandabilityShrinkability* expand_shrink); + void unregisterExpander(ExpanderID expander_id); + + /* + * return true if ExpanderTrack has any record regarding to the + * expander with specified id. 
+ */ + bool trackExpander(ExpanderID id) const; + + static segment_status getSegmentStatus(local_stage&); + + private: + ExpanderTracker(); + static void* monitoringThread(void* arg); + + /* + * The access of current_stage might cause bug if thread-safe is not + * concerned. + */ + int decideExpandingOrShrinking(local_stage& current_stage, + unsigned int current_degree_of_parallelism, + bool print = true); + + int expandeIfNotExceedTheMaxDegreeOfParallelism( + int current_degree_of_parallelism) const; + int shrinkIfNotExceedTheMinDegreeOfParallelims( + int current_degree_of_parallelism) const; + + void printStatus(); + + private: + static ExpanderTracker* instance_; + + Lock lock_; + + boost::unordered_map + thread_id_to_expander_id_; + + std::unordered_map expander_id_to_status_; + + boost::unordered_map + expander_id_to_expand_shrink_; + + /* + * A unordered map from expanded thread id to expanded thread status + */ + public: // for debug, this should be private! + std::map id_to_status_; + + Logging* log_; + + pthread_t monitor_thread_id_; }; #endif /* EXPANDERTRACKER_H_ */ diff --git a/IndexManager/CSBIndexBuilding.cpp b/IndexManager/CSBIndexBuilding.cpp index bcab710ae..6758a15af 100644 --- a/IndexManager/CSBIndexBuilding.cpp +++ b/IndexManager/CSBIndexBuilding.cpp @@ -13,441 +13,480 @@ using std::stable_sort; -bottomLayerCollecting::bottomLayerCollecting(State state) :state_(state), partition_reader_iterator_(0), chunk_reader_iterator_(0), chunk_offset_(0), block_offset_(0) { - InitExpandedStatus(); -} -bottomLayerCollecting::bottomLayerCollecting(){ - InitExpandedStatus(); +bottomLayerCollecting::bottomLayerCollecting(State state) + : state_(state), + partition_reader_iterator_(0), + chunk_reader_iterator_(0), + chunk_offset_(0), + block_offset_(0) { + InitExpandedStatus(); } +bottomLayerCollecting::bottomLayerCollecting() { InitExpandedStatus(); } bottomLayerCollecting::~bottomLayerCollecting() { - // TODO Auto-generated destructor stub + // TODO 
Auto-generated destructor stub } -bottomLayerCollecting::State::State(ProjectionID projection_id, Schema* schema, unsigned key_indexing, unsigned block_size) -: projection_id_(projection_id), schema_(schema), key_indexing_(key_indexing), block_size_(block_size) { - +bottomLayerCollecting::State::State(ProjectionID projection_id, Schema* schema, + unsigned key_indexing, unsigned block_size) + : projection_id_(projection_id), + schema_(schema), + key_indexing_(key_indexing), + block_size_(block_size) {} + +bool bottomLayerCollecting::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { + AtomicPushBlockStream(BlockStreamBase::createBlockWithDesirableSerilaizedSize( + state_.schema_, state_.block_size_)); + if (TryEntryIntoSerializedSection()) { + computeOutputSchema(); + /* this is the first expanded thread*/ + PartitionStorage* partition_handle_; + if ((partition_handle_ = BlockManager::getInstance()->GetPartitionHandle( + PartitionID(state_.projection_id_, partition_offset))) == 0) { + printf("The partition[%s] does not exists!\n", + PartitionID(state_.projection_id_, partition_offset) + .getName() + .c_str()); + SetReturnStatus(false); + } else { + partition_reader_iterator_ = + partition_handle_->CreateAtomicReaderIterator(); + } + SetReturnStatus(true); + } + BarrierArrive(); + return GetReturnStatus(); } -bool bottomLayerCollecting::Open(const PartitionOffset& partition_offset) -{ - - AtomicPushBlockStream(BlockStreamBase::createBlockWithDesirableSerilaizedSize(state_.schema_, state_.block_size_)); - if(TryEntryIntoSerializedSection()){ - - computeOutputSchema(); - /* this is the first expanded thread*/ - PartitionStorage* partition_handle_; - if((partition_handle_=BlockManager::getInstance()->getPartitionHandle(PartitionID(state_.projection_id_,partition_offset)))==0){ - printf("The partition[%s] does not exists!\n",PartitionID(state_.projection_id_,partition_offset).getName().c_str()); - SetReturnStatus(false); - } - else{ - 
partition_reader_iterator_=partition_handle_->createAtomicReaderIterator(); - } - SetReturnStatus(true); - } - BarrierArrive(); - return GetReturnStatus(); +bool bottomLayerCollecting::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + remaining_block rb; + void* original_tuple; + void* tuple_new; + + // There are blocks which haven't been completely processed + if (atomicPopRemainingBlock(rb)) { + while ((original_tuple = rb.iterator->currentTuple()) > 0) { + const unsigned bytes = output_schema_->getTupleMaxSize(); + if ((tuple_new = block->allocateTuple(bytes)) > 0) { + // construct tuple_new + output_schema_->getcolumn(0) + .operate->assignment((void*)(&rb.chunk_offset), tuple_new); + output_schema_->getcolumn(1) + .operate->assignment(state_.schema_->getColumnAddess( + state_.key_indexing_, original_tuple), + output_schema_->getColumnAddess(1, tuple_new)); + output_schema_->getcolumn(2) + .operate->assignment((void*)(&rb.block_offset), + output_schema_->getColumnAddess(2, tuple_new)); + output_schema_->getcolumn(3) + .operate->assignment((void*)(&rb.tuple_offset), + output_schema_->getColumnAddess(3, tuple_new)); + rb.iterator->increase_cur_(); + rb.tuple_offset++; + + ///*for testing*/ + /// state_.schema_->displayTuple(original_tuple, " | "); + ///*for testing*/ + /// output_schema_->displayTuple(tuple_new, " | "); + ///*for testing*/ sleep(1); + } else { + atomicPushRemainingBlock(rb); + return true; + } + } + AtomicPushBlockStream(rb.block); + } + + // When the program arrivals here, it means that there is no remaining block + // or the remaining block is + // exhausted. 
What we should do is to ask a new block from the + // chunk_reader_iterator (or prartition_reader_iterator) + BlockStreamBase* block_for_asking = AtomicPopBlockStream(); + block_for_asking->setEmpty(); + rb.block = block_for_asking; + while (askForNextBlock(block_for_asking, rb)) { + // BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator + //= + // block_for_asking->createIterator(); + rb.iterator = block_for_asking->createIterator(); + while ((original_tuple = rb.iterator->currentTuple()) > 0) { + const unsigned bytes = output_schema_->getTupleMaxSize(); + if ((tuple_new = block->allocateTuple(bytes)) > 0) { + // construct tuple_new + output_schema_->getcolumn(0) + .operate->assignment((void*)(&rb.chunk_offset), tuple_new); + output_schema_->getcolumn(1) + .operate->assignment(state_.schema_->getColumnAddess( + state_.key_indexing_, original_tuple), + output_schema_->getColumnAddess(1, tuple_new)); + output_schema_->getcolumn(2) + .operate->assignment((void*)(&rb.block_offset), + output_schema_->getColumnAddess(2, tuple_new)); + output_schema_->getcolumn(3) + .operate->assignment((void*)(&rb.tuple_offset), + output_schema_->getColumnAddess(3, tuple_new)); + rb.iterator->increase_cur_(); + rb.tuple_offset++; + + ///*for testing*/ + /// state_.schema_->displayTuple(original_tuple, " | "); + ///*for testing*/ + /// output_schema_->displayTuple(tuple_new, " | "); + ///*for testing*/ sleep(1); + } else { + atomicPushRemainingBlock(rb); + return true; + } + } + // traverse_iterator->~BlockStreamTraverseIterator(); + block_for_asking->setEmpty(); + } + AtomicPushBlockStream(block_for_asking); + if (!block->Empty()) return true; + return false; } -bool bottomLayerCollecting::Next(BlockStreamBase* block) { - remaining_block rb; - void* original_tuple; - void* tuple_new; - - // There are blocks which haven't been completely processed - if (atomicPopRemainingBlock(rb)) - { - while ((original_tuple = rb.iterator->currentTuple()) > 0) - { - const unsigned bytes = 
output_schema_->getTupleMaxSize(); - if ((tuple_new = block->allocateTuple(bytes)) > 0) - { - // construct tuple_new - output_schema_->getcolumn(0).operate->assignment((void*)(& rb.chunk_offset), tuple_new); - output_schema_->getcolumn(1).operate->assignment(state_.schema_->getColumnAddess(state_.key_indexing_, original_tuple), output_schema_->getColumnAddess(1, tuple_new)); - output_schema_->getcolumn(2).operate->assignment((void*)(& rb.block_offset), output_schema_->getColumnAddess(2, tuple_new)); - output_schema_->getcolumn(3).operate->assignment((void*)(& rb.tuple_offset), output_schema_->getColumnAddess(3, tuple_new)); - rb.iterator->increase_cur_(); - rb.tuple_offset++; - -///*for testing*/ state_.schema_->displayTuple(original_tuple, " | "); -///*for testing*/ output_schema_->displayTuple(tuple_new, " | "); -///*for testing*/ sleep(1); - } - else - { - atomicPushRemainingBlock(rb); - return true; - } - } - AtomicPushBlockStream(rb.block); - } - - // When the program arrivals here, it means that there is no remaining block or the remaining block is - // exhausted. 
What we should do is to ask a new block from the chunk_reader_iterator (or prartition_reader_iterator) - BlockStreamBase* block_for_asking = AtomicPopBlockStream(); - block_for_asking->setEmpty(); - rb.block=block_for_asking; - while (askForNextBlock(block_for_asking, rb)) - { -// BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator = block_for_asking->createIterator(); - rb.iterator=block_for_asking->createIterator(); - while ((original_tuple = rb.iterator->currentTuple()) > 0) - { - const unsigned bytes = output_schema_->getTupleMaxSize(); - if ((tuple_new = block->allocateTuple(bytes)) > 0) - { - // construct tuple_new - output_schema_->getcolumn(0).operate->assignment((void*)(& rb.chunk_offset), tuple_new); - output_schema_->getcolumn(1).operate->assignment(state_.schema_->getColumnAddess(state_.key_indexing_, original_tuple), output_schema_->getColumnAddess(1, tuple_new)); - output_schema_->getcolumn(2).operate->assignment((void*)(& rb.block_offset), output_schema_->getColumnAddess(2, tuple_new)); - output_schema_->getcolumn(3).operate->assignment((void*)(& rb.tuple_offset), output_schema_->getColumnAddess(3, tuple_new)); - rb.iterator->increase_cur_(); - rb.tuple_offset++; - -///*for testing*/ state_.schema_->displayTuple(original_tuple, " | "); -///*for testing*/ output_schema_->displayTuple(tuple_new, " | "); -///*for testing*/ sleep(1); - } - else - { - atomicPushRemainingBlock(rb); - return true; - } - } -// traverse_iterator->~BlockStreamTraverseIterator(); - block_for_asking->setEmpty(); - } - AtomicPushBlockStream(block_for_asking); - if (!block->Empty()) - return true; - return false; -} +bool bottomLayerCollecting::Close(SegmentExecStatus* const exec_status) { + InitExpandedStatus(); + delete partition_reader_iterator_; + remaining_block_list_.clear(); + block_stream_list_.clear(); -bool bottomLayerCollecting::Close() { - InitExpandedStatus(); - delete partition_reader_iterator_; - remaining_block_list_.clear(); - 
block_stream_list_.clear(); - - return true; + return true; } -void bottomLayerCollecting::atomicPushRemainingBlock(remaining_block rb) -{ - lock_.acquire(); - remaining_block_list_.push_back(rb); - lock_.release(); +void bottomLayerCollecting::atomicPushRemainingBlock(remaining_block rb) { + lock_.acquire(); + remaining_block_list_.push_back(rb); + lock_.release(); } -bool bottomLayerCollecting::atomicPopRemainingBlock(remaining_block& rb) -{ - lock_.acquire(); - if (remaining_block_list_.size() > 0) - { - rb = remaining_block_list_.front(); - remaining_block_list_.pop_front(); - lock_.release(); - return true; - } - lock_.release(); - return false; +bool bottomLayerCollecting::atomicPopRemainingBlock(remaining_block& rb) { + lock_.acquire(); + if (remaining_block_list_.size() > 0) { + rb = remaining_block_list_.front(); + remaining_block_list_.pop_front(); + lock_.release(); + return true; + } + lock_.release(); + return false; } -void bottomLayerCollecting::AtomicPushBlockStream(BlockStreamBase* block) -{ - lock_.acquire(); - block_stream_list_.push_back(block); - lock_.release(); +void bottomLayerCollecting::AtomicPushBlockStream(BlockStreamBase* block) { + lock_.acquire(); + block_stream_list_.push_back(block); + lock_.release(); } -BlockStreamBase* bottomLayerCollecting::AtomicPopBlockStream() -{ - assert(!block_stream_list_.empty()); - lock_.acquire(); - BlockStreamBase* block = block_stream_list_.front(); - block_stream_list_.pop_front(); - lock_.release(); - return block; +BlockStreamBase* bottomLayerCollecting::AtomicPopBlockStream() { + assert(!block_stream_list_.empty()); + lock_.acquire(); + BlockStreamBase* block = block_stream_list_.front(); + block_stream_list_.pop_front(); + lock_.release(); + return block; } -bool bottomLayerCollecting::askForNextBlock(BlockStreamBase* & block, remaining_block& rb) -{ - if (chunk_reader_iterator_==0||chunk_reader_iterator_->nextBlock(block) == false) - { - chunk_reader_iterator_ = 
partition_reader_iterator_->nextChunk(); - - if (chunk_reader_iterator_ == 0){ - printf("Has been falsed!!!!!!!!!!!!!*&S*DF&(SD&F(S<><<<><><><><><>\n"); - return false; - } - chunk_reader_iterator_->nextBlock(block); - lock_.acquire(); - rb.chunk_offset = ++chunk_offset_; - block_offset_ = 0; - lock_.release(); - rb.block_offset = 0; - rb.tuple_offset = 0; - return true; - } - rb.chunk_offset = chunk_offset_; - lock_.acquire(); - rb.block_offset = ++block_offset_; - lock_.release(); - rb.tuple_offset = 0; - return true; +bool bottomLayerCollecting::askForNextBlock(BlockStreamBase*& block, + remaining_block& rb) { + if (chunk_reader_iterator_ == 0 || + chunk_reader_iterator_->NextBlock(block) == false) { + chunk_reader_iterator_ = partition_reader_iterator_->NextChunk(); + + if (chunk_reader_iterator_ == 0) { + printf("Has been falsed!!!!!!!!!!!!!*&S*DF&(SD&F(S<><<<><><><><><>\n"); + return false; + } + chunk_reader_iterator_->NextBlock(block); + lock_.acquire(); + rb.chunk_offset = ++chunk_offset_; + block_offset_ = 0; + lock_.release(); + rb.block_offset = 0; + rb.tuple_offset = 0; + return true; + } + rb.chunk_offset = chunk_offset_; + lock_.acquire(); + rb.block_offset = ++block_offset_; + lock_.release(); + rb.tuple_offset = 0; + return true; } +void bottomLayerCollecting::computeOutputSchema() { + std::vector column_list; + column_list.push_back(column_type(t_int)); // chunk offset + column_list.push_back(state_.schema_->getcolumn(state_.key_indexing_)); + column_list.push_back(column_type(t_u_smallInt)); // block offset + column_list.push_back(column_type(t_u_smallInt)); // tuple_offset -void bottomLayerCollecting::computeOutputSchema(){ - std::vector column_list; - column_list.push_back(column_type(t_int)); //chunk offset - column_list.push_back(state_.schema_->getcolumn(state_.key_indexing_)); - column_list.push_back(column_type(t_u_smallInt)); //block offset - column_list.push_back(column_type(t_u_smallInt)); //tuple_offset - - output_schema_ = new 
SchemaFix(column_list); + output_schema_ = new SchemaFix(column_list); } +bottomLayerSorting::bottomLayerSorting() { InitExpandedStatus(); } - - - - -bottomLayerSorting::bottomLayerSorting(){ - - InitExpandedStatus(); +bottomLayerSorting::bottomLayerSorting(State state) : state_(state) { + InitExpandedStatus(); } -bottomLayerSorting::bottomLayerSorting(State state) :state_(state) -{ - InitExpandedStatus(); +bottomLayerSorting::~bottomLayerSorting() {} + +bottomLayerSorting::State::State(Schema* schema, PhysicalOperatorBase* child, + unsigned block_size, + ProjectionID projection_id, + unsigned key_indexing, std::string index_name) + : schema_(schema), + child_(child), + block_size_(block_size), + projection_id_(projection_id), + key_indexing_(key_indexing), + index_name_(index_name) {} +bool bottomLayerSorting::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { + if (TryEntryIntoSerializedSection()) { + computeVectorSchema(); + const bool child_open_return = + state_.child_->Open(exec_status, partition_offset); + SetReturnStatus(child_open_return); + } + BarrierArrive(); + + // Construct the PartitionID for the next function to make up the ChunkID + partition_id_.projection_id = state_.projection_id_; + partition_id_.partition_off = partition_offset; + + // Open finished. 
Buffer all the child create dataset in different group + // according to their ChunkIDs + BlockStreamBase* block_for_asking = + BlockStreamBase::createBlock(state_.schema_, state_.block_size_); + block_for_asking->setEmpty(); + BlockStreamBase::BlockStreamTraverseIterator* iterator = NULL; + void* current_chunk = new ChunkOffset; + Operate* op_ = state_.schema_->getcolumn(1).operate->duplicateOperator(); + while (state_.child_->Next(exec_status, block_for_asking)) { + iterator = block_for_asking->createIterator(); + void* current_tuple = NULL; + while ((current_tuple = iterator->nextTuple()) != 0) { + state_.schema_->getColumnValue(0, current_tuple, current_chunk); + + if (tuples_in_chunk_.find(*(ChunkOffset*)current_chunk) == + tuples_in_chunk_.end()) { + vector tmp; + tuples_in_chunk_[*(ChunkOffset*)current_chunk] = tmp; + } + compare_node* c_node = + (compare_node*)malloc(sizeof(compare_node)); // newmalloc + c_node->vector_schema_ = vector_schema_; + c_node->tuple_ = malloc(vector_schema_->getTupleMaxSize()); // newmalloc + vector_schema_->copyTuple( + (char*)current_tuple + state_.schema_->getcolumn(0).get_length(), + c_node->tuple_); + // c_node->tuple_ = + // current_tuple+state_.schema_->getcolumn(0).get_length(); + // c_node->op_ = + // state_.schema_->getcolumn(1).operate->duplicateOperator(); + c_node->op_ = op_; + tuples_in_chunk_.find(*(ChunkOffset*)current_chunk) + ->second.push_back(c_node); + + // for testing begin + // if ((*(ChunkOffset*)current_chunk) == 0) + // { + // cout << "current chunk: " << + //*(ChunkOffset*)current_chunk << " tuple: "; + // vector_schema_->displayTuple(current_tuple+state_.schema_->getcolumn(0).get_length(), + //" | "); + // vector_schema_->displayTuple(tuples_in_chunk_.find(*(ChunkOffset*)current_chunk)->second.back()->tuple_, + //" | "); + // sleep(1); + // } + // for testing end + } + block_for_asking->setEmpty(); + } + + // for testing begin + // sleep(10000); + // cout << "Chunk Num: " << tuples_in_chunk_.size() << 
endl; + // sleep(1000); + // for testing end + + // Sorting the tuples in each chunk + /*for testing*/ cout << "Chunk num: " << tuples_in_chunk_.size() << endl; + for (std::map >::iterator iter = + tuples_in_chunk_.begin(); + iter != tuples_in_chunk_.end(); iter++) { + ///*for testing*/ cout << "chunk id: " << *(unsigned + /// short*)iter->first + ///<< + /// endl; + // for testing begin + cout << "Chunk size: " << iter->second.size() << endl; + // for (unsigned i = 0; i < iter->second.size(); i++) + // { + // vector_schema_->displayTuple(iter->second[i]->tuple_, + //"\t"); + //// sleep(1); + // } + // sleep(1000); + // for testing end + + stable_sort(iter->second.begin(), iter->second.end(), compare); + + // for testing begin + // for (unsigned i = 0; i < iter->second.size(); i++) + // { + // vector_schema_->displayTuple(iter->second[i]->tuple_, + //"\t"); + //// sleep(1); + // } + // sleep(1000); + // for testing end + } + + return GetReturnStatus(); } -bottomLayerSorting::~bottomLayerSorting() -{ - +bool bottomLayerSorting::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + switch (vector_schema_->getcolumn(0).type) { + case t_int: { + map csb_index_list; + csb_index_list.clear(); + for (std::map >::iterator iter = + tuples_in_chunk_.begin(); + iter != tuples_in_chunk_.end(); iter++) { + ChunkID* chunk_id = new ChunkID(); + chunk_id->partition_id = partition_id_; + chunk_id->chunk_off = iter->first; + CSBPlusTree* csb_tree = indexBuilding(iter->second); + csb_index_list[*chunk_id] = csb_tree; + + // char ch; + // cout << "Input any key to print the index(except + // 0 + // for not print): "; + // cin >> ch; + // if (ch != '0') + // csb_tree->printTree(); + } + // IndexManager::getInstance()->addIndexToList(state_.key_indexing_, + // csb_index_list); + IndexManager::getInstance()->insertIndexToList( + state_.index_name_, state_.key_indexing_, csb_index_list); + break; + } + default: { + cout << "[ERROR: 
(CSBIndexBuilding.cpp->bottomLayerSorting->next()]: The " + "data type is not defined!\n"; + break; + } + } + return false; + + /* original code for testing the Logical CSB Index Building iterator + for (std::map >::iterator iter = + tuples_in_chunk_.begin(); iter != tuples_in_chunk_.end(); iter++) + { + switch (vector_schema_->getcolumn(0).type) + { + case t_int: + { + CSBPlusTree* csb_tree = + indexBuilding(iter->second); + + //for testing the search result + vector ret; + while (true) + { + ret.clear(); + int sec_code = 0; + cout << "\nPlease input the sec_code for + searching: "; + cin >> sec_code; + ret = csb_tree->Search(sec_code); + if (ret.size() == 0) + { + cout << "The result set is NULL!\n"; + continue; + } + cout << "The result set size is: " << + ret.size() << "\nHow many to print? "; + cin >> sec_code; + for (int i = 0; i < ret.size() && i < + sec_code; i++) + cout << "<" << ret[i]->_block_off << + ", " << ret[i]->_tuple_off << ">\t"; + cout << endl; + } + //for testing end + + //register csb_tree into index_manager + + break; + } + default: + { + cout << "[ERROR: + (CSBIndexBuilding.cpp->bottomLayerSorting->next()]: The data type is not + defined!\n"; + break; + } + } + } + return false; + */ } -bottomLayerSorting::State::State(Schema* schema, PhysicalOperatorBase* child, unsigned block_size, ProjectionID projection_id, unsigned key_indexing, std::string index_name) -: schema_(schema), child_(child), block_size_(block_size), projection_id_(projection_id), key_indexing_(key_indexing), index_name_(index_name) { - -} -bool bottomLayerSorting::Open(const PartitionOffset& partition_offset) -{ - if (TryEntryIntoSerializedSection()) - { - computeVectorSchema(); - const bool child_open_return = state_.child_->Open(partition_offset); - SetReturnStatus(child_open_return); - } - BarrierArrive(); - - //Construct the PartitionID for the next function to make up the ChunkID - partition_id_.projection_id = state_.projection_id_; - partition_id_.partition_off = 
partition_offset; - - // Open finished. Buffer all the child create dataset in different group according to their ChunkIDs - BlockStreamBase* block_for_asking = BlockStreamBase::createBlock(state_.schema_, state_.block_size_); - block_for_asking->setEmpty(); - BlockStreamBase::BlockStreamTraverseIterator* iterator = NULL; - void* current_chunk = new ChunkOffset; - Operate* op_ = state_.schema_->getcolumn(1).operate->duplicateOperator(); - while (state_.child_->Next(block_for_asking)) - { - iterator = block_for_asking->createIterator(); - void* current_tuple = NULL; - while((current_tuple = iterator->nextTuple()) != 0) - { - state_.schema_->getColumnValue(0, current_tuple, current_chunk); - - if(tuples_in_chunk_.find(*(ChunkOffset*)current_chunk)==tuples_in_chunk_.end()){ - vector tmp; - tuples_in_chunk_[*(ChunkOffset*)current_chunk] = tmp; - } - compare_node* c_node = (compare_node*)malloc(sizeof(compare_node)); //newmalloc - c_node->vector_schema_ = vector_schema_; - c_node->tuple_ = malloc(vector_schema_->getTupleMaxSize()); //newmalloc - vector_schema_->copyTuple((char*)current_tuple+state_.schema_->getcolumn(0).get_length(),c_node->tuple_); -// c_node->tuple_ = current_tuple+state_.schema_->getcolumn(0).get_length(); -// c_node->op_ = state_.schema_->getcolumn(1).operate->duplicateOperator(); - c_node->op_ = op_; - tuples_in_chunk_.find(*(ChunkOffset*)current_chunk)->second.push_back(c_node); - -//for testing begin -// if ((*(ChunkOffset*)current_chunk) == 0) -// { -// cout << "current chunk: " << *(ChunkOffset*)current_chunk << " tuple: "; -// vector_schema_->displayTuple(current_tuple+state_.schema_->getcolumn(0).get_length(), " | "); -// vector_schema_->displayTuple(tuples_in_chunk_.find(*(ChunkOffset*)current_chunk)->second.back()->tuple_, " | "); -// sleep(1); -// } -//for testing end - - } - block_for_asking->setEmpty(); - } - -//for testing begin -// sleep(10000); -// cout << "Chunk Num: " << tuples_in_chunk_.size() << endl; -// sleep(1000); -//for 
testing end - - // Sorting the tuples in each chunk -/*for testing*/ cout << "Chunk num: " << tuples_in_chunk_.size() << endl; - for (std::map >::iterator iter = tuples_in_chunk_.begin(); iter != tuples_in_chunk_.end(); iter++) - { -///*for testing*/ cout << "chunk id: " << *(unsigned short*)iter->first << endl; -//for testing begin - cout << "Chunk size: " << iter->second.size() << endl; -// for (unsigned i = 0; i < iter->second.size(); i++) -// { -// vector_schema_->displayTuple(iter->second[i]->tuple_, "\t"); -//// sleep(1); -// } -// sleep(1000); -//for testing end - - stable_sort(iter->second.begin(), iter->second.end(), compare); - -//for testing begin -// for (unsigned i = 0; i < iter->second.size(); i++) -// { -// vector_schema_->displayTuple(iter->second[i]->tuple_, "\t"); -//// sleep(1); -// } -// sleep(1000); -//for testing end - } - - - return GetReturnStatus(); +bool bottomLayerSorting::Close(SegmentExecStatus* const exec_status) { + InitExpandedStatus(); + state_.child_->Close(exec_status); + cout << "bottomLayerSorting close finished!\n"; + return true; } -bool bottomLayerSorting::Next(BlockStreamBase* block) -{ - switch (vector_schema_->getcolumn(0).type) - { - case t_int: - { - map csb_index_list; - csb_index_list.clear(); - for (std::map >::iterator iter = tuples_in_chunk_.begin(); iter != tuples_in_chunk_.end(); iter++) - { - ChunkID* chunk_id = new ChunkID(); - chunk_id->partition_id = partition_id_; - chunk_id->chunk_off = iter->first; - CSBPlusTree* csb_tree = indexBuilding(iter->second); - csb_index_list[*chunk_id] = csb_tree; - -// char ch; -// cout << "Input any key to print the index(except 0 for not print): "; -// cin >> ch; -// if (ch != '0') -// csb_tree->printTree(); - - } -// IndexManager::getInstance()->addIndexToList(state_.key_indexing_, csb_index_list); - IndexManager::getInstance()->insertIndexToList(state_.index_name_, state_.key_indexing_, csb_index_list); - break; - } - default: - { - cout << "[ERROR: 
(CSBIndexBuilding.cpp->bottomLayerSorting->next()]: The data type is not defined!\n"; - break; - } - } - return false; - - -/* original code for testing the Logical CSB Index Building iterator - for (std::map >::iterator iter = tuples_in_chunk_.begin(); iter != tuples_in_chunk_.end(); iter++) - { - switch (vector_schema_->getcolumn(0).type) - { - case t_int: - { - CSBPlusTree* csb_tree = indexBuilding(iter->second); - -//for testing the search result - vector ret; - while (true) - { - ret.clear(); - int sec_code = 0; - cout << "\nPlease input the sec_code for searching: "; - cin >> sec_code; - ret = csb_tree->Search(sec_code); - if (ret.size() == 0) - { - cout << "The result set is NULL!\n"; - continue; - } - cout << "The result set size is: " << ret.size() << "\nHow many to print? "; - cin >> sec_code; - for (int i = 0; i < ret.size() && i < sec_code; i++) - cout << "<" << ret[i]->_block_off << ", " << ret[i]->_tuple_off << ">\t"; - cout << endl; - } -//for testing end - - //register csb_tree into index_manager - - break; - } - default: - { - cout << "[ERROR: (CSBIndexBuilding.cpp->bottomLayerSorting->next()]: The data type is not defined!\n"; - break; - } - } - } - return false; -*/ +bool bottomLayerSorting::compare(const compare_node* a, const compare_node* b) { + const void* left = a->vector_schema_->getColumnAddess(0, a->tuple_); + const void* right = b->vector_schema_->getColumnAddess(0, b->tuple_); + return a->op_->less(left, right); } -bool bottomLayerSorting::Close() -{ - InitExpandedStatus(); - state_.child_->Close(); - cout << "bottomLayerSorting close finished!\n"; - return true; +template +CSBPlusTree* bottomLayerSorting::indexBuilding( + vector chunk_tuples) { + data_offset* aray = new data_offset[chunk_tuples.size()]; + ///*for testing*/ cout << "chunk data size: " << chunk_tuples.size() << + /// endl + ///<< endl; + for (unsigned i = 0; i < chunk_tuples.size(); i++) { + aray[i]._key = + *(T*)(vector_schema_->getColumnAddess(0, 
chunk_tuples[i]->tuple_)); + aray[i]._block_off = *(unsigned short*)(vector_schema_->getColumnAddess( + 1, chunk_tuples[i]->tuple_)); + aray[i]._tuple_off = *(unsigned short*)(vector_schema_->getColumnAddess( + 2, chunk_tuples[i]->tuple_)); + ///*for testing*/ cout << aray[i]._key << "\t" << + /// aray[i]._block_off + ///<< + ///"\t" << aray[i]._tuple_off << endl; + } + CSBPlusTree* csb_tree = new CSBPlusTree(); + // csb_tree->BulkLoad(aray, chunk_tuples.size()); + cout << "*************************CSB indexing build " + "successfully!*************************\n"; + return csb_tree; } -bool bottomLayerSorting::compare(const compare_node* a, const compare_node* b) -{ - const void* left = a->vector_schema_->getColumnAddess(0, a->tuple_); - const void* right = b->vector_schema_->getColumnAddess(0, b->tuple_); - return a->op_->less(left, right); - -} - -template -CSBPlusTree* bottomLayerSorting::indexBuilding(vector chunk_tuples) -{ - data_offset* aray = new data_offset [chunk_tuples.size()]; -///*for testing*/ cout << "chunk data size: " << chunk_tuples.size() << endl << endl; - for (unsigned i = 0; i < chunk_tuples.size(); i++) - { - aray[i]._key = *(T*)(vector_schema_->getColumnAddess(0, chunk_tuples[i]->tuple_)); - aray[i]._block_off = *(unsigned short*)(vector_schema_->getColumnAddess(1, chunk_tuples[i]->tuple_)); - aray[i]._tuple_off = *(unsigned short*)(vector_schema_->getColumnAddess(2, chunk_tuples[i]->tuple_)); -///*for testing*/ cout << aray[i]._key << "\t" << aray[i]._block_off << "\t" << aray[i]._tuple_off << endl; - } - CSBPlusTree* csb_tree = new CSBPlusTree(); -// csb_tree->BulkLoad(aray, chunk_tuples.size()); - cout << "*************************CSB indexing build successfully!*************************\n"; - return csb_tree; -} - -void bottomLayerSorting::computeVectorSchema(){ - std::vector column_list; - column_list.push_back(state_.schema_->getcolumn(1)); - column_list.push_back(column_type(t_u_smallInt)); //block offset - 
column_list.push_back(column_type(t_u_smallInt)); //tuple_offset - - vector_schema_ = new SchemaFix(column_list); +void bottomLayerSorting::computeVectorSchema() { + std::vector column_list; + column_list.push_back(state_.schema_->getcolumn(1)); + column_list.push_back(column_type(t_u_smallInt)); // block offset + column_list.push_back(column_type(t_u_smallInt)); // tuple_offset + vector_schema_ = new SchemaFix(column_list); } diff --git a/IndexManager/CSBIndexBuilding.h b/IndexManager/CSBIndexBuilding.h index 07464197f..f75537029 100644 --- a/IndexManager/CSBIndexBuilding.h +++ b/IndexManager/CSBIndexBuilding.h @@ -14,12 +14,14 @@ #include "../common/data_type.h" #include "../common/Schema/Schema.h" #include "../common/Block/BlockStream.h" +#include "../exec_tracker/segment_exec_status.h" #include "../physical_operator/physical_operator.h" #include "../storage/PartitionStorage.h" #include "../storage/ChunkStorage.h" #include "CSBPlusTree.h" using claims::physical_operator::PhysicalOperator; +using claims::SegmentExecStatus; template CSBPlusTree* indexBuilding(Schema* schema, vector chunk_tuples); @@ -72,14 +74,15 @@ class bottomLayerCollecting : public PhysicalOperator { bottomLayerCollecting(); bottomLayerCollecting(State state); virtual ~bottomLayerCollecting(); - bool Open(const PartitionOffset& partition_offset = 0); - bool Next(BlockStreamBase* block); - bool Close(); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const exec_status); void Print() { printf("CCSBIndexingBuilding\n"); } private: State state_; - PartitionStorage::PartitionReaderItetaor* partition_reader_iterator_; + PartitionStorage::PartitionReaderIterator* partition_reader_iterator_; ChunkReaderIterator* chunk_reader_iterator_; std::list remaining_block_list_; std::list block_stream_list_; @@ -150,9 +153,10 @@ class bottomLayerSorting : 
public PhysicalOperator { bottomLayerSorting(State state); virtual ~bottomLayerSorting(); - bool Open(const PartitionOffset& partition_offset = 0); - bool Next(BlockStreamBase* block); - bool Close(); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const exec_status); private: static bool compare(const compare_node* a, const compare_node* b); diff --git a/IndexManager/IndexScanIterator.cpp b/IndexManager/IndexScanIterator.cpp index 46bc11c52..69de3de10 100644 --- a/IndexManager/IndexScanIterator.cpp +++ b/IndexManager/IndexScanIterator.cpp @@ -41,12 +41,12 @@ bool IndexScanIterator::Open(const PartitionOffset& partition_off) /* this is the first expanded thread*/ csb_index_list_ = IndexManager::getInstance()->getAttrIndex(state_.index_id_); PartitionStorage* partition_handle_; - if((partition_handle_=BlockManager::getInstance()->getPartitionHandle(PartitionID(state_.projection_id_,partition_off)))==0){ + if((partition_handle_=BlockManager::getInstance()->GetPartitionHandle(PartitionID(state_.projection_id_,partition_off)))==0){ printf("The partition[%s] does not exists!\n",PartitionID(state_.projection_id_,partition_off).getName().c_str()); SetReturnStatus(false); } else{ - partition_reader_iterator_=partition_handle_->createAtomicReaderIterator(); + partition_reader_iterator_=partition_handle_->CreateAtomicReaderIterator(); // chunk_reader_iterator_ = partition_reader_iterator_->nextChunk(); } SetReturnStatus(true); @@ -177,13 +177,13 @@ bool IndexScanIterator::atomicPopRemainingBlock(remaining_block& rb) bool IndexScanIterator::askForNextBlock(remaining_block& rb) { - if (chunk_reader_iterator_ == 0 || chunk_reader_iterator_->nextBlock(rb.block) == false || rb.iter_result_map == rb.result_set->end()) + if (chunk_reader_iterator_ == 0 || chunk_reader_iterator_->NextBlock(rb.block) == false || 
rb.iter_result_map == rb.result_set->end()) { - chunk_reader_iterator_ = partition_reader_iterator_->nextChunk(); + chunk_reader_iterator_ = partition_reader_iterator_->NextChunk(); if (chunk_reader_iterator_ == 0) return false; - chunk_reader_iterator_->nextBlock(rb.block); + chunk_reader_iterator_->NextBlock(rb.block); rb.block_off = 0; //search the CSB+-Tree index to get the new chunk's search-result diff --git a/IndexManager/IndexScanIterator.h b/IndexManager/IndexScanIterator.h index 430dd8db0..1ad1a02f3 100644 --- a/IndexManager/IndexScanIterator.h +++ b/IndexManager/IndexScanIterator.h @@ -132,7 +132,7 @@ class IndexScanIterator : public PhysicalOperator { private: State state_; - PartitionStorage::PartitionReaderItetaor* partition_reader_iterator_; + PartitionStorage::PartitionReaderIterator* partition_reader_iterator_; ChunkReaderIterator* chunk_reader_iterator_; std::map csb_index_list_; diff --git a/IndexManager/Makefile.am b/IndexManager/Makefile.am index 9040f0425..5d712e82f 100644 --- a/IndexManager/Makefile.am +++ b/IndexManager/Makefile.am @@ -2,8 +2,7 @@ AM_CPPFLAGS= -fPIC -fpermissive\ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -21,11 +20,10 @@ LDADD = ../BlockStreamIterator/libblockstreamiterator.a \ ../common/libcommon.a \ ../common/Schema/libschema.a \ ../common/Block/libblock.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libindexmanager.a libindexmanager_a_SOURCES = \ diff --git a/IndexManager/Test/Makefile.am b/IndexManager/Test/Makefile.am index eae2806bc..27ce33838 100644 --- 
a/IndexManager/Test/Makefile.am +++ b/IndexManager/Test/Makefile.am @@ -2,8 +2,7 @@ AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -23,11 +22,10 @@ LDADD = \ ../../Resource/libresource.a \ ../../common/libcommon.a \ ../../utility/libutility.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libtest.a libtest_a_SOURCES = diff --git a/Makefile.am b/Makefile.am index 9a067928e..c10d72d8c 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,16 +1,17 @@ AM_CPPFLAGS=-fPIC -DTHERON_XS\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${GTEST_HOME}/include - +-I${GTEST_HOME}/include \ +-I${CAF_HOME}/libcaf_io \ +-I${CAF_HOME}/libcaf_core #-L/usr/local/lib \ #-I/usr/local/include -AM_LDFLAGS= -lc -lm -lrt -lpthread -lboost_serialization -lboost_date_time -lboost_system \ - -lconfig++ -lxs -lnuma -lreadline -lhistory -lz -ltinfo -Wl,--no-as-needed -ldl -rdynamic -lglog +AM_LDFLAGS= -lc -lm -lrt -lcaf_core -lcaf_io -lpthread -lboost_serialization -lboost_date_time -lboost_system \ + -lconfig++ -lxs -lnuma -lreadline -lhistory -lz -ltinfo -Wl,--no-as-needed -ldl -rdynamic -lglog + if OPT_TCMALLOC AM_CPPFLAGS+=-fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free @@ -21,6 +22,7 @@ LDADD = \ Test/libtest.a \ Test/common/libcommon.a \ Test/utility/libutility.a \ + node_manager/libnodemanager.a \ 
common/serialization/libserialization.a \ Client/libclient.a \ Client/Test/libtest.a \ @@ -33,6 +35,7 @@ LDADD = \ sql_parser/Test/libparser_test.a \ sql_parser/parser/libparser.a \ sql_parser/ast_node/libast_node.a \ + exec_tracker/libexec_tracker.a \ logical_operator/liblogicalqueryplan.a \ physical_operator/libphysicalqueryplan.a \ storage/libstorage.a \ @@ -55,13 +58,12 @@ LDADD = \ common/types/Test/libtest.a \ common/types/ttmath/libttmath.a \ utility/libutility.a \ - ${THERON_HOME}/Lib/libtherond.a \ + ${CAF_HOME}/build/lib/libcaf_core.so \ + ${CAF_HOME}/build/lib/libcaf_io.so \ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${BOOST_HOME}/stage/lib/libboost_system.a \ ${BOOST_HOME}/stage/lib/libboost_system.so \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.so \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a \ + ${HADOOP_HOME}/lib/native/libhdfs.so\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${GTEST_HOME}/libgtest.a \ ${GLOG_HOME}/.libs/libglog.so.0 \ @@ -79,7 +81,7 @@ include_HEADERS = Config.h \ bin_PROGRAMS = claimsserver \ client \ - test + test client_SOURCES = Client.cpp \ Environment.cpp \ @@ -96,13 +98,14 @@ test_SOURCES = Test/gtest_main.cpp\ IDsGenerator.cpp \ Config.cpp - - +#node_SOURCES = Config.cpp \ +# node_manager/node_main.cpp + SUBDIRS= catalog Client common Daemon Executor IndexManager\ loader physical_operator logical_operator Resource \ -storage Test utility codegen sql_parser stmt_handler +storage Test utility codegen sql_parser stmt_handler node_manager exec_tracker DIST_SUBDIRS = catalog Client common Daemon Executor IndexManager\ loader physical_operator logical_operator Resource \ -storage Test utility codegen sql_parser stmt_handler +storage Test utility codegen sql_parser stmt_handler node_manager exec_tracker diff --git a/Resource/BufferManager.cpp b/Resource/BufferManager.cpp index fa12cf181..b7ce400da 100755 --- a/Resource/BufferManager.cpp +++ 
b/Resource/BufferManager.cpp @@ -6,53 +6,74 @@ */ #include "BufferManager.h" -BufferManager* BufferManager::instance_=0; -BufferManager::BufferManager() { - totol_capacity_=(unsigned long )1024*1024*1024*80; - storage_budget_max_=(unsigned long )1024*1024*1024*60; - storage_budget_min_=(unsigned long )1024*1024*1024*60; - storage_used_=0; - intermediate_buffer_budget_max_=896*1024*1024; - intermediate_buffer_budget_min_=896*1024*1024; - intermediate_buffer_used_=0; - memory_storage_=MemoryChunkStore::getInstance(); - logging_=new BufferManagerLogging(); - logging_->log("Initialized!"); +#include +#include + +using std::__basic_file; +using std::basic_fstream; +BufferManager* BufferManager::instance_ = NULL; +BufferManager::BufferManager() { + page_size = sysconf(_SC_PAGESIZE); + total_memory = sysconf(_SC_PHYS_PAGES); + totol_capacity_ = (unsigned long)total_memory * page_size; + storage_budget_max_ = totol_capacity_; + storage_budget_min_ = totol_capacity_; + // totol_capacity_ = (unsigned long)1024 * 1024 * 1024 * 80; + // storage_budget_max_ = (unsigned long)1024 * 1024 * 1024 * 60; + // storage_budget_min_ = (unsigned long)1024 * 1024 * 1024 * 60; + storage_used_ = 0; + intermediate_buffer_budget_max_ = 896 * 1024 * 1024; + intermediate_buffer_budget_min_ = 896 * 1024 * 1024; + intermediate_buffer_used_ = 0; + memory_storage_ = MemoryChunkStore::GetInstance(); + logging_ = new BufferManagerLogging(); + logging_->log("Initialized!"); + logging_->log("%d MB total memory", totol_capacity_ / 1024 / 1024); } BufferManager::~BufferManager() { - // TODO Auto-generated destructor stub - instance_=0; - delete logging_; + // TODO Auto-generated destructor stub + instance_ = NULL; + delete logging_; } -BufferManager* BufferManager::getInstance(){ - if(instance_==0){ - instance_= new BufferManager(); - } - return instance_; +BufferManager* BufferManager::getInstance() { + if (NULL == instance_) { + instance_ = new BufferManager(); + } + return instance_; } -unsigned long 
int BufferManager::getTotalUsed()const{ - return intermediate_buffer_used_+storage_used_; +unsigned long int BufferManager::getTotalUsed() const { + return intermediate_buffer_used_ + storage_used_; } -bool BufferManager::applyStorageDedget(unsigned long size){ - bool ret; - lock_.acquire(); - if(storage_used_+size<=storage_budget_max_){ - storage_used_+=size; - ret=true; - } - logging_->log("%d MB applied, %d MB left!",size/1024/1024,(storage_budget_max_-storage_used_)/1024/1024); - lock_.release(); - return ret; +bool BufferManager::applyStorageDedget(unsigned long size) { + bool ret = false; + actucl_free_memory = sysconf(_SC_AVPHYS_PAGES) * page_size; + lock_.acquire(); + + if (size <= (actucl_free_memory)) { + if (storage_used_ + size <= + (storage_budget_max_ * Config::memory_utilization / 100)) { + storage_used_ += size; + ret = true; + } + } + logging_->log("%d MB applied, %d MB left!", size / 1024 / 1024, + (storage_budget_max_ - storage_used_) / 1024 / 1024); + logging_->log("%d MB actucl left free memory", + actucl_free_memory / 1024 / 1024); + lock_.release(); + return ret; } -void BufferManager::returnStorageBudget(unsigned long size){ - lock_.acquire(); - storage_used_-=size; - lock_.release(); +void BufferManager::returnStorageBudget(unsigned long size) { + lock_.acquire(); + storage_used_ -= size; + lock_.release(); } -unsigned BufferManager::getStorageMemoryBudegeInMilibyte()const{ - return storage_budget_max_/1024/1024; +unsigned BufferManager::getStorageMemoryBudegeInMilibyte() const { + logging_->log("%d MB MAX STORAGE BUDGET memory", + storage_budget_max_ / 1024 / 1024); + return storage_budget_max_ / 1024 / 1024; } diff --git a/Resource/BufferManager.h b/Resource/BufferManager.h index ac63a1bc9..ff5e3f67a 100755 --- a/Resource/BufferManager.h +++ b/Resource/BufferManager.h @@ -7,34 +7,41 @@ #ifndef BUFFERMANAGER_H_ #define BUFFERMANAGER_H_ -#include "../storage/MemoryStore.h" +#include "../storage/MemoryManager.h" #include "../utility/lock.h" 
#include "../common/Logging.h" +#include "../Config.h" +#include "unistd.h" #ifdef DMALLOC #include "dmalloc.h" #endif class BufferManager { -public: - static BufferManager* getInstance(); - virtual ~BufferManager(); - bool applyStorageDedget(unsigned long size); - unsigned getStorageMemoryBudegeInMilibyte()const; - void returnStorageBudget(unsigned long size); -private: - BufferManager(); - unsigned long int getTotalUsed()const; -private: - unsigned long int totol_capacity_; - unsigned long int storage_budget_max_; - unsigned long int storage_used_; - unsigned long int storage_budget_min_; - unsigned long int intermediate_buffer_budget_max_; - unsigned long int intermediate_buffer_budget_min_; - unsigned long int intermediate_buffer_used_; - MemoryChunkStore* memory_storage_; - static BufferManager* instance_; - Lock lock_; - Logging* logging_; + public: + static BufferManager* getInstance(); + virtual ~BufferManager(); + bool applyStorageDedget(unsigned long size); + unsigned getStorageMemoryBudegeInMilibyte() const; + void returnStorageBudget(unsigned long size); + + private: + BufferManager(); + unsigned long int getTotalUsed() const; + + private: + unsigned long int totol_capacity_; + unsigned long int storage_budget_max_; + unsigned long int storage_used_; + unsigned long int storage_budget_min_; + unsigned long int intermediate_buffer_budget_max_; + unsigned long int intermediate_buffer_budget_min_; + unsigned long int intermediate_buffer_used_; + long long total_memory; + long long page_size; + long long actucl_free_memory; + MemoryChunkStore* memory_storage_; + static BufferManager* instance_; + Lock lock_; + Logging* logging_; }; #endif /* BUFFERMANAGER_H_ */ diff --git a/Resource/Makefile.am b/Resource/Makefile.am index beb1ec3d1..6ed19a2de 100644 --- a/Resource/Makefile.am +++ b/Resource/Makefile.am @@ -1,9 +1,8 @@ AM_CPPFLAGS= -fPIC -fpermissive \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ 
-I${JAVA_HOME}/include/linux \ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization @@ -29,4 +28,3 @@ libresouce_a_SOURCES = \ BufferManager.cpp BufferManager.h \ CPUResource.cpp CPUResource.h -libresouce_a_LIBADD = ${THERON_HOME}/Lib/libtherond.a diff --git a/Resource/NodeTracker.cpp b/Resource/NodeTracker.cpp index 7a1e44de9..f47cc3a86 100755 --- a/Resource/NodeTracker.cpp +++ b/Resource/NodeTracker.cpp @@ -6,46 +6,63 @@ */ #include "NodeTracker.h" -NodeTracker* NodeTracker::instance_=0; -NodeTracker::NodeTracker():allocate_cur_(0) { -} -NodeTracker* NodeTracker::GetInstance(){ - if(instance_==0){ - instance_=new NodeTracker(); - } - return instance_; +#include +#include + +#include "../Environment.h" +NodeTracker* NodeTracker::instance_ = 0; +NodeTracker::NodeTracker() : allocate_cur_(0) {} +NodeTracker* NodeTracker::GetInstance() { + if (instance_ == 0) { + instance_ = new NodeTracker(); + } + return instance_; } NodeTracker::~NodeTracker() { - // TODO Auto-generated destructor stub + // TODO Auto-generated destructor stub } -int NodeTracker::RegisterNode(NodeAddress new_node_address){ - if(address_to_id_.find(new_node_address)!=address_to_id_.end()){ - /*node_name already exists.*/ - return -1; - } - const int allocated_id=allocate_cur_++; - address_to_id_[new_node_address]=allocated_id; - return allocated_id; +int NodeTracker::RegisterNode(NodeAddress new_node_address) { + assert(false); + if (address_to_id_.find(new_node_address) != address_to_id_.end()) { + /*node_name already exists.*/ + return -1; + } + const int allocated_id = allocate_cur_++; + address_to_id_[new_node_address] = allocated_id; + return allocated_id; } -std::string NodeTracker::GetNodeIP(const NodeID& target)const{ - boost::unordered_map::const_iterator it=address_to_id_.cbegin(); - while(it!=address_to_id_.cend()){ - if(it->second==target) - return it->first.ip; - it++; - } - return NULL;//TODO avoid return NULL in case of no matching target by changing the 
return type to be boolean.*/ -// return NULL; +std::string NodeTracker::GetNodeIP(const NodeID& target) const { +#ifdef THERON + boost::unordered_map::const_iterator it = + address_to_id_.cbegin(); + while (it != address_to_id_.cend()) { + if (it->second == target) return it->first.ip; + it++; + } + return NULL; // TODO avoid return NULL in case of no matching target by + // changing the return type to be boolean.*/ + // return NULL; +#else + return Environment::getInstance() + ->get_master_node() + ->GetNodeAddrFromId(target) + .first; +#endif } -std::vector NodeTracker::GetNodeIDList()const{ - std::vector ret; - boost::unordered_map::const_iterator it=address_to_id_.cbegin(); - while(it!=address_to_id_.cend()){ - ret.push_back(it->second); - it++; - } - return ret; +std::vector NodeTracker::GetNodeIDList() const { + std::vector ret; +#ifdef THERON + boost::unordered_map::const_iterator it = + address_to_id_.cbegin(); + while (it != address_to_id_.cend()) { + ret.push_back(it->second); + it++; + } +#else + ret = Environment::getInstance()->get_slave_node()->GetAllNodeID(); +#endif + return ret; } diff --git a/Resource/ResourceManagerMaster.cpp b/Resource/ResourceManagerMaster.cpp index 96d0663c5..5cbb41f19 100755 --- a/Resource/ResourceManagerMaster.cpp +++ b/Resource/ResourceManagerMaster.cpp @@ -6,139 +6,97 @@ */ #include "ResourceManagerMaster.h" -#include "../Environment.h" -ResourceManagerMaster::ResourceManagerMaster() { - node_tracker_=NodeTracker::GetInstance(); - logging_=new ResourceManagerMasterLogging(); - endpoint_=Environment::getInstance()->getEndPoint(); - framework=new Theron::Framework(*endpoint_); - acter_=new ResourceManagerMasterActor(framework,this); -} +#include -ResourceManagerMaster::~ResourceManagerMaster() { - acter_->~ResourceManagerMasterActor(); - framework->~Framework(); - node_tracker_->~NodeTracker(); +#include "../Environment.h" +ResourceManagerMaster::ResourceManagerMaster() { + node_tracker_ = NodeTracker::GetInstance(); } 
-NodeID ResourceManagerMaster::RegisterNewSlave(NodeAddress new_slave_address){ - NodeID new_node_id=node_tracker_->RegisterNode(new_slave_address); - if(new_node_id==-1){ - /* Node with the given ip has already existed.*/ - logging_->elog("[%s:%s] has already exists",new_slave_address.ip.c_str(),new_slave_address.port.c_str()); - return false; - } - -// -// if(node_to_resourceinfo_.find(new_node_id)!=node_to_resourceinfo_.end()){ -// /*The slaveId has already existed.*/ -// return false; -// } - node_to_resourceinfo_[new_node_id]=new InstanceResourceInfo(); - - logging_->log("[ip=%s:%s, id=%d] is successfully registered.",new_slave_address.ip.c_str(),new_slave_address.port.c_str(),new_node_id); -// hashmap::iterator it=node_to_resourceinfo_.begin(); -// while(it!=node_to_resourceinfo_.end()){ -// printf("%d--> ",it->first); -// it++; -// } -// printf("\n"); +ResourceManagerMaster::~ResourceManagerMaster() {} - return new_node_id; +void ResourceManagerMaster::RegisterNewSlave(const NodeID new_node_id) { + node_to_resourceinfo_[new_node_id] = new InstanceResourceInfo(); } -std::vector ResourceManagerMaster::getSlaveIDList(){ - std::vector ret; - boost::unordered_map::iterator it=node_to_resourceinfo_.begin(); - while(it!=node_to_resourceinfo_.end()){ - ret.push_back(it->first); - it++; - } - return ret; -} -bool ResourceManagerMaster::ApplyDiskBuget(NodeID target, unsigned size_in_mb){ - if(node_to_resourceinfo_.find(target)==node_to_resourceinfo_.cend()){ - /* target slave does not exist.*/ - return false; - } - if(node_to_resourceinfo_[target]->disk.take(size_in_mb)) - return true; - return false; +void ResourceManagerMaster::UnRegisterSlave(const NodeID old_node_id){ + auto it = node_to_resourceinfo_.find(old_node_id); + if (it != node_to_resourceinfo_.end()) + { + delete node_to_resourceinfo_[old_node_id]; + node_to_resourceinfo_.erase(old_node_id); + }else{ + LOG(INFO)<<"do not need Register"<disk.put(size_in_mb); - return true; +std::vector 
ResourceManagerMaster::getSlaveIDList() { + std::vector ret; + boost::unordered_map::iterator it = + node_to_resourceinfo_.begin(); + while (it != node_to_resourceinfo_.end()) { + ret.push_back(it->first); + it++; + } + return ret; } - -bool ResourceManagerMaster::ApplyMemoryBuget(NodeID target, unsigned size_in_mb){ - if(node_to_resourceinfo_.find(target)==node_to_resourceinfo_.cend()){ - - /* target slave does not exist.*/ - return false; - } - if(node_to_resourceinfo_[target]->memory.take(size_in_mb)) - return true; - cout<<"no memory"<disk.take(size_in_mb)) return true; + return false; } -bool ResourceManagerMaster::ReturnMemoryBuget(NodeID target, unsigned size_in_mb) -{ - if(node_to_resourceinfo_.find(target) == node_to_resourceinfo_.cend()) - return false; - node_to_resourceinfo_[target]->memory.put(size_in_mb); - return true; +bool ResourceManagerMaster::ReturnDiskBuget(NodeID target, + unsigned size_in_mb) { + if (node_to_resourceinfo_.find(target) == node_to_resourceinfo_.cend()) + return false; + node_to_resourceinfo_[target]->disk.put(size_in_mb); + return true; } -bool ResourceManagerMaster::RegisterDiskBuget(NodeID report_node_id, unsigned size_in_mb){ - if(node_to_resourceinfo_.find(report_node_id)==node_to_resourceinfo_.end()){ - /* target slave does not exists*/ - return false; - } - node_to_resourceinfo_[report_node_id]->disk.initialize(size_in_mb); - logging_->log("Node(id=%d) reports its disk capacity=%d",report_node_id,size_in_mb); - return true; +bool ResourceManagerMaster::ApplyMemoryBuget(NodeID target, + unsigned size_in_mb) { + if (node_to_resourceinfo_.find(target) == node_to_resourceinfo_.cend()) { + /* target slave does not exist.*/ + return false; + } + if (node_to_resourceinfo_[target]->memory.take(size_in_mb)) return true; + LOG(ERROR) << "node :"<memory.initialize(size_in_mb); - logging_->log("Node(id=%d) reports its memory capacity=%d",report_node_id,size_in_mb); - return true; -} - 
-ResourceManagerMaster::ResourceManagerMasterActor::ResourceManagerMasterActor(Theron::Framework* framework,ResourceManagerMaster* rmm) -:Theron::Actor(*framework,"ResourceManagerMaster"),rmm_(rmm){ - RegisterHandler(this,&ResourceManagerMasterActor::ReceiveStorageBudgetReport); - RegisterHandler(this,&ResourceManagerMasterActor::ReceiveNewNodeRegister); +bool ResourceManagerMaster::ReturnMemoryBuget(NodeID target, + unsigned size_in_mb) { + if (node_to_resourceinfo_.find(target) == node_to_resourceinfo_.cend()) + return false; + node_to_resourceinfo_[target]->memory.put(size_in_mb); + return true; } -void ResourceManagerMaster::ResourceManagerMasterActor::ReceiveStorageBudgetReport(const StorageBudgetMessage &message,const Theron::Address from){ - if(!rmm_->RegisterDiskBuget(message.nodeid,message.disk_budget)){ - rmm_->logging_->elog("Fail to add the budget information to rmm!"); - } - if(!rmm_->RegisterMemoryBuget(message.nodeid,message.memory_budget)){ - rmm_->logging_->elog("Fail to add the budget information to rmm!"); - } -// rmm_->logging_->log("The storage of Slave[%d] has been registered, the disk=[%d]MB, memory=[%d]MB",message.nodeid,message.disk_budget,message.memory_budget); -// printf("The storage of Slave[%d] has been registered, the disk=[%d]MB, memory=[%d]MB\n",message.nodeid,message.disk_budget,message.memory_budget); -// Send(0,from); +bool ResourceManagerMaster::RegisterDiskBuget(NodeID report_node_id, + unsigned size_in_mb) { + if (node_to_resourceinfo_.find(report_node_id) == + node_to_resourceinfo_.end()) { + LOG(WARNING) << "target slave " << report_node_id << " does not exists!"; + return false; + } + node_to_resourceinfo_[report_node_id]->disk.initialize(size_in_mb); + LOG(INFO) << "Node(id= " << report_node_id + << ") reports its disk capacity=" << size_in_mb; + return true; } -void ResourceManagerMaster::ResourceManagerMasterActor::ReceiveNewNodeRegister(const NodeRegisterMessage &message,const Theron::Address from){ - - NodeAddress 
node_addr; - node_addr.ip=message.get_ip(); - std::ostringstream str; - str<RegisterNewSlave(node_addr); - rmm_->logging_->log("Received register request from %s:%d, the allocated NodeID=%d",message.get_ip().c_str(),message.port,assigned_node_id); - Send(assigned_node_id,from); +bool ResourceManagerMaster::RegisterMemoryBuget(NodeID report_node_id, + unsigned size_in_mb) { + if (node_to_resourceinfo_.find(report_node_id) == + node_to_resourceinfo_.end()) { + LOG(WARNING) << "target slave " << report_node_id << " does not exists!"; + return false; + } + node_to_resourceinfo_[report_node_id]->memory.initialize(size_in_mb); + LOG(INFO) << "Node(id= " << report_node_id + << ") reports its memory capacity=" << size_in_mb; + return true; } diff --git a/Resource/ResourceManagerMaster.h b/Resource/ResourceManagerMaster.h index ed6a2a44b..994062b0d 100755 --- a/Resource/ResourceManagerMaster.h +++ b/Resource/ResourceManagerMaster.h @@ -9,47 +9,28 @@ #define RESOURCEMANAGERMASTER_H_ #include #include -#include -#ifdef DMALLOC -#include "dmalloc.h" -#endif #include "ResourceInfo.h" #include "NodeTracker.h" #include "../common/Message.h" -#include "../common/Logging.h" class ResourceManagerMaster { -public: - class ResourceManagerMasterActor:public Theron::Actor{ - public: - ResourceManagerMasterActor(Theron::Framework* framework,ResourceManagerMaster* rmm); - ~ResourceManagerMasterActor(){}; - private: - void ReceiveStorageBudgetReport(const StorageBudgetMessage &message,const Theron::Address from); - void ReceiveNewNodeRegister(const NodeRegisterMessage &message,const Theron::Address from); - ResourceManagerMaster* rmm_; - }; - ResourceManagerMaster(); - virtual ~ResourceManagerMaster(); - NodeID RegisterNewSlave(NodeAddress); - /* notify the ResourceManager how large the budget is for a target node*/ - bool RegisterDiskBuget(NodeID report_node_id, unsigned size_in_mb); - bool RegisterMemoryBuget(NodeID report_node_id, unsigned size_in_mb); - std::vector getSlaveIDList(); - 
bool ApplyDiskBuget(NodeID target, unsigned size_in_mb); - bool ReturnDiskBuget(NodeID target, unsigned size_in_mb); - bool ApplyMemoryBuget(NodeID target, unsigned size_in_mb); - bool ReturnMemoryBuget(NodeID target, unsigned size_in_mb); -private: - boost::unordered_map node_to_resourceinfo_; - NodeTracker *node_tracker_; - Logging* logging_; - Theron::EndPoint *endpoint_; - Theron::Framework *framework; - ResourceManagerMasterActor* acter_; - /*Actor*/ - - + public: + ResourceManagerMaster(); + virtual ~ResourceManagerMaster(); + NodeID RegisterNewSlave(NodeAddress); + void UnRegisterSlave(const NodeID old_node_id); + void RegisterNewSlave(const NodeID new_node_id); + /* notify the ResourceManager how large the budget is for a target node*/ + bool RegisterDiskBuget(NodeID report_node_id, unsigned size_in_mb); + bool RegisterMemoryBuget(NodeID report_node_id, unsigned size_in_mb); + std::vector getSlaveIDList(); + bool ApplyDiskBuget(NodeID target, unsigned size_in_mb); + bool ReturnDiskBuget(NodeID target, unsigned size_in_mb); + bool ApplyMemoryBuget(NodeID target, unsigned size_in_mb); + bool ReturnMemoryBuget(NodeID target, unsigned size_in_mb); + private: + boost::unordered_map node_to_resourceinfo_; + NodeTracker* node_tracker_; }; #endif /* RESOURCEMANAGERMASTER_H_ */ diff --git a/Resource/ResourceManagerSlave.cpp b/Resource/ResourceManagerSlave.cpp index 4092a0087..8f547fa52 100755 --- a/Resource/ResourceManagerSlave.cpp +++ b/Resource/ResourceManagerSlave.cpp @@ -6,46 +6,37 @@ */ #include "ResourceManagerSlave.h" -#include "../Environment.h" -#include "../common/TimeOutReceiver.h" -#define ResourceManagerMasterName "ResourceManagerMaster" -InstanceResourceManager::InstanceResourceManager() { - framework_=new Theron::Framework(*Environment::getInstance()->getEndPoint()); - logging_=new ResourceManagerMasterLogging(); -} -InstanceResourceManager::~InstanceResourceManager() { - delete framework_; - delete logging_; -} -NodeID 
InstanceResourceManager::Register(){ - NodeID ret=10; - TimeOutReceiver receiver(Environment::getInstance()->getEndPoint()); - Theron::Catcher resultCatcher; - receiver.RegisterHandler(&resultCatcher, &Theron::Catcher::Push); +#include + +#include "../Environment.h" +#include "../node_manager/base_node.h" +#include "caf/io/all.hpp" +#include "caf/all.hpp" +using caf::after; +using caf::io::remote_actor; +using claims::NodeAddr; +using claims::OkAtom; +using claims::StorageBudgetAtom; +InstanceResourceManager::InstanceResourceManager() {} - std::string ip=Environment::getInstance()->getIp(); - unsigned port=Environment::getInstance()->getPort(); - NodeRegisterMessage message(ip,port); +InstanceResourceManager::~InstanceResourceManager() {} - framework_->Send(message,receiver.GetAddress(),Theron::Address("ResourceManagerMaster")); - Theron::Address from; - if(receiver.TimeOutWait(1,1000)==1){ +void InstanceResourceManager::ReportStorageBudget( + StorageBudgetMessage& message) { + caf::scoped_actor self; + auto master_actor = + Environment::getInstance()->get_slave_node()->GetMasterActor(); + self->sync_send(master_actor, StorageBudgetAtom::value, message).await( - resultCatcher.Pop(ret,from); - logging_->log("Successfully registered to the master, the allocated id =%d.",ret); - return ret; - } - else{ - logging_->elog("Failed to get NodeId from the master."); - return -1; - } -} -void InstanceResourceManager::ReportStorageBudget(StorageBudgetMessage& message){ - framework_->Send(message,Theron::Address(),Theron::Address(ResourceManagerMasterName)); + [=](OkAtom) { LOG(INFO) << "reporting storage budget is ok!" << endl; }, + after(std::chrono::seconds(30)) >> + [=]() { + LOG(WARNING) << "reporting storage budget, but timeout 30s !!" 
+ << endl; + }); + LOG(INFO)<<"node :"< -#ifdef DMALLOC -#include "dmalloc.h" -#endif #include "../common/Message.h" -#include "../common/Logging.h" #include "CPUResource.h" typedef int NodeID; +/* + * just for reporting storage budget + */ +class StorageBudgetMessage; class InstanceResourceManager { -public: - InstanceResourceManager(); - virtual ~InstanceResourceManager(); - NodeID Register(); - void ReportStorageBudget(StorageBudgetMessage&); - void setStorageBudget(unsigned long memory, unsigned long disk); -private: - Theron::Framework *framework_; - Logging* logging_; - CPUResourceManager cpu_resource_; + public: + InstanceResourceManager(); + virtual ~InstanceResourceManager(); + void ReportStorageBudget(StorageBudgetMessage&); + void setStorageBudget(unsigned long memory, unsigned long disk); + + private: + CPUResourceManager cpu_resource_; }; #endif /* RESOURCEMANAGERSLAVE_H_ */ diff --git a/Server.cpp b/Server.cpp index 7f9cc8755..aa9f9a79e 100644 --- a/Server.cpp +++ b/Server.cpp @@ -106,10 +106,15 @@ int main(int argc, char** argv) { // create_poc_data_one_partitions(); // print_welcome(); // ExecuteLogicalQueryPlan(); - while (true) sleep(1); + // while (std::cin.get() != 'q') sleep(1); + // } else { + // Environment::getInstance(master); + // while (std::cin.get() != 'q') sleep(1); + // } + while (1) sleep(1); } else { Environment::getInstance(master); - while (true) sleep(1); + while (1) sleep(1); } #else int pid = fork(); diff --git a/Test/Makefile.am b/Test/Makefile.am index 9d538e7d4..25dc5ffd5 100644 --- a/Test/Makefile.am +++ b/Test/Makefile.am @@ -1,10 +1,9 @@ AM_CPPFLAGS= -fPIC -fpermissive\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ -I${GTEST_HOME}/include AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lxs @@ -30,11 +29,10 @@ LDADD = TestSuit/libtestsuit.a \ ../common/types/libtypes.a \ 
../sql_parser/parser/libparser_test.a \ ../utility/libutility.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a \ ${GTEST_HOME}/libgtest.a noinst_LIBRARIES=libtest.a diff --git a/Test/TestSuit/Makefile.am b/Test/TestSuit/Makefile.am index fe76b95df..a1b50e982 100644 --- a/Test/TestSuit/Makefile.am +++ b/Test/TestSuit/Makefile.am @@ -1,10 +1,9 @@ AM_CPPFLAGS= -fPIC -fpermissive\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ -I${GTEST_HOME}/include AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lxs @@ -25,11 +24,10 @@ LDADD = ../libtest.a \ ../../common/types/libtypes.a \ ../../common/Block/libblock.a \ ../../utility/libutility.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a \ ${GTEST_HOME}/libgtest.a noinst_LIBRARIES=libtestsuit.a diff --git a/Test/TestSuit/hash_table_test.h b/Test/TestSuit/hash_table_test.h index 5ba03e775..26528d7c5 100644 --- a/Test/TestSuit/hash_table_test.h +++ b/Test/TestSuit/hash_table_test.h @@ -13,15 +13,12 @@ #include "../../common/ids.h" #include "../../common/hash.h" #include "../../common/Block/BlockStreamBuffer.h" -<<<<<<< HEAD -#include "../../Catalog/Column.h" -======= -#include "../../catalog/Column.h" +#include "../../catalog/column.h" ->>>>>>> FETCH_HEAD #include "../../physical_operator/physical_operator_base.h" #include "../../storage/PartitionStorage.h" +#include "../../storage/PartitionReaderIterator.h" #include 
"../../storage/BlockManager.h" #define block_size (1024 * 1024) @@ -209,13 +206,14 @@ static void startup_catalog() { } } } + struct Arg { BlockStreamBuffer* buffer; BasicHashTable** hash_table; Schema* schema; PartitionFunction* hash; PhysicalOperatorBase* iterator; - PartitionStorage::PartitionReaderItetaor* partition_reader; + PartitionReaderIterator* partition_reader; Barrier* barrier; unsigned tid; }; @@ -245,7 +243,7 @@ void* insert_into_hash_table_from_projection(void* argment) { unsigned nbuckets = arg.hash->getNumberOfPartitions(); unsigned long long int start = curtick(); printf("tuple length=%d\n", arg.schema->getTupleMaxSize()); - while (arg.partition_reader->nextBlock(fetched_block)) { + while (arg.partition_reader->NextBlock(fetched_block)) { void* tuple; BlockStreamBase::BlockStreamTraverseIterator* it = fetched_block->createIterator(); @@ -315,9 +313,9 @@ static double projection_scan(unsigned degree_of_parallelism) { // arg.hash_table=hashtable; arg.schema = schema; arg.partition_reader = BlockManager::getInstance() - ->getPartitionHandle(PartitionID( + ->GetPartitionHandle(PartitionID( table->getProjectoin(1)->getProjectionID(), 0)) - ->createAtomicReaderIterator(); + ->CreateAtomicReaderIterator(); arg.barrier = new Barrier(nthreads); pthread_t pid[1000]; unsigned long long int start = curtick(); diff --git a/Test/common/Makefile.am b/Test/common/Makefile.am index 236b04126..1c569bd39 100644 --- a/Test/common/Makefile.am +++ b/Test/common/Makefile.am @@ -1,10 +1,9 @@ AM_CPPFLAGS= -fPIC -fpermissive\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ -I${GTEST_HOME}/include AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lxs @@ -21,11 +20,10 @@ LDADD = ../../catalog/libcatalog.a \ ../../common/libcommon.a \ ../../common/types/libtypes.a \ ../../utility/libutility.a \ - 
${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a \ ${GTEST_HOME}/libgtest.a noinst_LIBRARIES=libcommon.a diff --git a/Test/common/errno_test.h b/Test/common/errno_test.h index 6bd176881..9da72578c 100644 --- a/Test/common/errno_test.h +++ b/Test/common/errno_test.h @@ -31,6 +31,8 @@ #include #include + +#include "../../common/error_define.h" #include "../../utility/Timer.h" #include "../../common/error_no.h" using std::cout; diff --git a/Test/gtest_main.cpp b/Test/gtest_main.cpp index 01fdc8e3b..7de2856f4 100644 --- a/Test/gtest_main.cpp +++ b/Test/gtest_main.cpp @@ -24,20 +24,20 @@ #include #include #include -#define GLOG_NO_ABBREVIATED_SEVERITIES -//#include "../codegen/codegen_test.h" -#include "../common/log/logging.h" +// #include "../codegen/codegen_test.h" +#include "./common/errno_test.h" +#include "../common/test/operate_test.h" #include "../common/types/Test/data_type_test.h" #include "../loader/test/data_injector_test.h" //#include "../common/file_handle/test/disk_file_handle_imp_test.h" //#include "../common/file_handle/test/hdfs_file_handle_imp_test.h" +//#include "../loader/test/single_thread_single_file_connector_test.h" //#include "../loader/test/single_file_connector_test.h" -//#include "../loader/test/single_file_connector_test.cpp" +#include "./iterator/elastic_iterator_model_test.h" //#include "../loader/test/table_file_connector_test.h" -//#include "../loader/test/table_file_connector_test.cpp" -#include "./common/errno_test.h" -#include "../common/test/operate_test.h" -#include "iterator/elastic_iterator_model_test.h" + +#define GLOG_NO_ABBREVIATED_SEVERITIES +#include "../common/log/logging.h" static int consumed_args = 0; diff --git a/Test/iterator/elastic_iterator_model_test.h b/Test/iterator/elastic_iterator_model_test.h 
index b24d34c26..1eb67b5a3 100644 --- a/Test/iterator/elastic_iterator_model_test.h +++ b/Test/iterator/elastic_iterator_model_test.h @@ -43,7 +43,50 @@ class ElasticIteratorModelTest : public ::testing::Test { std::string ElasticIteratorModelTest::ip_; int ElasticIteratorModelTest::port_; - +/* +TEST_F(ElasticIteratorModelTest, LoadFromHdfs){ + EXPECT_TRUE(client_.connected()); + ResultSet rs; + std::string command ; + command = "load table PART from "; + command += '"'; + command +="HDFS:/test/claims/part.tbl"; + command +='"'; + command +=" with '|','\\n';"; + std::string message; + client_.submit(command, message, rs); + message = message.substr(0,22); + EXPECT_STREQ("load data successfully", message.c_str()); +}*/ + +TEST_F(ElasticIteratorModelTest, LoadFromHdfs_part){ + EXPECT_TRUE(client_.connected()); + ResultSet rs; + std::string command; + command = "load table PART from "; + command += '"'; + command +="HDFS:/test/claims/part.tbl"; + command +='"'; + command +=" with '|','\\n';"; + std::string message; + client_.submit(command, message, rs); + message = message.substr(0,22); + EXPECT_STREQ("load data successfully", message.c_str()); +} +TEST_F(ElasticIteratorModelTest, LoadFromHdfs_customer){ + EXPECT_TRUE(client_.connected()); + ResultSet rs; + std::string command; + command = "load table CUSTOMER from "; + command += '"'; + command +="HDFS:/test/claims/customer.tbl"; + command +='"'; + command +=" with '|','\\n';"; + std::string message; + client_.submit(command, message, rs); + message = message.substr(0,22); + EXPECT_STREQ("load data successfully", message.c_str()); +} TEST_F(ElasticIteratorModelTest, Scan) { EXPECT_TRUE(client_.connected()); ResultSet rs; @@ -98,8 +141,8 @@ TEST_F(ElasticIteratorModelTest, ScalaAggregation) { BlockStreamBase::BlockStreamTraverseIterator *b_it = it.nextBlock()->createIterator(); EXPECT_EQ(6001215, *(long *)b_it->currentTuple()); - //NValue v; - //v.createDecimalFromString("153078795.0000"); + // NValue v; + // 
v.createDecimalFromString("153078795.0000"); Decimal v(65, 30, "153078795.0000"); EXPECT_TRUE( v.op_equals(*(Decimal *)((char *)b_it->currentTuple() + sizeof(long)))); @@ -124,7 +167,7 @@ TEST_F(ElasticIteratorModelTest, AggregationSmallGroups) { message, rs); EXPECT_EQ(3, rs.getNumberOftuples()); } -TEST_F(ElasticIteratorModelTest, Join) { +TEST_F(ElasticIteratorModelTest, EqualJoin) { ResultSet rs; std::string message; client_.submit( @@ -201,7 +244,7 @@ TEST_F(ElasticIteratorModelTest, FilteredJoin) { ResultSet rs; std::string message; client_.submit( - "select count(*) from PART,LINEITEM where PART.row_id%10=1 and " + "select count(*) from PART,LINEITEM where PART.row_id%10=1 and " "LINEITEM.row_id % 10 =1 and PART.row_id = LINEITEM.row_id;", message, rs); DynamicBlockBuffer::Iterator it = rs.createIterator(); @@ -211,6 +254,20 @@ TEST_F(ElasticIteratorModelTest, FilteredJoin) { delete b_it; } +TEST_F(ElasticIteratorModelTest, OuterJoin) { + ResultSet rs; + std::string message; + client_.submit( + "SELECT COUNT(*) FROM CUSTOMER LEFT OUTER JOIN ORDERS ON " + "C_CUSTKEY = O_CUSTKEY AND O_COMMENT NOT LIKE '%unusual%deposits%';", + message, rs); + DynamicBlockBuffer::Iterator it = rs.createIterator(); + BlockStreamBase::BlockStreamTraverseIterator *b_it = + it.nextBlock()->createIterator(); + EXPECT_EQ(1533872, *(long *)b_it->nextTuple()); + delete b_it; +} + // delete data test. 
TEST_F(ElasticIteratorModelTest, createTable) { string createtablesql = @@ -327,6 +384,28 @@ TEST_F(ElasticIteratorModelTest, droptestdata) { cout << message << endl; } +// TEST_F(ElasticIteratorModelTest, CreateTempTableForTableFileConnectorTest) { +// string table_name = "sfdfsf"; +// string create_table_stmt = +// "create table " + table_name + " (a int , b varchar(12));"; +// string create_prj_stmt1 = "create projection on " + table_name + +// " (a , b ) number = 2 partitioned on a ;"; +// string create_prj_stmt2 = "create projection on " + table_name + +// " (a ) number = 3 partitioned on a ;"; +// +// ResultSet rs; +// string message = ""; +// client_.submit(create_table_stmt.c_str(), message, rs); +// EXPECT_EQ("create table successfully\n", message); +// cout << message << endl; +// client_.submit(create_prj_stmt1.c_str(), message, rs); +// EXPECT_EQ("create projection successfully\n", message); +// cout << message << endl; +// client_.submit(create_prj_stmt2.c_str(), message, rs); +// EXPECT_EQ("create projection successfully\n", message); +// cout << message << endl; +//} + // add by cswang 19 Oct, 2015 #endif /* ELASTIC_ITERATOR_MODEL_TEST_H_ */ diff --git a/Test/test_adaptiveendpoint.cpp b/Test/test_adaptiveendpoint.cpp deleted file mode 100755 index 7412122c2..000000000 --- a/Test/test_adaptiveendpoint.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * test_adaptiveendpoint.cpp - * - * Created on: Aug 8, 2013 - * Author: wangli - */ -#include -#include "../Executor/Coordinator.h" -#include "../Executor/AdaptiveEndPoint.h" -int main234112234(int argc, char** argv){ - printf("0 for coordinate, 1 for AdaptiveEndPoint!\n"); - char input; - scanf("%c",&input); - if(input=='0'){ - Coordinator* C=new Coordinator(); - //AdaptiveEndPoint* AEP=new AdaptiveEndPoint("AdaptiveEndPoint","10.11.1.224","20202"); - - while(true){ - sleep(1); - } - - C->~Coordinator(); - } - else{ - printf("Argc=%d\n",argc); - if(argc!=3){ - printf("Please give the adaptive endpoint ip and 
port!\n"); - return 1; - } - std::ostringstream name; - name<<"AdaptiveEndPoint://"<添加编译选项 AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lboost_date_time -lboost_system -lxs @@ -23,8 +22,7 @@ LDADD = ../storage/libstorage.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ ${BOOST_HOME}/stage/lib/libboost_system.a \ - ${BOOST_HOME}/stage/lib/libboost_system.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_system.so #DIR = ${shell /bin/pwd} #INCLUDES = -I${DIR}/ThirdParty @@ -45,4 +43,3 @@ libcatalog_a_SOURCES = \ # if it is need to include stat folder and Test folder, add stat and Test at next two lines SUBDIRS = stat DIST_SUBDIRS = stat - diff --git a/catalog/Test/Makefile.am b/catalog/Test/Makefile.am index 42fcda5b3..6909a729e 100644 --- a/catalog/Test/Makefile.am +++ b/catalog/Test/Makefile.am @@ -1,11 +1,9 @@ - AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ --I${JAVA_HOME}/include/linux\ --I${THERON_HOME}/Include +-I${JAVA_HOME}/include/linux AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lboost_date_time -lboost_system -lxs @@ -22,8 +20,7 @@ LDADD = ../../logical_operator/liblogicalqueryplan.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ ${BOOST_HOME}/stage/lib/libboost_system.a \ - ${BOOST_HOME}/stage/lib/libboost_system.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_system.so noinst_LIBRARIES=libtest.a libtest_a_SOURCES = diff --git a/catalog/Test/Partitioner_test.cpp b/catalog/Test/Partitioner_test.cpp deleted file mode 100755 index d4afab818..000000000 --- a/catalog/Test/Partitioner_test.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Partitioner_test.cpp - * - * Created on: Oct 30, 2013 - * Author: wangli - */ -#include - -#include "../../catalog/catalog.h" -#include 
"../../catalog/ProjectionBinding.h" -#include "../../catalog/table.h" -#include "../../common/hash.h" -#include "../../Environment.h" -using namespace std; - -int TableDescriptor_test() { - Catalog* catalog = Catalog::getInstance(); - - TableDescriptor* table = - new TableDescriptor("Student", catalog->allocate_unique_table_id()); - table->addAttribute("Name", data_type(t_string), 10); - table->addAttribute("Age", data_type(t_int)); - table->addAttribute("Gender", data_type(t_int)); - table->addAttribute("Score", data_type(t_float)); - - vector index; - index.push_back(0); - index.push_back(1); - index.push_back(3); - const int partition_key_index = 3; - table->createHashPartitionedProjection(index, partition_key_index, 64); - - catalog->add_table(table); -} - -int BindingTest() { - Environment::getInstance(true); - ResourceManagerMaster* rmms = - Environment::getInstance()->getResourceManagerMaster(); - // rmms->RegisterNewSlave("192.168.1.1"); - // rmms->RegisterNewSlave("192.168.1.2"); - // rmms->RegisterNewSlave("192.168.1.3"); - // rmms->RegisterNewSlave("192.168.1.4"); - // rmms->RegisterNewSlave("192.168.1.5"); - // rmms->RegisterDiskBuget(0,0); - // rmms->RegisterDiskBuget(1,1000); - // rmms->RegisterDiskBuget(2,0); - // rmms->RegisterDiskBuget(3,10000); - // rmms->RegisterDiskBuget(4,0); - - /////////////////////////////////////// - /* the following codes should be triggered by DDL module*/ - TableDescriptor* table = new TableDescriptor( - "Student", - Environment::getInstance()->getCatalog()->allocate_unique_table_id()); - table->addAttribute("Name", data_type(t_string), 10); - table->addAttribute("Age", data_type(t_int)); - table->addAttribute("Gender", data_type(t_int)); - table->addAttribute("Score", data_type(t_float)); - - vector index; - index.push_back(0); - index.push_back(1); - index.push_back(3); - const int partition_key_index = 3; - table->createHashPartitionedProjection(index, partition_key_index, 4); - Catalog* catalog = 
Environment::getInstance()->getCatalog(); - catalog->add_table(table); - /////////////////////////////////////// - - //////////////////////////////////////// - /* the following codes should be triggered by Load module*/ - ; - for (unsigned i = 0; - i < table->getProjectoin(0)->getPartitioner()->getNumberOfPartitions(); - i++) { - catalog->getTable(0)->getProjectoin(0)->getPartitioner()->RegisterPartition( - i, 12); - } - //////////////////////////////////////// - - ProjectionBinding* pb = new ProjectionBinding(); - pb->BindingEntireProjection( - catalog->getTable(0)->getProjectoin(0)->getPartitioner()); - // pb->BindingEntireProjection(catalog->getTable(0)->getProjectoin(0)->getPartitoiner()); -} - -int Theron_test() { - Theron::EndPoint* endpoint = - new Theron::EndPoint("endpoint", "tcp://127.0.0.1:24444"); - Theron::Framework* framework = new Theron::Framework(*endpoint); -} - -int main2342343333() { - BindingTest(); - - cout << "Waiting~" << endl; - while (true) { - sleep(1); - } - // return TableDescriptor_test(); -} diff --git a/catalog/catalog.cpp b/catalog/catalog.cpp index 5882add20..a01eccc8c 100644 --- a/catalog/catalog.cpp +++ b/catalog/catalog.cpp @@ -39,10 +39,12 @@ #include "../common/error_define.h" #include "../common/file_handle/file_handle_imp.h" #include "../common/file_handle/file_handle_imp_factory.h" +#include "../common/file_handle/hdfs_connector.h" +#include "../common/memory_handle.h" #include "../common/rename.h" #include "../Config.h" -#include "../loader/file_connector.h" #include "../loader/single_file_connector.h" +using claims::common::FileHandleImpFactory; using std::vector; using std::string; using std::endl; @@ -51,7 +53,7 @@ using claims::common::rCatalogNotFound; using claims::common::rDataPathError; using claims::common::FileOpenFlag; using claims::common::FilePlatform; -using claims::loader::FileConnector; +using claims::common::HdfsConnector; using claims::loader::SingleFileConnector; namespace claims { @@ -62,6 +64,12 @@ 
Catalog* Catalog::instance_ = NULL; Catalog::Catalog() { logging = new CatalogLogging(); binding_ = new ProjectionBinding(); + write_connector_ = new SingleFileConnector( + Config::local_disk_mode ? FilePlatform::kDisk : FilePlatform::kHdfs, + Config::catalog_file, FileOpenFlag::kCreateFile); + read_connector_ = new SingleFileConnector( + Config::local_disk_mode ? FilePlatform::kDisk : FilePlatform::kHdfs, + Config::catalog_file, FileOpenFlag::kReadFile); } Catalog::~Catalog() { @@ -168,26 +176,33 @@ void Catalog::outPut() { // 2014-3-20---save as a file---by Yu RetCode Catalog::saveCatalog() { + // LockGuard guard(write_lock_); std::ostringstream oss; boost::archive::text_oarchive oa(oss); oa << *this; + assert(0 != oss.str().length() && "catalog has nothing!!"); int ret = rSuccess; - FileConnector* connector = new SingleFileConnector( - Config::local_disk_mode ? FilePlatform::kDisk : FilePlatform::kHdfs, - Config::catalog_file); - - EXEC_AND_ONLY_LOG_ERROR(ret, connector->Open(FileOpenFlag::kCreateFile), - "catalog file name:" << Config::catalog_file); EXEC_AND_ONLY_LOG_ERROR( - ret, connector->Flush(static_cast(oss.str().c_str()), - oss.str().length()), - "catalog file name:" << Config::catalog_file); + ret, write_connector_->Open(), + "failed to open catalog file: " << Config::catalog_file + << " with Overwrite mode"); + assert(ret == rSuccess && "failed to open catalog "); + // EXEC_AND_ONLY_LOG_ERROR(ret, write_connector_->Delete(), + // "failed to delete catalog file"); + // FileHandleImp* write_handler = + EXEC_AND_LOG_RETURN( + ret, write_connector_->AtomicFlush( + static_cast(oss.str().c_str()), oss.str().length()), + "write catalog " << oss.str().length() << " chars", + "failed to flush into catalog file: " << Config::catalog_file); - EXEC_AND_ONLY_LOG_ERROR(ret, connector->Close(), - "catalog file name:" << Config::catalog_file); - return rSuccess; + assert(ret == rSuccess && "failed to write catalog "); + EXEC_AND_ONLY_LOG_ERROR( + ret, 
write_connector_->Close(), + "failed to close catalog file: " << Config::catalog_file); + return ret; } bool Catalog::IsDataFileExist() { @@ -205,17 +220,17 @@ bool Catalog::IsDataFileExist() { if ('T' == file_ptr->d_name[0]) { LOG(INFO) << "The data disk file started with 'T': " << file_ptr->d_name[0] << " is existed" << endl; + closedir(dir); return true; } } LOG(INFO) << "There are no data file in disk" << endl; + closedir(dir); return false; } else { - hdfsFS hdfsfs = - hdfsConnect(Config::hdfs_master_ip.c_str(), Config::hdfs_master_port); int file_num; - hdfsFileInfo* file_list = - hdfsListDirectory(hdfsfs, Config::data_dir.c_str(), &file_num); + hdfsFileInfo* file_list = hdfsListDirectory( + HdfsConnector::Instance(), Config::data_dir.c_str(), &file_num); for (int cur = 0; cur < file_num; ++cur) { LOG(INFO) << " " << file_list[cur].mName << "----"; string full_file_name(file_list[cur].mName); @@ -238,16 +253,14 @@ bool Catalog::IsDataFileExist() { RetCode Catalog::restoreCatalog() { int ret = rSuccess; string catalog_file = Config::catalog_file; - SingleFileConnector* connector = new SingleFileConnector( - Config::local_disk_mode ? FilePlatform::kDisk : FilePlatform::kHdfs, - catalog_file); + // LockGuard guard(write_lock_); // check whether there is catalog file if there are data file - if (!connector->CanAccess() && IsDataFileExist()) { + if (!read_connector_->CanAccess() && IsDataFileExist()) { LOG(ERROR) << "The data file are existed while catalog file " << catalog_file << " is not existed!" 
<< endl; return rCatalogNotFound; - } else if (!connector->CanAccess()) { + } else if (!read_connector_->CanAccess()) { LOG(INFO) << "The catalog file and data file all are not existed" << endl; return rSuccess; } else if (!IsDataFileExist()) { @@ -255,19 +268,24 @@ RetCode Catalog::restoreCatalog() { "The catalog file will be overwrite" << endl; return rSuccess; } else { + EXEC_AND_ONLY_LOG_ERROR(ret, read_connector_->Open(), + "failed to open catalog file: " + << Config::catalog_file << " with Read mode"); + assert(ret == rSuccess && "failed to open catalog "); uint64_t file_length = 0; - void* buffer; - EXEC_AND_RETURN_ERROR(ret, connector->Open(FileOpenFlag::kReadFile), - "catalog file name: " << catalog_file); - EXEC_AND_RETURN_ERROR(ret, connector->LoadTotalFile(buffer, &file_length), + void* buffer = NULL; + EXEC_AND_RETURN_ERROR(ret, + read_connector_->LoadTotalFile(buffer, &file_length), "catalog file name: " << catalog_file); + assert(0 != file_length && "catalog'length must not be 0 !"); LOG(INFO) << "Start to deserialize catalog ..." 
<< endl; string temp(static_cast(buffer), file_length); + DELETE_PTR(buffer); // TEST std::istringstream iss(temp); boost::archive::text_iarchive ia(iss); ia >> *this; - return rSuccess; + return ret; } } @@ -296,7 +314,6 @@ bool Catalog::DropTable(const std::string table_name, const TableID id) { } if (isnamedrop && istableIDdrop) { - // table_id_allocator.decrease_table_id(); isdropped = true; } else { if (!isnamedrop) { @@ -337,6 +354,24 @@ void Catalog::GetAllTables(ostringstream& ostr) const { } } } - +vector Catalog::GetAllTablesID()const +{ + vector table_id_list; + for (int id = 0; id < getTableCount(); ++id){ + auto it_tableid_to_table = tableid_to_table.find(id); + if (tableid_to_table.end() != it_tableid_to_table) { + string tbname = it_tableid_to_table->second->getTableName(); + int len = tbname.length(); + if (len >= 4 && tbname.substr(len - 4, len) == "_DEL" && + name_to_table.find(tbname.substr(0, len - 4)) != + name_to_table.cend()) { + // hide the deleted data table created by claims + }else{ + table_id_list.push_back(it_tableid_to_table->first); + } + } + } + return table_id_list; +} } /* namespace catalog */ } /* namespace claims */ diff --git a/catalog/catalog.h b/catalog/catalog.h index aa85a319d..f2db2974c 100644 --- a/catalog/catalog.h +++ b/catalog/catalog.h @@ -39,8 +39,16 @@ #include "../common/error_define.h" #include "../common/Logging.h" #include "../utility/lock.h" + namespace claims { + +namespace loader { +class SingleFileConnector; +}; + namespace catalog { +using loader::SingleFileConnector; +class SingleFileConnector; struct TableIDAllocator { TableIDAllocator() { table_id_curosr = 0; } @@ -76,6 +84,7 @@ class Catalog { TableDescriptor* getTable(const TableID&) const; TableDescriptor* getTable(const std::string& table_name) const; void GetAllTables(ostringstream& ostr) const; + vector GetAllTablesID()const; ProjectionDescriptor* getProjection(const ProjectionID&) const; ProjectionBinding* getBindingModele() const; @@ -110,8 
+119,10 @@ class Catalog { Logging* logging; ProjectionBinding* binding_; static Catalog* instance_; + SingleFileConnector* write_connector_ = NULL; + SingleFileConnector* read_connector_ = NULL; + // Lock write_lock_; - // 2014-3-20---add serialize function---by Yu friend class boost::serialization::access; template void serialize(Archive& ar, const unsigned int version) { diff --git a/catalog/partitioner.h b/catalog/partitioner.h index 0ba8ed9f8..4f14cb962 100644 --- a/catalog/partitioner.h +++ b/catalog/partitioner.h @@ -279,7 +279,7 @@ class Partitioner { bool allPartitionBound() const; vector getPartitionIDList(); - + vector getPartitionList(){ return partition_info_list; } private: Attribute *partition_key_; PartitionFunction *partition_function_; diff --git a/catalog/projection_binding.cpp b/catalog/projection_binding.cpp index cc86f400b..a037c9b4a 100644 --- a/catalog/projection_binding.cpp +++ b/catalog/projection_binding.cpp @@ -31,6 +31,7 @@ #include "../catalog/projection_binding.h" +#include #include #include "../Environment.h" #include "../utility/maths.h" @@ -48,6 +49,13 @@ ProjectionBinding::~ProjectionBinding() { bool ProjectionBinding::BindingEntireProjection( Partitioner* part, const StorageLevel& desriable_storage_level) { + lock_.acquire(); + if (part->allPartitionBound()) { + LOG(WARNING) << "occurr to competition that sending binding message" + << endl; + lock_.release(); + return true; + } if (part->get_binding_mode_() == OneToOne) { std::vector > partition_id_to_nodeid_list; ResourceManagerMaster* rmm = @@ -124,19 +132,21 @@ bool ProjectionBinding::BindingEntireProjection( // // BlockManagerMaster::getInstance()->SendBindingMessage(partition_id,number_of_chunks,MEMORY,target); } - /* conduct the binding according to the bingding information list*/ + + /* conduct the binding according to the binding information list*/ for (unsigned i = 0; i < partition_id_to_nodeid_list.size(); i++) { const unsigned partition_off = 
partition_id_to_nodeid_list[i].first; const NodeID node_id = partition_id_to_nodeid_list[i].second; - /* update the information in Catalog*/ - part->bindPartitionToNode(partition_off, node_id); - /* notify the StorageManger of the target node*/ PartitionID partition_id(part->getProejctionID(), partition_off); const unsigned number_of_chunks = part->getPartitionChunks(partition_off); BlockManagerMaster::getInstance()->SendBindingMessage( partition_id, number_of_chunks, desriable_storage_level, node_id); + /* update the information in Catalog*/ + part->bindPartitionToNode(partition_off, node_id); } + + lock_.release(); return true; } diff --git a/catalog/projection_binding.h b/catalog/projection_binding.h index ffab4fffc..291cd4653 100644 --- a/catalog/projection_binding.h +++ b/catalog/projection_binding.h @@ -33,6 +33,7 @@ #include "../catalog/partitioner.h" #include "../storage/StorageLevel.h" +#include "../utility/lock.h" namespace claims { namespace catalog { @@ -45,6 +46,9 @@ class ProjectionBinding { bool BindingEntireProjection( Partitioner*, const StorageLevel& desriable_storage_level = MEMORY); bool UnbindingEntireProjection(Partitioner* part); + + private: + Lock lock_; }; } /* namespace catalog */ diff --git a/catalog/stat/Analyzer.cpp b/catalog/stat/Analyzer.cpp index 10a1e437c..658e1fcdd 100755 --- a/catalog/stat/Analyzer.cpp +++ b/catalog/stat/Analyzer.cpp @@ -8,6 +8,7 @@ #include "../../catalog/stat/Analyzer.h" #include #include +#include #include #include "../../catalog/attribute.h" @@ -22,8 +23,9 @@ #include "../../logical_operator/logical_query_plan_root.h" #include "../../logical_operator/logical_scan.h" #include "../../physical_operator/physical_aggregation.h" - +#include "../../exec_tracker/segment_exec_status.h" using namespace claims::catalog; +using claims::SegmentExecStatus; using namespace claims::logical_operator; using claims::physical_operator::PhysicalAggregation; using std::map; @@ -81,10 +83,10 @@ void Analyzer::analyse(const 
AttributeID& attrID) { collector_node_id, aggregation, LogicalQueryPlanRoot::kResultCollector); PhysicalOperatorBase* collector = root->GetPhysicalPlan(1024 * 64 - sizeof(unsigned)); - - collector->Open(); - collector->Next(0); - collector->Close(); + SegmentExecStatus* exec_status = new SegmentExecStatus(make_pair(0, 0)); + collector->Open(exec_status); + collector->Next(exec_status, 0); + collector->Close(exec_status); ResultSet* resultset = collector->GetResultSet(); ResultSet::Iterator it = resultset->createIterator(); @@ -267,9 +269,11 @@ void Analyzer::compute_table_stat(const TableID& tab_id) { PhysicalOperatorBase* collector = root->GetPhysicalPlan(1024 * 64 - sizeof(unsigned)); - collector->Open(); - collector->Next(0); - collector->Close(); + SegmentExecStatus* exec_status = new SegmentExecStatus(make_pair(0, 0)); + + collector->Open(exec_status); + collector->Next(exec_status, 0); + collector->Close(exec_status); ResultSet* resultset = collector->GetResultSet(); ResultSet::Iterator it = resultset->createIterator(); @@ -398,9 +402,11 @@ unsigned long Analyzer::getDistinctCardinality(const AttributeID& attr_id) { PhysicalOperatorBase* collector = root->GetPhysicalPlan(1024 * 64 - sizeof(unsigned)); - collector->Open(); - collector->Next(0); - collector->Close(); + SegmentExecStatus* exec_status = new SegmentExecStatus(make_pair(0, 0)); + + collector->Open(exec_status); + collector->Next(exec_status, 0); + collector->Close(exec_status); ResultSet* resultset = collector->GetResultSet(); ResultSet::Iterator it = resultset->createIterator(); BlockStreamBase::BlockStreamTraverseIterator* b_it = @@ -464,10 +470,11 @@ Histogram* Analyzer::computeHistogram(const AttributeID& attr_id, PhysicalOperatorBase* collector = root->GetPhysicalPlan(1024 * 64 - sizeof(unsigned)); + SegmentExecStatus* exec_status = new SegmentExecStatus(make_pair(0, 0)); - collector->Open(); - collector->Next(0); - collector->Close(); + collector->Open(exec_status); + 
collector->Next(exec_status, 0); + collector->Close(exec_status); ResultSet* resultset = collector->GetResultSet(); ResultSet::Iterator it = resultset->createIterator(); diff --git a/catalog/table.cpp b/catalog/table.cpp index fff0c62db..63206e9ea 100755 --- a/catalog/table.cpp +++ b/catalog/table.cpp @@ -29,17 +29,29 @@ #include #include + +#include "../common/file_handle/file_handle_imp_factory.h" #include "../common/Schema/SchemaFix.h" +#include "../loader/table_file_connector.h" +using claims::common::FilePlatform; using claims::utility::LockGuard; namespace claims { namespace catalog { + +// only for deserialization +TableDescriptor::TableDescriptor() {} + TableDescriptor::TableDescriptor(const string& name, const TableID table_id) - : tableName(name), table_id_(table_id), row_number_(0) {} + : tableName(name), table_id_(table_id), row_number_(0) { + write_connector_ = new TableFileConnector( + Config::local_disk_mode ? FilePlatform::kDisk : FilePlatform::kHdfs, this, + common::kAppendFile); +} TableDescriptor::~TableDescriptor() {} void TableDescriptor::addAttribute(Attribute attr) { - LockGuard guard(lock_); + LockGuard guard(update_lock_); attributes.push_back(attr); } @@ -47,7 +59,7 @@ void TableDescriptor::addAttribute(Attribute attr) { bool TableDescriptor::addAttribute(string attname, data_type dt, unsigned max_length, bool unique, bool can_be_null) { - LockGuard guard(lock_); + LockGuard guard(update_lock_); attname = tableName + '.' 
+ attname; /*check for attribute rename*/ @@ -60,58 +72,24 @@ bool TableDescriptor::addAttribute(string attname, data_type dt, return true; } -void TableDescriptor::addColumn(ProjectionDescriptor* column) {} - -bool TableDescriptor::createHashPartitionedProjection( - vector column_list, ColumnOffset partition_key_index, - unsigned number_of_partitions) { - LockGuard guard(lock_); - ProjectionID projection_id(table_id_, projection_list_.size()); - ProjectionDescriptor* projection = new ProjectionDescriptor(projection_id); - - // projection->projection_offset_=projection_list_.size(); - for (unsigned i = 0; i < column_list.size(); i++) { - projection->addAttribute(attributes[column_list[i]]); - } - - PartitionFunction* hash_function = - PartitionFunctionFactory::createGeneralModuloFunction( - number_of_partitions); - projection->DefinePartitonier(number_of_partitions, - attributes[partition_key_index], hash_function); - - projection_list_.push_back(projection); - return true; +RetCode TableDescriptor::InitFileConnector() { + write_connector_ = new TableFileConnector( + Config::local_disk_mode ? 
FilePlatform::kDisk : FilePlatform::kHdfs, this, + common::kAppendFile); } -bool TableDescriptor::createHashPartitionedProjectionOnAllAttribute( - std::string partition_attribute_name, unsigned number_of_partitions) { - LockGuard guard(lock_); - ProjectionID projection_id(table_id_, projection_list_.size()); - ProjectionDescriptor* projection = new ProjectionDescriptor(projection_id); - for (unsigned i = 0; i < attributes.size(); i++) { - projection->addAttribute(attributes[i]); - } - - PartitionFunction* hash_function = - PartitionFunctionFactory::createGeneralModuloFunction( - number_of_partitions); - projection->DefinePartitonier(number_of_partitions, - getAttribute2(partition_attribute_name), - hash_function); - - projection_list_.push_back(projection); - return true; -} -bool TableDescriptor::createHashPartitionedProjection( - vector column_list, std::string partition_attribute_name, +RetCode TableDescriptor::createHashPartitionedProjection( + const vector& column_list, Attribute partition_attribute, unsigned number_of_partitions) { - LockGuard guard(lock_); + // LockGuard guard(update_lock_); + if (!update_lock_.try_lock()) { + LOG(WARNING) << "failed to lock update_lock, may someone is loading or " + "inserting data"; + return common::rResourceIsLocked; + } ProjectionID projection_id(table_id_, projection_list_.size()); ProjectionDescriptor* projection = new ProjectionDescriptor(projection_id); - // projection->projection_offset_=projection_list_.size(); - // projection->addAttribute(attributes[0]); for (unsigned i = 0; i < column_list.size(); i++) { projection->addAttribute(attributes[column_list[i]]); } @@ -119,24 +97,29 @@ bool TableDescriptor::createHashPartitionedProjection( PartitionFunction* hash_function = PartitionFunctionFactory::createGeneralModuloFunction( number_of_partitions); - // projection->partitioner = new Partitioner( - // number_of_partitions, attributes[partition_key_index], hash_function); - 
projection->DefinePartitonier(number_of_partitions, - getAttribute(partition_attribute_name), + projection->DefinePartitonier(number_of_partitions, partition_attribute, hash_function); projection_list_.push_back(projection); - return true; + // AddProjectionLocks(number_of_partitions); + // UpdateConnectorWithNewProj(number_of_partitions); + write_connector_->UpdateWithNewProj(); + update_lock_.release(); + return rSuccess; } -bool TableDescriptor::createHashPartitionedProjection( - vector attribute_list, std::string partition_attribute_name, + +RetCode TableDescriptor::createHashPartitionedProjection( + const vector& attribute_list, Attribute partition_attr, unsigned number_of_partitions) { - LockGuard guard(lock_); + // LockGuard guard(update_lock_); + if (!update_lock_.try_lock()) { + LOG(WARNING) << "failed to lock update_lock, may someone is loading or " + "inserting data"; + return common::rResourceIsLocked; + } ProjectionID projection_id(table_id_, projection_list_.size()); ProjectionDescriptor* projection = new ProjectionDescriptor(projection_id); - // projection->projection_offset_=projection_list_.size(); - projection->addAttribute(attributes[0]); // add row_id for (unsigned i = 0; i < attribute_list.size(); i++) { projection->addAttribute(attribute_list[i]); } @@ -144,14 +127,22 @@ bool TableDescriptor::createHashPartitionedProjection( PartitionFunction* hash_function = PartitionFunctionFactory::createGeneralModuloFunction( number_of_partitions); - projection->DefinePartitonier(number_of_partitions, - getAttribute(partition_attribute_name), + projection->DefinePartitonier(number_of_partitions, partition_attr, hash_function); projection_list_.push_back(projection); - return true; + // AddProjectionLocks(number_of_partitions); + write_connector_->UpdateWithNewProj(); + update_lock_.release(); + return rSuccess; } +// void TableDescriptor::AddProjectionLocks(int number_of_partitions) { +// vector locks; +// for (int i = 0; i < number_of_partitions; ++i) 
locks.push_back(Lock()); +// partitions_write_lock_.push_back(locks); +//} + bool TableDescriptor::isExist(const string& name) const { for (unsigned i = 0; i < attributes.size(); i++) { if (attributes[i].attrName == name) return true; @@ -206,6 +197,24 @@ Attribute TableDescriptor::getAttribute2(const std::string& name) const { return NULL; } +vector> TableDescriptor::GetAllPartitionsPath() const { + vector> write_paths; + for (int i = 0; i < getNumberOfProjection(); i++) { + vector prj_write_path; + prj_write_path.clear(); + for (int j = 0; + j < projection_list_[i]->getPartitioner()->getNumberOfPartitions(); + ++j) { + prj_write_path.push_back( + PartitionID(getProjectoin(i)->getProjectionID(), j).getPathAndName()); + } + write_paths.push_back(prj_write_path); + } + LOG(INFO) << " table:" << getTableName() << " has the below partition:"; + for (auto prj : write_paths) + for (auto part : prj) LOG(INFO) << part; + return write_paths; +} Schema* TableDescriptor::getSchema() const { const vector attributes = getAttributes(); std::vector columns; diff --git a/catalog/table.h b/catalog/table.h index 018268c92..bf39e9250 100755 --- a/catalog/table.h +++ b/catalog/table.h @@ -46,38 +46,72 @@ #include "../utility/lock_guard.h" using claims::catalog::ProjectionDescriptor; +// using claims::common::FilePlatform; using claims::utility::LockGuard; namespace claims { -namespace catalog { +namespace loader { +class DataInjector; +class TableFileConnector; +}; +namespace catalog { +using claims::loader::TableFileConnector; class TableDescriptor { public: - TableDescriptor() {} + friend class claims::loader::DataInjector; + friend class claims::loader::TableFileConnector; + + public: + TableDescriptor(); TableDescriptor(const string& name, const TableID table_id); virtual ~TableDescriptor(); - void addAttribute(Attribute attr); - bool addAttribute(string attname, data_type dt, unsigned max_length = 0, - bool unique = false, bool can_be_null = true); - - // void 
addProjection(vector id_list); - bool createHashPartitionedProjection(vector column_list, - ColumnOffset partition_key_index, - unsigned number_of_partitions); - bool createHashPartitionedProjection(vector column_list, - std::string partition_attribute_name, - unsigned number_of_partitions); - bool createHashPartitionedProjection(vector column_list, - std::string partition_attribute_name, - unsigned number_of_partitions); - bool createHashPartitionedProjectionOnAllAttribute( - std::string partition_attribute_name, unsigned number_of_partitions); bool isExist(const string& name) const; + inline TableID get_table_id() const { return table_id_; } inline string getTableName() const { return tableName; } + + vector> GetAllPartitionsPath() const; + + ProjectionDescriptor* getProjectoin(ProjectionOffset) const; + unsigned getNumberOfProjection() const; + vector* GetProjectionList() { + return &projection_list_; + } + // void addProjection(vector id_list); + RetCode createHashPartitionedProjection(vector column_list, + ColumnOffset partition_key_index, + unsigned number_of_partitions) { + return createHashPartitionedProjection( + column_list, attributes[partition_key_index], number_of_partitions); + } + RetCode createHashPartitionedProjection(vector column_list, + std::string partition_attribute_name, + unsigned number_of_partitions) { + return createHashPartitionedProjection( + column_list, getAttribute(partition_attribute_name), + number_of_partitions); + } + RetCode createHashPartitionedProjection(vector attribute_list, + std::string partition_attribute_name, + unsigned number_of_partitions) { + return createHashPartitionedProjection( + attribute_list, getAttribute(partition_attribute_name), + number_of_partitions); + } + RetCode createHashPartitionedProjectionOnAllAttribute( + std::string partition_attribute_name, unsigned number_of_partitions) { + return createHashPartitionedProjection( + attributes, getAttribute2(partition_attribute_name), + number_of_partitions); + 
} + ColumnOffset getColumnID(const string& attrName) const; - map > getColumnLocations(const string& attrName) const; + map> getColumnLocations(const string& attrName) const; + void addAttribute(Attribute attr); + bool addAttribute(string attname, data_type dt, unsigned max_length = 0, + bool unique = false, bool can_be_null = true); vector getAttributes() const { return attributes; } vector getAttributes(vector index_list) const { vector attribute_list; @@ -93,52 +127,107 @@ class TableDescriptor { } Attribute getAttribute(const std::string& name) const; Attribute getAttribute2(const std::string& name) const; - /* the following methods are considered to be deleted.*/ - void addColumn(ProjectionDescriptor* column); - inline string get_table_name() const { return tableName; } - inline TableID get_table_id() const { return table_id_; } - ProjectionDescriptor* getProjectoin(ProjectionOffset) const; - unsigned getNumberOfProjection() const; - vector* GetProjectionList() { - return &projection_list_; - } + inline unsigned int getNumberOfAttribute() { return attributes.size(); } Schema* getSchema() const; - inline void setRowNumber(unsigned long row_number) { - LockGuard guard(lock_); - row_number_ = row_number; - } - inline unsigned long getRowNumber() { return row_number_; } - inline unsigned long isEmpty() { return row_number_ == 0; } - inline unsigned int getNumberOfAttribute() { - return attributes.size(); - } // add by Yu + inline uint64_t getRowNumber() { return row_number_; } + inline bool isEmpty() { return row_number_ == 0; } inline bool HasDeletedTuples() { return has_deleted_tuples_; } inline void SetDeletedTuples(bool has_deleted_tuples) { has_deleted_tuples_ = has_deleted_tuples; } + /* + void LockPartition(int i, int j) { + assert(i < partitions_write_lock_.size() && "projection id range over"); + assert(j < partitions_write_lock_[i].size() && "partition id range over"); + partitions_write_lock_[i][j].acquire(); + } + void UnlockPartition(int i, int j) { + 
assert(i < partitions_write_lock_.size() && "projection id range over"); + assert(j < partitions_write_lock_[i].size() && "partition id range over"); + partitions_write_lock_[i][j].release(); + } + */ + + TableFileConnector& get_connector() { return *write_connector_; } + + private: + RetCode InitFileConnector(); + + RetCode createHashPartitionedProjection( + const vector& attribute_list, Attribute partition_attr, + unsigned number_of_partitions); + + RetCode createHashPartitionedProjection( + const vector& column_list, Attribute partition_attribute, + unsigned number_of_partitions); + + // RetCode UpdateConnectorWithNewProj(int partition_num) { + // // TODO(yukai) + // // int proj_index = projection_list_.size() - 1; + // // vector prj_write_path; + // // vector prj_locks; + // // vector prj_imps; + // // for (int j = 0; j < projection_list_[proj_index] + // // ->getPartitioner() + // // ->getNumberOfPartitions(); + // // ++j) { + // // string path = + // // PartitionID(getProjectoin(proj_index)->getProjectionID(), j) + // // .getPathAndName(); + // // prj_write_path.push_back(path); + // // + // // prj_locks.push_back(Lock()); + // // + // // + // prj_imps.push_back(FileHandleImpFactory::Instance().CreateFileHandleImp( + // // platform_, path)); + // // } + // // write_connector_->write_path_name_.push_back(prj_write_path); + // + // DELETE_PTR(write_connector_); + // write_connector_ = new TableFileConnector( + // Config::local_disk_mode ? 
FilePlatform::kDisk : FilePlatform::kHdfs, + // this); + // } + + void AddProjectionLocks(int number_of_partitions); + + // void InitLocks() { + // if (partitions_write_lock_.size() != getNumberOfProjection()) { + // partitions_write_lock_.clear(); + // for (auto it : projection_list_) { + // AddProjectionLocks(it->getPartitioner()->getNumberOfPartitions()); + // } + // } + // } protected: string tableName; vector attributes; TableID table_id_; vector projection_list_; - unsigned long row_number_; + uint64_t row_number_; bool has_deleted_tuples_ = false; - Lock lock_; - // delete for debugging - // hashmap columns; + // Loading or inserting blocks updating table(create new projection and so + // on), vice versa. + SpineLock update_lock_; + // vector> partitions_write_lock_; + + TableFileConnector* write_connector_ = NULL; friend class boost::serialization::access; template - void serialize(Archive& ar, const unsigned int version) { + void serialize(Archive& ar, const unsigned int version) { // NOLINT ar& tableName& attributes& table_id_& projection_list_& row_number_& has_deleted_tuples_; + // InitLocks(); + InitFileConnector(); } }; } /* namespace catalog */ } /* namespace claims */ -#endif // CATALOG_TABLE1_H_ +#endif // CATALOG_TABLE_H_ diff --git a/codegen/Makefile.am b/codegen/Makefile.am index f6d71c942..f5825045a 100644 --- a/codegen/Makefile.am +++ b/codegen/Makefile.am @@ -1,7 +1,6 @@ AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ -I${GTEST_HOME}/include \ -I/usr/local/include -D_DEBUG -D_GNU_SOURCE -D__STDC_CONSTANT_MACROS \ -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS \ diff --git a/common/Block/BlockStream.cpp b/common/Block/BlockStream.cpp index ef499d835..044f460a1 100755 --- a/common/Block/BlockStream.cpp +++ b/common/Block/BlockStream.cpp @@ -6,8 +6,11 @@ */ #include #include +#include +#include #include "../../configure.h" #include "./BlockStream.h" + 
BlockStreamFix::BlockStreamFix(unsigned block_size, unsigned tuple_size) : BlockStreamBase(block_size), tuple_size_(tuple_size) { free_ = start; @@ -33,6 +36,17 @@ BlockStreamBase* BlockStreamBase::createBlock(const Schema* const& schema, return new BlockStreamVar(block_size, schema); } } + +BlockStreamBase* BlockStreamBase::createBlock2(const Schema* const& schema, + unsigned block_size) { + if (schema->getSchemaType() == Schema::fixed) { + return new BlockStreamFix(block_size - sizeof(BlockStreamFix::tail_info), + schema->getTupleMaxSize(), 0, 0); + } else { + return new BlockStreamVar(block_size, schema); + } +} + BlockStreamBase* BlockStreamBase::createBlockWithDesirableSerilaizedSize( Schema* schema, unsigned block_size) { if (schema->getSchemaType() == Schema::fixed) { @@ -55,19 +69,9 @@ void* BlockStreamFix::getBlockDataAddress() { return start; } bool BlockStreamFix::switchBlock(BlockStreamBase& block) { BlockStreamFix* blockfix = (BlockStreamFix*)█ assert(blockfix->BlockSize == BlockSize); - - /* swap the data pointer */ - char* data_temp; - data_temp = blockfix->start; - blockfix->start = (this->start); - this->start = data_temp; - - /* swap the free pointer */ - char* free_temp; - free_temp = blockfix->free_; - blockfix->free_ = this->free_; - this->free_ = free_temp; - + std::swap(blockfix->start, start); + std::swap(blockfix->free_, free_); + std::swap(blockfix->tuple_size_, tuple_size_); return true; } @@ -111,6 +115,7 @@ bool BlockStreamFix::serialize(Block& block) const { printf("tuple count=%d in serialize()\n", tail->tuple_count); assert(false); } + // LOG(ERROR) << "||||" << tail->tuple_count; // /* the number of tuples*/ // int* diff --git a/common/Block/BlockStream.h b/common/Block/BlockStream.h index c16ba4a58..d6b5bdec5 100755 --- a/common/Block/BlockStream.h +++ b/common/Block/BlockStream.h @@ -107,7 +107,8 @@ class BlockStreamBase : public Block { }; static BlockStreamBase* createBlock(const Schema* const& schema, unsigned block_size); - + 
static BlockStreamBase* createBlock2(const Schema* const& schema, + unsigned block_size); /** * @li:I add this function in order to end the chaos of setting the block size * when initializing the BlockStream. -_- diff --git a/common/Block/BlockStreamBuffer.cpp b/common/Block/BlockStreamBuffer.cpp index c9af11b8a..4ef85a733 100755 --- a/common/Block/BlockStreamBuffer.cpp +++ b/common/Block/BlockStreamBuffer.cpp @@ -7,104 +7,117 @@ #include "BlockStreamBuffer.h" -BlockStreamBuffer::BlockStreamBuffer(unsigned block_size, unsigned block_count, Schema* schema):received_block_count_(0),total_block_count_(block_count),block_size_(block_size) { - lock_.acquire(); - for(unsigned i=0;i<><><><><><><><>!\n"); - while(!block_stream_empty_list_.empty()){ - const BlockStreamBase* block=block_stream_empty_list_.front(); - block_stream_empty_list_.pop_front(); -// block->~BlockStreamBase(); - delete block; - } + // printf("BlockStreawmBuffer being deconstructed<><><><><><><><><>!\n"); + while (!block_stream_empty_list_.empty()) { + const BlockStreamBase* block = block_stream_empty_list_.front(); + block_stream_empty_list_.pop_front(); + // block->~BlockStreamBase(); + delete block; + } - while(!block_stream_used_list_.empty()){ - const BlockStreamBase* block=block_stream_used_list_.front(); - block_stream_used_list_.pop_front(); -// block->~BlockStreamBase(); - delete block; - } -// assert(block_stream_used_list_.empty()); - sema_empty_block_.destroy(); -// printf("BlockStreawmBuffer being deconstructed<><><><><><><><><>!\n"); + while (!block_stream_used_list_.empty()) { + const BlockStreamBase* block = block_stream_used_list_.front(); + block_stream_used_list_.pop_front(); + // block->~BlockStreamBase(); + delete block; + } + // assert(block_stream_used_list_.empty()); + sema_empty_block_.destroy(); + // printf("BlockStreawmBuffer being deconstructed<><><><><><><><><>!\n"); } -void BlockStreamBuffer::insertBlock(BlockStreamBase* block){ +void 
BlockStreamBuffer::insertBlock(BlockStreamBase* block) { + sema_empty_block_.wait(); + lock_.acquire(); - sema_empty_block_.wait(); - lock_.acquire(); + BlockStreamBase* empty_block = block_stream_empty_list_.front(); + block_stream_empty_list_.pop_front(); + empty_block->setEmpty(); + // assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); - BlockStreamBase* empty_block=block_stream_empty_list_.front(); - block_stream_empty_list_.pop_front(); - -// assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); - - if(block->isIsReference()){ - empty_block->deepCopy(block); - } - else{ - /* swap the data address of empty block and the input block to avoid memory copy.*/ - block->switchBlock(*empty_block); - } - /* add the empty block which now hold the read data into the used block list*/ - block_stream_used_list_.push_back(empty_block); -// printf("[after inserted]: sema=%d, empty_list=%d\n",sema_empty_block_.get_value(),block_stream_empty_list_.size()); - received_block_count_++; - lock_.release(); + if (block->isIsReference()) { + empty_block->deepCopy(block); + } else { + /* swap the data address of empty block and the input block to avoid memory + * copy.*/ + block->switchBlock(*empty_block); + } + // LOG(INFO) << "block insert before has " << block->getTuplesInBlock() + // << " tuples and after " << empty_block->getTuplesInBlock() + // << " tuples " << endl; + /* add the empty block which now hold the read data into the used block list*/ + block_stream_used_list_.push_back(empty_block); + // printf("[after inserted]: sema=%d, + // empty_list=%d\n",sema_empty_block_.get_value(),block_stream_empty_list_.size()); + received_block_count_++; + lock_.release(); } -bool BlockStreamBuffer::getBlock(BlockStreamBase &block){ -// assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); - lock_.acquire(); -// assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); - if(!block_stream_used_list_.empty()){ - 
BlockStreamBase* fetched_block=block_stream_used_list_.front(); - block_stream_used_list_.pop_front(); - lock_.release(); - - fetched_block->switchBlock(block); - lock_.acquire(); - block_stream_empty_list_.push_back(fetched_block); - sema_empty_block_.post(); -// assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); -// printf("[after get(suc)]:sema=%d, empty_list=%d\n",sema_empty_block_.get_value(),block_stream_empty_list_.size()); - lock_.release(); - return true; - } -// printf("[after get(fail)]sema=%d, empty_list=%d\n",sema_empty_block_.get_value(),block_stream_empty_list_.size()); - lock_.release(); - return false; +bool BlockStreamBuffer::getBlock(BlockStreamBase& block) { + // assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); + lock_.acquire(); + // assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); + if (!block_stream_used_list_.empty()) { + BlockStreamBase* fetched_block = block_stream_used_list_.front(); + block_stream_used_list_.pop_front(); + lock_.release(); + // LOG(INFO) << "block fetch before has [ " + // << fetched_block->getTuplesInBlock() << " ] tuples at " + // << pthread_self(); + fetched_block->switchBlock(block); + // LOG(INFO) << " and after [ " << block.getTuplesInBlock() << " ]tuples + // " + // << pthread_self(); + lock_.acquire(); + block_stream_empty_list_.push_back(fetched_block); + sema_empty_block_.post(); + // assert(block_stream_empty_list_.size()==sema_empty_block_.get_value()); + // printf("[after get(suc)]:sema=%d, + // empty_list=%d\n",sema_empty_block_.get_value(),block_stream_empty_list_.size()); + lock_.release(); + return true; + } + // printf("[after get(fail)]sema=%d, + // empty_list=%d\n",sema_empty_block_.get_value(),block_stream_empty_list_.size()); + lock_.release(); + return false; } -bool BlockStreamBuffer::Empty() { - - lock_.acquire(); - const bool ret=block_stream_used_list_.empty(); - lock_.release(); - return ret; +bool BlockStreamBuffer::Empty() { 
+ lock_.acquire(); + const bool ret = block_stream_used_list_.empty(); + lock_.release(); + return ret; } -unsigned BlockStreamBuffer::getBlockInBuffer(){ - - lock_.acquire(); -// if(block_stream_used_list_.empty()) -// return 0; -// return block_stream_used_list_.size(); - const unsigned ret=block_stream_used_list_.size(); - lock_.release(); - return ret; +unsigned BlockStreamBuffer::getBlockInBuffer() { + lock_.acquire(); + // if(block_stream_used_list_.empty()) + // return 0; + // return block_stream_used_list_.size(); + const unsigned ret = block_stream_used_list_.size(); + lock_.release(); + return ret; } -double BlockStreamBuffer::getBufferUsage(){ - return (double)getBlockInBuffer()/(double)total_block_count_; +double BlockStreamBuffer::getBufferUsage() { + return (double)getBlockInBuffer() / (double)total_block_count_; } -long BlockStreamBuffer::getReceivedDataSizeInKbytes(){ - return received_block_count_*block_size_/1024; +long BlockStreamBuffer::getReceivedDataSizeInKbytes() { + return received_block_count_ * block_size_ / 1024; } diff --git a/common/Block/DynamicBlockBuffer.cpp b/common/Block/DynamicBlockBuffer.cpp index 55fc7db19..74647bb0a 100755 --- a/common/Block/DynamicBlockBuffer.cpp +++ b/common/Block/DynamicBlockBuffer.cpp @@ -11,88 +11,81 @@ #include "BlockStream.h" -DynamicBlockBuffer::DynamicBlockBuffer() { - // TODO Auto-generated constructor stub - -} -DynamicBlockBuffer::DynamicBlockBuffer(const DynamicBlockBuffer& r){ - this->block_list_=r.block_list_; - this->lock_=r.lock_; +DynamicBlockBuffer::DynamicBlockBuffer() { block_list_.clear(); } +DynamicBlockBuffer::DynamicBlockBuffer(const DynamicBlockBuffer& r) { + this->block_list_ = r.block_list_; + this->lock_ = r.lock_; } DynamicBlockBuffer::~DynamicBlockBuffer() { - for(unsigned i=0;icur_ = it.cur_; + this->dbb_ = it.dbb_; + this->lock_ = it.lock_; } -DynamicBlockBuffer::Iterator::Iterator(const Iterator & it){ - this->cur_=it.cur_; - this->dbb_=it.dbb_; - this->lock_=it.lock_; 
+BlockStreamBase* DynamicBlockBuffer::Iterator::nextBlock() { + BlockStreamBase* ret; + ret = dbb_->getBlock(cur_); + if (ret > 0) { + cur_++; + } + return ret; } -BlockStreamBase* DynamicBlockBuffer::Iterator::nextBlock(){ - BlockStreamBase* ret; - ret=dbb_->getBlock(cur_); - if(ret>0){ - cur_++; - } - return ret; -} -BlockStreamBase* DynamicBlockBuffer::Iterator::atomicNextBlock(){ - - lock_.acquire(); - BlockStreamBase* ret=nextBlock(); - lock_.release(); - return ret; +BlockStreamBase* DynamicBlockBuffer::Iterator::atomicNextBlock() { + lock_.acquire(); + BlockStreamBase* ret = nextBlock(); + lock_.release(); + return ret; } -DynamicBlockBuffer::Iterator DynamicBlockBuffer::createIterator()const{ - return Iterator(this); +DynamicBlockBuffer::Iterator DynamicBlockBuffer::createIterator() const { + return Iterator(this); } -void DynamicBlockBuffer::destory(){ - DynamicBlockBuffer::Iterator it=this->createIterator(); - BlockStreamBase* block_to_deallocate; - while(block_to_deallocate=it.nextBlock()){ - delete block_to_deallocate; - } - this->block_list_.clear(); +void DynamicBlockBuffer::destory() { + DynamicBlockBuffer::Iterator it = this->createIterator(); + BlockStreamBase* block_to_deallocate; + while (block_to_deallocate = it.nextBlock()) { + delete block_to_deallocate; + } + this->block_list_.clear(); } -unsigned DynamicBlockBuffer::getNumberOfBlocks(){ - lock_.acquire(); - unsigned ret; - ret= block_list_.size(); - lock_.release(); - return ret; +unsigned DynamicBlockBuffer::getNumberOfBlocks() { + lock_.acquire(); + unsigned ret; + ret = block_list_.size(); + lock_.release(); + return ret; } -unsigned long DynamicBlockBuffer::getNumberOftuples()const{ - unsigned long ret=0; - DynamicBlockBuffer::Iterator it=this->createIterator(); - BlockStreamBase* block; - while(block=(BlockStreamBase*)it.nextBlock()){ - ret+=block->getTuplesInBlock(); - } - return ret; +unsigned long DynamicBlockBuffer::getNumberOftuples() const { + unsigned long ret = 0; + 
DynamicBlockBuffer::Iterator it = this->createIterator(); + BlockStreamBase* block; + while (block = (BlockStreamBase*)it.nextBlock()) { + ret += block->getTuplesInBlock(); + } + return ret; } diff --git a/common/Expression/Makefile.am b/common/Expression/Makefile.am index c3e9b040e..44b7f12e2 100644 --- a/common/Expression/Makefile.am +++ b/common/Expression/Makefile.am @@ -1,8 +1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS\ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -17,7 +16,6 @@ LDADD = ../../catalog/libcatalog.a \ ../../common/libcommon.a \ ../../common/Schema/libschema.a \ ../../utility/libutility.a \ - ${THERON_HOME}/Lib/libtherond.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_system.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ diff --git a/common/Makefile.am b/common/Makefile.am index 267da40b2..80da904dd 100644 --- a/common/Makefile.am +++ b/common/Makefile.am @@ -1,7 +1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS \ +-I${HADOOP_HOME}/include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ -I${GTEST_HOME}/include AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lpthread -lxs @@ -21,7 +21,7 @@ endif # types/libtypes.a # ${BOOST_HOME}/stage/lib/libboost_serialization.a \ # ${BOOST_HOME}/stage/lib/libboost_serialization.so \ -# ${THERON_HOME}/Lib/libtherond.a \ +# # ${GTEST_HOME}/libgtest.a noinst_LIBRARIES=libcommon.a @@ -33,7 +33,6 @@ libcommon_a_SOURCES = \ Logging.cpp Logging.h \ Mapping.cpp Mapping.h \ Message.cpp Message.h \ - TimeOutReceiver.cpp TimeOutReceiver.h \ TypeCast.cpp TypeCast.h \ TypePromotionMap.cpp TypePromotionMap.h \ data_type.h data_type.cpp functions.h \ diff --git a/common/Message.cpp b/common/Message.cpp index 34cc21b50..18545d984 100755 --- a/common/Message.cpp +++ 
b/common/Message.cpp @@ -7,27 +7,34 @@ #include "Message.h" -THERON_DEFINE_REGISTERED_MESSAGE(BlockStatusRespond) -THERON_DEFINE_REGISTERED_MESSAGE(BlockStatusMessage) -THERON_DEFINE_REGISTERED_MESSAGE(StorageBudgetMessage) -THERON_DEFINE_REGISTERED_MESSAGE(RegisterStorageRespond) -THERON_DEFINE_REGISTERED_MESSAGE(HeartBeatMessage) -THERON_DEFINE_REGISTERED_MESSAGE(HeartBeatRespond) -THERON_DEFINE_REGISTERED_MESSAGE(MatcherMessage) -THERON_DEFINE_REGISTERED_MESSAGE(MatcherRespond) -THERON_DEFINE_REGISTERED_MESSAGE(Message256) -THERON_DEFINE_REGISTERED_MESSAGE(Message4K) -THERON_DEFINE_REGISTERED_MESSAGE(int) -THERON_DEFINE_REGISTERED_MESSAGE(unsigned long long int) -THERON_DEFINE_REGISTERED_MESSAGE(NodeRegisterMessage) -THERON_DEFINE_REGISTERED_MESSAGE(PartitionBindingMessage) -THERON_DEFINE_REGISTERED_MESSAGE(ExchangeID) -THERON_DEFINE_REGISTERED_MESSAGE(PartitionUnbindingMessage) +#include "../exec_tracker/segment_exec_status.h" +using claims::SegmentExecStatus; +void PhysicalQueryPlan::run() { + SegmentExecStatus* segment_exec_status = new SegmentExecStatus( + make_pair(query_id_, segment_id_ * kMaxNodeNum + target_node_id_), + coor_node_id_); + segment_exec_status->RegisterToTracker(); + segment_exec_status->UpdateStatus( + SegmentExecStatus::ExecStatus::kOk, + "physical plan reveived succeed and begin to open()", 0, true); + bool ret = block_stream_iterator_root_->Open(segment_exec_status); + if (ret) { + segment_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kOk, + "physical plan open() succeed", 0, true); + while (block_stream_iterator_root_->Next(segment_exec_status, 0)) { + } -void PhysicalQueryPlan::run() -{ - block_stream_iterator_root_->Open(); - while(block_stream_iterator_root_->Next(0)); - block_stream_iterator_root_->Close(); + segment_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kOk, + "physical plan next() succeed", 0, true); + } else { + segment_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kError, + "physical 
plan open() failed", 0, true); + } + ret = block_stream_iterator_root_->Close(segment_exec_status); + segment_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kDone, + "physical plan close() succeed", 0, true); + + // segment_exec_status->UnRegisterFromTracker(); + // delete segment_exec_status; } diff --git a/common/Message.h b/common/Message.h index a33d664f3..647ca8499 100755 --- a/common/Message.h +++ b/common/Message.h @@ -21,29 +21,38 @@ #include #include #include -#include "Theron/Defines.h" -#include "Theron/Theron.h" +#include +#include +#include +#include + +#include "../common/memory_handle.h" #include "serialization/RegisterDerivedClass.h" #include "../physical_operator/physical_operator_base.h" #include "../Debug.h" +#include "../Environment.h" +#include "../node_manager/base_node.h" #include "../storage/StorageLevel.h" #include "ids.h" - +using claims::NodeAddr; using claims::physical_operator::PhysicalOperatorBase; // It's better to use fixed length information for implementation concern. 
-THERON_DECLARE_REGISTERED_MESSAGE(ExchangeID) struct StorageBudgetMessage { - explicit StorageBudgetMessage(const int& disk_budget, - const int& memory_budget, const int& nodeid) + StorageBudgetMessage(const int& disk_budget, const int& memory_budget, + const int& nodeid) : disk_budget(disk_budget), memory_budget(memory_budget), nodeid(nodeid) {} + StorageBudgetMessage() : nodeid(1000000), disk_budget(0), memory_budget(0) {} int disk_budget; int memory_budget; int nodeid; + bool operator==(const StorageBudgetMessage& lhs) const { + return lhs.nodeid == nodeid && lhs.memory_budget == memory_budget && + disk_budget == lhs.disk_budget; + } }; -THERON_DECLARE_REGISTERED_MESSAGE(StorageBudgetMessage) struct PartitionBindingMessage { PartitionBindingMessage(const PartitionID& pid, const unsigned& num, @@ -53,13 +62,11 @@ struct PartitionBindingMessage { unsigned number_of_chunks; StorageLevel storage_level; }; -THERON_DECLARE_REGISTERED_MESSAGE(PartitionBindingMessage) struct PartitionUnbindingMessage { PartitionUnbindingMessage(const PartitionID& pid) : partition_id(pid){}; PartitionID partition_id; }; -THERON_DECLARE_REGISTERED_MESSAGE(PartitionUnbindingMessage) struct RegisterStorageRespond { explicit RegisterStorageRespond(const char* const text) { @@ -68,7 +75,6 @@ struct RegisterStorageRespond { } char mText[REGISTER_MESSAGE_LEN]; }; -THERON_DECLARE_REGISTERED_MESSAGE(RegisterStorageRespond) struct HeartBeatMessage { explicit HeartBeatMessage(const char* const text) { @@ -77,7 +83,6 @@ struct HeartBeatMessage { } char mText[HEARTBEAT_MESSAGE_LEN]; }; -THERON_DECLARE_REGISTERED_MESSAGE(HeartBeatMessage) struct HeartBeatRespond { explicit HeartBeatRespond(const char* const text) { @@ -86,7 +91,6 @@ struct HeartBeatRespond { } char mText[HEARTBEAT_MESSAGE_LEN]; }; -THERON_DECLARE_REGISTERED_MESSAGE(HeartBeatRespond) struct BlockStatusMessage { explicit BlockStatusMessage(const char* const text) { @@ -95,7 +99,6 @@ struct BlockStatusMessage { } char 
mText[BLOCK_STATUS_MESSAGE_LEN]; }; -THERON_DECLARE_REGISTERED_MESSAGE(BlockStatusMessage) struct BlockStatusRespond { explicit BlockStatusRespond(const char* const text) { @@ -104,7 +107,6 @@ struct BlockStatusRespond { } char mText[BLOCK_STATUS_MESSAGE_LEN]; }; -THERON_DECLARE_REGISTERED_MESSAGE(BlockStatusRespond) struct MatcherMessage { explicit MatcherMessage(const char* const filename, const char* const bmi) { @@ -116,7 +118,6 @@ struct MatcherMessage { char filenameText[MATCHER_MESSAGE_FILENAME_LEN]; char bmiText[MATCHER_MESSAGE_BMI_LEN]; }; -THERON_DECLARE_REGISTERED_MESSAGE(MatcherMessage) struct MatcherRespond { explicit MatcherRespond(const char* const text) { @@ -125,7 +126,6 @@ struct MatcherRespond { } char mText[MATCHER_MESSAGE_PROJECT_LEN]; }; -THERON_DECLARE_REGISTERED_MESSAGE(MatcherRespond) /* NodeRegisterMessage has the same function compared with NodeConnectionMessage * except for that @@ -148,22 +148,18 @@ struct NodeRegisterMessage { unsigned ip; unsigned port; }; -THERON_DECLARE_REGISTERED_MESSAGE(NodeRegisterMessage) struct Message256 { unsigned length; char message[256 - sizeof(unsigned)]; static unsigned Capacity() { return 256 - sizeof(unsigned); } }; -THERON_DECLARE_REGISTERED_MESSAGE(Message256) + struct Message4K // temporary ways to expand the the serialization capacity { unsigned length; char message[4096 * 50 - sizeof(unsigned)]; static unsigned Capacity() { return 4096 * 50 - sizeof(unsigned); } }; -THERON_DECLARE_REGISTERED_MESSAGE(Message4K) -THERON_DECLARE_REGISTERED_MESSAGE(int) -THERON_DECLARE_REGISTERED_MESSAGE(unsigned long long int) template static T Deserialize(Message256 input) { std::string received(input.message, input.length); @@ -287,6 +283,21 @@ static Message4K Serialize4K(T& object) { // // return ret; } +template +string TextSerialize(const T& obj) { + stringstream ss; + boost::archive::text_oarchive oa(ss); + oa << obj; + return ss.str(); +} +template +T TextDeserialize(const string& obj) { + T ret; + 
stringstream ss(obj); + boost::archive::text_iarchive ia(ss); + ia >> ret; + return ret; +} class CreateTableRespond { public: @@ -365,10 +376,20 @@ class RegisterSlaveMessage { */ class PhysicalQueryPlan { public: - PhysicalQueryPlan(PhysicalOperatorBase* it) - : block_stream_iterator_root_(it){}; + PhysicalQueryPlan(PhysicalOperatorBase* it, NodeID node_id, + u_int64_t query_id, u_int32_t segment_id, + unsigned int coor_node_id) + : block_stream_iterator_root_(it), + target_node_id_(node_id), + query_id_(query_id), + segment_id_(segment_id), + coor_node_id_(coor_node_id) {} PhysicalQueryPlan(const PhysicalQueryPlan& r) { block_stream_iterator_root_ = r.block_stream_iterator_root_; + target_node_id_ = r.target_node_id_; + query_id_ = r.query_id_; + coor_node_id_ = r.coor_node_id_; + segment_id_ = r.segment_id_; } PhysicalQueryPlan() : block_stream_iterator_root_(0){}; @@ -384,7 +405,7 @@ class PhysicalQueryPlan { * the undesirable destruction of iterator caused by the default destructor of * IteratorMessage */ - void destory() { block_stream_iterator_root_->~PhysicalOperatorBase(); } + void destory() { DELETE_PTR(block_stream_iterator_root_); } void run(); static PhysicalQueryPlan deserialize(Message256 message) { return Deserialize(message); @@ -403,14 +424,28 @@ class PhysicalQueryPlan { return Serialize4K(input); } + static string TextSerializePlan(const PhysicalQueryPlan& input) { + return TextSerialize(input); + } + static PhysicalQueryPlan TextDeserializePlan(const string& message) { + return TextDeserialize(message); + } + u_int64_t get_query_id() { return query_id_; } + u_int32_t get_segment_id_() { return segment_id_; } + private: PhysicalOperatorBase* block_stream_iterator_root_; + NodeID target_node_id_; + u_int64_t query_id_; + u_int32_t segment_id_; + unsigned int coor_node_id_; friend class boost::serialization::access; template void serialize(Archive& ar, const unsigned int version) { Register_Schemas(ar); Register_Block_Stream_Iterator(ar); - ar& 
block_stream_iterator_root_; + ar& block_stream_iterator_root_& target_node_id_& query_id_& coor_node_id_& + segment_id_; // ar & block_stream_iterator_root_; } }; diff --git a/common/Schema/Makefile.am b/common/Schema/Makefile.am index 736b7c887..44222467f 100644 --- a/common/Schema/Makefile.am +++ b/common/Schema/Makefile.am @@ -2,8 +2,8 @@ AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${GTEST_HOME}/include - +-I${GTEST_HOME}/include \ +-I${HADOOP_HOME}/include AM_LDFLAGS=-lc -lm -lrt lboost_serialization -lxs if OPT_TCMALLOC diff --git a/common/Schema/Schema.cpp b/common/Schema/Schema.cpp index ceaa3ff89..91bbed5f5 100755 --- a/common/Schema/Schema.cpp +++ b/common/Schema/Schema.cpp @@ -10,9 +10,13 @@ #include #include +#include "../../common/memory_handle.h" Schema::Schema(const std::vector& columns) : columns(columns) {} Schema::Schema(const Schema& r) { this->columns = r.columns; } -Schema::~Schema() {} +Schema::~Schema() { +// for (auto& i : columns) DELETE_PTR(i.operate); +// columns.clear(); +} unsigned Schema::getncolumns() const { return columns.size(); } diff --git a/common/Schema/Test/Makefile.am b/common/Schema/Test/Makefile.am index 8f1c396ef..b3b8cf975 100644 --- a/common/Schema/Test/Makefile.am +++ b/common/Schema/Test/Makefile.am @@ -1,9 +1,7 @@ - AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ -I${GTEST_HOME}/include @@ -23,10 +21,9 @@ LDADD = ../libschema.a \ ../../../logical_operator/liblogicalqueryplan.a \ ../../../common/Block/libblock.a \ ../../libcommon.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a \ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.so \ - ${THERON_HOME}/Lib/libtherond.a \ + 
${HADOOP_HOME}/lib/native/libhdfs.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ ${GTEST_HOME}/libgtest.a diff --git a/common/TimeOutReceiver.cpp b/common/TimeOutReceiver.cpp deleted file mode 100755 index 8b8c1162f..000000000 --- a/common/TimeOutReceiver.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* - * TimeOutReceiver.cpp - * - * Created on: Oct 25, 2013 - * Author: wangli - */ - -#include "TimeOutReceiver.h" -#include "../utility/rdtsc.h" -// -TimeOutReceiver::TimeOutReceiver(Theron::EndPoint *endpoint) -:Theron::Receiver(*endpoint){ - -} -TimeOutReceiver::TimeOutReceiver(Theron::EndPoint *endpoint,const char* name) -:Theron::Receiver(*endpoint,name){ - -} - -TimeOutReceiver::~TimeOutReceiver() { -} -unsigned TimeOutReceiver::TimeOutWait(unsigned expected_message_count,unsigned time_out_in_ms){ - unsigned long long int start=curtick(); - unsigned count(0); - while(count -#ifdef DMALLOC -#include "dmalloc.h" -#endif -#ifndef TIMEOUTRECEIVER_H_ -#define TIMEOUTRECEIVER_H_ - -class TimeOutReceiver:public Theron::Receiver { -public: - TimeOutReceiver(Theron::EndPoint *endpoint); - TimeOutReceiver(Theron::EndPoint *endpoint,const char* name); - virtual ~TimeOutReceiver(); - /** - * This function supports timeout Wait. - * The calling of this method will be blocked until one of the following conditions satisfied. - * (1) expected number of messages are received before the timeout. Then the return value is the number - * of expected messages. - * (2) The time is out before expected number of messages are received. Then the return value is the number - * of received messages. - * E.g., TimeOutWait(10,1000)==10 will return true if 10 messages are received within 1 second, and will false - * if less than 10 messages are received within 1 second. 
- * - */ - unsigned TimeOutWait(unsigned expected_message_count,unsigned time_out_in_ms); -}; - -#endif /* TIMEOUTRECEIVER_H_ */ diff --git a/common/data_type.cpp b/common/data_type.cpp index ea4ae99e8..b64dc58ae 100644 --- a/common/data_type.cpp +++ b/common/data_type.cpp @@ -46,6 +46,17 @@ using claims::common::rIncorrectData; using claims::common::rInvalidNullData; using claims::common::kErrorMessage; using namespace claims::common; +int null_int_value = NULL_INT; +float null_float_value = NULL_FLOAT; +double null_double_value = NULL_DOUBLE; +unsigned long null_u_long_value = ULONG_LONG_MAX; +char null_string_value[] = {1}; +date null_date_value(NULL_DATE); +ptime null_datetime_value(NULL_DATETIME); +short null_small_int_value = NULL_SMALL_INT; +unsigned short null_u_small_int_value = NULL_U_SMALL_INT; +Decimal null_decimal_value = Decimal::CreateNullDecimal(); +int null_boolean_value = 2; /** * if a string to input is warning, we modify it to a right value * and return it's warning-code @@ -475,6 +486,14 @@ RetCode OperateUSmallInt::CheckSet(string& str) const { /* * ToDo There is still some work for decimal type */ + +/* +bool OperateDecimal::CheckStr(string str) const { + + return true; +} +*/ + RetCode OperateDecimal::CheckSet(string& str) const { RetCode ret = rSuccess; if ((str == "" || str == "NULL") && nullable) return rSuccess; @@ -483,12 +502,15 @@ RetCode OperateDecimal::CheckSet(string& str) const { ELOG(ret, str); return ret; } - if (Decimal::StringToDecimal(this->precision_, this->scale_, str)) { + // it is checked in StrtoDecimal in class Decimal, it will be zero if str is invalied + /* + if (CheckStr(str)) { ret = rSuccess; } else { ret = rInvalidInsertData; ELOG(ret, str); } + */ return ret; } diff --git a/common/data_type.h b/common/data_type.h index 0164bba2f..5372e39ca 100755 --- a/common/data_type.h +++ b/common/data_type.h @@ -44,7 +44,7 @@ using boost::lexical_cast; using namespace decimal; #define DATA_TYPE_NUMBER 20 enum data_type { - 
t_smallInt, + t_smallInt = 0, t_int, t_u_long, t_float, @@ -111,6 +111,18 @@ typedef void (*fun)(void*, void*); #define NULL_U_SMALL_INT USHRT_MAX #define NULL_BOOLEAN 2 +extern int null_int_value; +extern float null_float_value; +extern double null_double_value; +extern unsigned long null_u_long_value; +extern char null_string_value[]; +extern date null_date_value; +extern ptime null_datetime_value; +extern short null_small_int_value; +extern unsigned short null_u_small_int_value; +extern Decimal null_decimal_value; +extern int null_boolean_value; + static NValue nvalue_null = NValue::getDecimalValueFromString( "99999999999999999999999999.999999999999"); const int max_double_length = 1 + 308; @@ -180,9 +192,11 @@ inline void IncreaseByOne(void* target, void* increment) { template <> inline void IncreaseByOne(void* target, void* increment) {} +static Decimal decimalone(1, 0, "1"); + template <> inline void IncreaseByOne(void* target, void* increment) { - *(Decimal*)target = ((Decimal*)target)->op_add(Decimal(1, 0, "1")); + *(Decimal*)target = ((Decimal*)target)->op_add(decimalone); } template //暂时先实现这点 inline void ADD_IncreaseByOne(void* target, void* increment) { @@ -195,7 +209,7 @@ inline void ADD_IncreaseByOne(void* target, void* increment) {} template <> inline void ADD_IncreaseByOne(void* target, void* increment) { *(Decimal*)target = ((Decimal*)target)->op_add(*(Decimal*)increment); // add - *(Decimal*)target = ((Decimal*)target)->op_add(Decimal(1, 0, "1")); + *(Decimal*)target = ((Decimal*)target)->op_add(decimalone); } // template<> // inline void ADD_IncreaseByOne(void* target, void* increment) @@ -254,6 +268,7 @@ class Operate { virtual Operate* duplicateOperator() const = 0; inline virtual bool setNull(void* value) = 0; + inline virtual bool getNull(void* value) = 0; inline virtual bool isNull(void* value) const = 0; inline virtual RetCode CheckSet(string& str) const = 0; @@ -331,6 +346,12 @@ class OperateInt : public Operate { return true; } + inline 
bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_int_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(int*)value) == NULL_INT) return true; return false; @@ -408,6 +429,12 @@ class OperateFloat : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_float_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(int*)value) == (int)NULL_FLOAT) return true; @@ -484,6 +511,12 @@ class OperateDouble : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_double_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(int*)value) == (int)NULL_DOUBLE) return true; @@ -560,6 +593,12 @@ class OperateULong : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_u_long_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(unsigned long*)value) == NULL_U_LONG) return true; @@ -648,6 +687,12 @@ class OperateString : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_boolean_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(char*)value) == NULL_STRING) return true; return false; @@ -749,6 +794,12 @@ class OperateDate : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_date_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(date*)value).is_neg_infinity() == true) return true; @@ -832,6 +883,13 @@ class OperateTime : public Operate { return true; } + inline bool getNull(void* 
value) { + if (this->nullable == false) return false; + time_duration null_time_value(NULL_TIME); + value = &null_time_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(time_duration*)value).is_neg_infinity() == true) @@ -919,6 +977,12 @@ class OperateDatetime : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_datetime_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(ptime*)value).is_neg_infinity() == true) return true; @@ -1000,6 +1064,12 @@ class OperateSmallInt : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_small_int_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(short*)value) == NULL_SMALL_INT) return true; @@ -1079,6 +1149,12 @@ class OperateUSmallInt : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_u_small_int_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(unsigned short*)value) == NULL_U_SMALL_INT) return true; @@ -1091,9 +1167,9 @@ class OperateUSmallInt : public Operate { class OperateDecimal : public Operate { public: OperateDecimal(int p = 10, int s = 0, bool nullable = true) { - // assert(size > 1000); + // assert(size > 1000); assign = assigns; - // this->size = size; + // this->size = size; this->nullable = nullable; this->precision_ = p; this->scale_ = s; @@ -1103,30 +1179,11 @@ class OperateDecimal : public Operate { *(Decimal*)desc = *(Decimal*)src; } inline std::string toString(void* value) { - if (this->nullable == true && ((Decimal*)value)->isNull()) return "NULL"; - /* - char buf[43] = {"\0"}; - ExportSerializeOutput out(buf, 43); - ((NValue*)value)->serializeToExport(out, &size); - return 
std::string(buf + 4); - */ - return ((Decimal*)value)->ToString(((Decimal*)value)->GetScale()); + return ((Decimal*)value)->toString(this->scale_); }; - - /* - static std::string toString(const NValue v, unsigned n_o_d_d = 12) { - // if (this->nullable == true && compare(v, (void*)(&NULL_DECIMAL)) - //== - // 0) - // return "NULL"; - char buf[43] = {"\0"}; - ExportSerializeOutput out(buf, 43); - (v).serializeToExport(out, &n_o_d_d); - return std::string(buf + 4); - } - */ void toValue(void* target, const char* str) { - if (((strcmp(str, "") == 0)||(strcmp(str, "NULL") == 0)) && this->nullable == true) + if (((strcmp(str, "") == 0) || (strcmp(str, "NULL") == 0)) && + this->nullable == true) *(Decimal*)target = Decimal::CreateNullDecimal(); else *(Decimal*)target = Decimal(precision_, scale_, str); @@ -1209,6 +1266,12 @@ class OperateDecimal : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_decimal_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && ((Decimal*)value)->isNull()) return true; return false; @@ -1221,7 +1284,10 @@ class OperateDecimal : public Operate { */ RetCode CheckSet(string& str) const; void SetDefault(string& str) const { str = string("0"); } - + /* +private: + //bool CheckStr(string str) const; +*/ private: int precision_; int scale_; @@ -1300,6 +1366,12 @@ class OperateBool : public Operate { return true; } + inline bool getNull(void* value) { + if (this->nullable == false) return false; + value = &null_boolean_value; + return true; + } + inline bool isNull(void* value) const { if (this->nullable == true && (*(int*)value) == NULL_SMALL_INT) return true; return false; @@ -1329,6 +1401,7 @@ class column_type { } column_type() : operate(0){}; ~column_type() { + // cout << "call the destruct in columns_type" << endl; delete operate; operate = 0; } @@ -1417,7 +1490,7 @@ class column_type { operate = new 
OperateDatetime(nullable); break; case t_decimal: - operate = new OperateDecimal(size/1000, size%1000, nullable); + operate = new OperateDecimal(size / 1000, size % 1000, nullable); break; case t_smallInt: operate = new OperateSmallInt(nullable); diff --git a/common/error_define.h b/common/error_define.h index 9609d5105..046f78f24 100644 --- a/common/error_define.h +++ b/common/error_define.h @@ -59,6 +59,16 @@ typedef int RetCode; // means return code } \ } while (0) +#define EXEC_AND_LOG_RETURN(ret, f, info, err_info) \ + do { \ + if (rSuccess == (ret = f)) { \ + LOG(INFO) << info << std::endl; \ + } else { \ + ELOG(ret, err_info) \ + return ret; \ + } \ + } while (0) + #define EXEC_AND_PLOG(ret, f, info, err_info) \ do { \ if (rSuccess == (ret = f)) { \ @@ -189,6 +199,9 @@ const int rAccessDiskFileFail = -98; const int rAccessHdfsFileFail = -99; const int rNoMemory = -100; const int rDataPathError = -101; +const int rFileInUsing = -102; + +const int rResourceIsLocked = -103; // schema associated const int rEmptyAttributeName = -501; @@ -255,6 +268,19 @@ const int rCodegenFailed = const int rCatalogRestoreInvild = -5004; const int rCatalogNotFound = -5005; +/* errorno for storage -6001 ~ -7000 */ +const int rMemoryPoolMallocFail = -6001; +const int rNoEnoughMemory = -6002; +const int rReturnFailFindTargetChunkId = -6003; +const int rUnkownStroageLevel = -6004; +const int rFailOpenFileInDiskChunkReaderIterator = -6005; +const int rFailReadOneBlockInDiskChunkReaderIterator = -6006; +const int rFailOpenHDFSFileInStorage = -6007; +const int rFailSetStartOffsetInStorage = -6008; +const int rLoadFromHdfsOpenFailed = -6009; +const int rUnbindPartitionFailed = -6010; +const int rLoadFromDiskOpenFailed = -6011; + /* errorno for stmt_handler -14001 ~ -15000*/ const int rStmtHandlerCreateTableExisted = -14002; const int rStmtHandlerTypeNotSupport = -14003; @@ -307,6 +333,17 @@ const int rLimitParaCouldnotLessZero = -14137; const int rLimitParaShouldNaturalNumber = -14138; 
const int rCreateProjectionOnDelTableFailed = -14138; const int rNULLDropTableName = -14139; +const int rStmtCancelled = -14140; +const int rUnknowStmtType = -14141; + +// for node_manager +const int rConRemoteActorError = -14300; +const int rRegisterToMasterTimeOut = -14301; +const int rRegisterToMasterError = -14302; +// for exec_tracker +const int rCouldnotFindCancelQueryId = -14400; +const int rNetworkError = -14401; +const int rSendingTimeout = -14402; } // end namespace common } // end namespace claims diff --git a/common/error_no.cpp b/common/error_no.cpp index 607c949f2..26a7d4d78 100644 --- a/common/error_no.cpp +++ b/common/error_no.cpp @@ -156,6 +156,8 @@ ErrorInit::ErrorInit() { DefineErrorAndMessage(rNoMemory, "failed to allocate memory from system"); DefineErrorAndMessage(rDataPathError, "Data path not exist"); + DefineErrorAndMessage(rFileInUsing, "Someone is still using this file"); + DefineErrorAndMessage(rResourceIsLocked, "other hold the lock of resource"); // schema assocated DefineErrorAndMessage(rEmptyAttributeName, @@ -200,7 +202,7 @@ ErrorInit::ErrorInit() { /* errorno for loader -2001 ~ -3000 */ DefineErrorAndMessage(rUnbindEntireProjectionFail, "failed to unbind entire projection"); - DefineErrorAndMessage(rInvalidInsertData,"The Insert Data is Invalid"); + DefineErrorAndMessage(rInvalidInsertData, "The Insert Data is Invalid"); DefineErrorAndMessage(rTooLargeData, "Load Too Large Data"); DefineErrorAndMessage(rTooSmallData, "Load Too Small Data"); DefineErrorAndMessage(rTooLongData, "Load Too Long Data"); @@ -232,10 +234,25 @@ ErrorInit::ErrorInit() { DefineErrorAndMessage(rNoProjection, "No Projection on this table."); DefineErrorAndMessage(rLimitNotStandardized, "limit not standard."); DefineErrorAndMessage(rLimitZero, "limit zero."); - DefineErrorAndMessage(rLimitParaCouldnotLessZero, - "limit parameter couldn't zero."); - DefineErrorAndMessage(rLimitParaShouldNaturalNumber, - "limit parameter should be natural number."); + + /*errorno 
for the layor of storage -6001 ~ -7000 */ + + DefineErrorAndMessage(rMemoryPoolMallocFail, + "fail to malloc for the pool memory."); + DefineErrorAndMessage(rNoEnoughMemory, "not enough memory!!"); + DefineErrorAndMessage(rReturnFailFindTargetChunkId, + "fail to find the target chunk id !"); + DefineErrorAndMessage(rUnkownStroageLevel, "current storage level: unknown!"); + DefineErrorAndMessage(rFailOpenFileInDiskChunkReaderIterator, + "Failed to open file"); + DefineErrorAndMessage(rFailReadOneBlockInDiskChunkReaderIterator, + "failed to read one block,only a little bytes "); + DefineErrorAndMessage(rFailOpenHDFSFileInStorage, "fails to open HDFS file"); + DefineErrorAndMessage(rFailSetStartOffsetInStorage, + "fails to set the start offset"); + DefineErrorAndMessage(rLoadFromHdfsOpenFailed, "Fail to open file from Hdfs"); + DefineErrorAndMessage(rLoadFromDiskOpenFailed, "Fail to open file from Disk"); + DefineErrorAndMessage(rUnbindPartitionFailed, "Fail to unbinding partition"); /* errorno for stmt_handler -14001 ~ -15000*/ DefineErrorAndMessage(rStmtHandlerCreateTableExisted, @@ -296,6 +313,23 @@ ErrorInit::ErrorInit() { DefineErrorAndMessage(rTableNotExisted, "table not exist"); DefineErrorAndMessage(rNULLDropTableName, "The table name in the drop table statment is NULL"); + DefineErrorAndMessage(rLimitParaCouldnotLessZero, + "limit parameter couldn't zero."); + DefineErrorAndMessage(rLimitParaShouldNaturalNumber, + "limit parameter should be natural number."); + DefineErrorAndMessage(rStmtCancelled, "this stmt is cancelled"); + DefineErrorAndMessage(rUnknowStmtType, "the type of the stmt doesn't know"); + + DefineErrorAndMessage(rConRemoteActorError, "connecting remote actor failed"); + DefineErrorAndMessage(rRegisterToMasterTimeOut, "register to master timeout"); + DefineErrorAndMessage(rRegisterToMasterError, "register to master error"); + + DefineErrorAndMessage(rCouldnotFindCancelQueryId, + "couldn't the query id to be cancelled"); + + 
DefineErrorAndMessage(rNetworkError, "Network error when sending message!"); + + DefineErrorAndMessage(rSendingTimeout, "sending info timeout!"); // std::cout<args_num_ == 1); + *(bool *)fcinfo->result_ = (*(int *)fcinfo->args_[0] == NULL_INT); +} +inline void int_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = (*(int *)fcinfo->args_[0] != NULL_INT); +} /*******************int*************************/ @@ -354,6 +362,16 @@ inline void u_long_agg_max(OperFuncInfo fcinfo) { } inline void u_long_agg_sum(OperFuncInfo fcinfo) { u_long_add(fcinfo); } inline void u_long_agg_count(OperFuncInfo fcinfo) { u_long_add(fcinfo); } +inline void u_long_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = + (*(unsigned long *)fcinfo->args_[0] == NULL_U_LONG); +} +inline void u_long_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = + (*(unsigned long *)fcinfo->args_[0] != NULL_U_LONG); +} /*******************u_long*************************/ /*******************float*************************/ @@ -433,7 +451,14 @@ inline void float_agg_min(OperFuncInfo fcinfo) { } inline void float_agg_sum(OperFuncInfo fcinfo) { float_add(fcinfo); } inline void float_agg_count(OperFuncInfo fcinfo) { float_add(fcinfo); } - +inline void float_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(float *)fcinfo->args_[0]) == NULL_FLOAT); +} +inline void float_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(float *)fcinfo->args_[0]) != NULL_FLOAT); +} /*******************float*************************/ /*******************double*************************/ @@ -515,7 +540,14 @@ inline void double_agg_min(OperFuncInfo fcinfo) { } inline void double_agg_sum(OperFuncInfo fcinfo) { double_add(fcinfo); } inline void double_agg_count(OperFuncInfo fcinfo) { double_add(fcinfo); } - 
+inline void double_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(double *)fcinfo->args_[0]) == NULL_DOUBLE); +} +inline void double_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(double *)fcinfo->args_[0]) != NULL_DOUBLE); +} /*******************double*************************/ /*******************smallInt*************************/ @@ -606,7 +638,14 @@ inline void smallInt_agg_min(OperFuncInfo fcinfo) { } inline void smallInt_agg_sum(OperFuncInfo fcinfo) { smallInt_add(fcinfo); } inline void smallInt_agg_count(OperFuncInfo fcinfo) { smallInt_add(fcinfo); } - +inline void smallInt_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(short *)fcinfo->args_[0]) == NULL_SMALL_INT); +} +inline void smallInt_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(short *)fcinfo->args_[0]) != NULL_SMALL_INT); +} /*******************smallInt*************************/ /*****************boolean********************/ @@ -660,6 +699,15 @@ inline void boolean_less_equal(OperFuncInfo fcinfo) { *(bool *)fcinfo->result_ = (*(bool *)fcinfo->args_[0] <= *(bool *)fcinfo->args_[1]); } +inline void boolean_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(short *)fcinfo->args_[0]) == NULL_BOOLEAN); +} +inline void boolean_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((*(short *)fcinfo->args_[0]) != NULL_BOOLEAN); +} + /*****************boolean********************/ /*****************decimal********************/ @@ -713,29 +761,19 @@ inline void decimal_less_equal(OperFuncInfo fcinfo) { *(bool *)fcinfo->result_ = (*(Decimal *)fcinfo->args_[0]) .op_less_equals(*(Decimal *)fcinfo->args_[1]); } + +static Decimal neg(1, 0, "-1"); + inline void decimal_negative(OperFuncInfo fcinfo) { 
assert(fcinfo->args_num_ == 1); - if((*(Decimal *)fcinfo->args_[0]).isNull()) - *(Decimal *)fcinfo->result_ = *(Decimal *)fcinfo->args_[0]; - else - { - Decimal neg(1, 0, "-1"); - *(Decimal *)fcinfo->result_ = (*(Decimal *)fcinfo->args_[0]).op_multiply(neg); - } + *(Decimal *)fcinfo->result_ = (*(Decimal *)fcinfo->args_[0]).op_multiply(neg); } inline void decimal_agg_max(OperFuncInfo fcinfo) { assert(fcinfo->args_num_ == 2); - if((*(Decimal *)fcinfo->args_[1]).isNull()||(*(Decimal *)fcinfo->args_[0]).isNull()) - { - *(Decimal *)fcinfo->result_ = (*(Decimal *)fcinfo->args_[1]).isNull()?(*(Decimal *)fcinfo->args_[0]):(*(Decimal *)fcinfo->args_[1]); - } - else - { *(Decimal *)fcinfo->result_ = (*(Decimal *)fcinfo->args_[0]).op_great(*(Decimal *)fcinfo->args_[1]) ? (*(Decimal *)fcinfo->args_[0]) : (*(Decimal *)fcinfo->args_[1]); - } } inline void decimal_agg_min(OperFuncInfo fcinfo) { assert(fcinfo->args_num_ == 2); @@ -746,6 +784,14 @@ inline void decimal_agg_min(OperFuncInfo fcinfo) { } inline void decimal_agg_sum(OperFuncInfo fcinfo) { decimal_add(fcinfo); } inline void decimal_agg_count(OperFuncInfo fcinfo) { decimal_add(fcinfo); } +inline void decimal_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = (((Decimal *)fcinfo->args_[0])->isNull()); +} +inline void decimal_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = !(((Decimal *)fcinfo->args_[0])->isNull()); +} /*****************decimal********************/ /*****************string********************/ @@ -847,9 +893,17 @@ inline void string_substring(OperFuncInfo fcinfo) { assert(fcinfo->args_num_ == 3); strncpy((char *)fcinfo->result_, ((char *)fcinfo->args_[0]) + (*(int *)fcinfo->args_[1]), - (*(int *)fcinfo->args_[2]) - (*(int *)fcinfo->args_[1])); + (*(int *)fcinfo->args_[2])); + *((char *)fcinfo->result_ + (*(int *)fcinfo->args_[2])) = '\0'; +} +inline void string_is_null(OperFuncInfo fcinfo) { + 
assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = (*(char *)fcinfo->args_[0]) == NULL_STRING; +} +inline void string_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = (*(char *)fcinfo->args_[0]) != NULL_STRING; } - /*****************string********************/ /*****************date********************/ @@ -941,7 +995,14 @@ inline void date_agg_min(OperFuncInfo fcinfo) { ? *(date *)fcinfo->args_[0] : *(date *)fcinfo->args_[1]; } - +inline void date_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((date *)fcinfo->args_[0])->is_infinity(); +} +inline void date_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = !(((date *)fcinfo->args_[0])->is_infinity()); +} /*****************date********************/ /*****************time********************/ @@ -990,6 +1051,16 @@ inline void time_agg_min(OperFuncInfo fcinfo) { ? *(time_duration *)fcinfo->args_[0] : *(time_duration *)fcinfo->args_[1]; } +inline void time_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((time_duration *)fcinfo->args_[0])->is_negative(); +} +inline void time_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = + !(((time_duration *)fcinfo->args_[0])->is_negative()); +} + /*****************time********************/ /*****************datetime********************/ @@ -1037,6 +1108,14 @@ inline void datetime_agg_min(OperFuncInfo fcinfo) { ? 
*(ptime *)fcinfo->args_[0] : *(ptime *)fcinfo->args_[1]; } +inline void datetime_is_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = ((ptime *)fcinfo->args_[0])->is_infinity(); +} +inline void datetime_is_not_null(OperFuncInfo fcinfo) { + assert(fcinfo->args_num_ == 1); + *(bool *)fcinfo->result_ = !(((ptime *)fcinfo->args_[0])->is_infinity()); +} /*****************datetime********************/ inline void InitOperatorFunc() { for (int i = 0; i < DATA_TYPE_NUM; i++) @@ -1063,6 +1142,9 @@ inline void InitOperatorFunc() { DataTypeOper::data_type_oper_func_[t_int][oper_min] = int_agg_min; DataTypeOper::data_type_oper_func_[t_int][oper_agg_sum] = int_agg_sum; DataTypeOper::data_type_oper_func_[t_int][oper_agg_count] = int_agg_count; + DataTypeOper::data_type_oper_func_[t_int][oper_is_null] = int_is_null; + DataTypeOper::data_type_oper_func_[t_int][oper_is_not_null] = int_is_not_null; + /*****************int********************/ /*****************ulong********************/ @@ -1091,6 +1173,10 @@ inline void InitOperatorFunc() { DataTypeOper::data_type_oper_func_[t_u_long][oper_agg_sum] = u_long_agg_sum; DataTypeOper::data_type_oper_func_[t_u_long][oper_agg_count] = u_long_agg_count; + + DataTypeOper::data_type_oper_func_[t_u_long][oper_is_null] = u_long_is_null; + DataTypeOper::data_type_oper_func_[t_u_long][oper_is_not_null] = + u_long_is_not_null; /*****************ulong********************/ /*****************float********************/ @@ -1116,6 +1202,10 @@ inline void InitOperatorFunc() { DataTypeOper::data_type_oper_func_[t_float][oper_min] = float_agg_min; DataTypeOper::data_type_oper_func_[t_float][oper_agg_sum] = float_agg_sum; DataTypeOper::data_type_oper_func_[t_float][oper_agg_count] = float_agg_count; + DataTypeOper::data_type_oper_func_[t_float][oper_is_null] = float_is_null; + DataTypeOper::data_type_oper_func_[t_float][oper_is_not_null] = + float_is_not_null; + /*****************float********************/ 
/*****************double********************/ @@ -1144,6 +1234,9 @@ inline void InitOperatorFunc() { DataTypeOper::data_type_oper_func_[t_double][oper_agg_sum] = double_agg_sum; DataTypeOper::data_type_oper_func_[t_double][oper_agg_count] = double_agg_count; + DataTypeOper::data_type_oper_func_[t_double][oper_is_null] = double_is_null; + DataTypeOper::data_type_oper_func_[t_double][oper_is_not_null] = + double_is_not_null; /*****************double********************/ /*****************smallInt********************/ @@ -1174,6 +1267,10 @@ inline void InitOperatorFunc() { smallInt_agg_sum; DataTypeOper::data_type_oper_func_[t_smallInt][oper_agg_count] = smallInt_agg_count; + DataTypeOper::data_type_oper_func_[t_smallInt][oper_is_null] = + smallInt_is_null; + DataTypeOper::data_type_oper_func_[t_smallInt][oper_is_not_null] = + smallInt_is_not_null; /*****************smallInt********************/ /*****************boolean********************/ @@ -1198,7 +1295,9 @@ inline void InitOperatorFunc() { boolean_less_equal; DataTypeOper::data_type_oper_func_[t_boolean][oper_negative] = oper_not_support; - + DataTypeOper::data_type_oper_func_[t_boolean][oper_is_null] = boolean_is_null; + DataTypeOper::data_type_oper_func_[t_boolean][oper_is_not_null] = + boolean_is_not_null; /*****************boolean********************/ /*****************decimal********************/ @@ -1229,6 +1328,9 @@ inline void InitOperatorFunc() { DataTypeOper::data_type_oper_func_[t_decimal][oper_agg_sum] = decimal_agg_sum; DataTypeOper::data_type_oper_func_[t_decimal][oper_agg_count] = decimal_agg_count; + DataTypeOper::data_type_oper_func_[t_decimal][oper_is_null] = decimal_is_null; + DataTypeOper::data_type_oper_func_[t_decimal][oper_is_not_null] = + decimal_is_not_null; /*****************decimal********************/ /*****************string********************/ @@ -1263,7 +1365,9 @@ inline void InitOperatorFunc() { DataTypeOper::data_type_oper_func_[t_string][oper_upper] = string_upper; 
DataTypeOper::data_type_oper_func_[t_string][oper_substring] = string_substring; - + DataTypeOper::data_type_oper_func_[t_string][oper_is_null] = string_is_null; + DataTypeOper::data_type_oper_func_[t_string][oper_is_not_null] = + string_is_not_null; /*****************string********************/ /*****************date********************/ @@ -1301,6 +1405,9 @@ inline void InitOperatorFunc() { date_sub_year; DataTypeOper::data_type_oper_func_[t_date][oper_max] = date_agg_max; DataTypeOper::data_type_oper_func_[t_date][oper_min] = date_agg_min; + DataTypeOper::data_type_oper_func_[t_date][oper_is_null] = date_is_null; + DataTypeOper::data_type_oper_func_[t_date][oper_is_not_null] = + date_is_not_null; /*****************date********************/ /*****************time********************/ @@ -1313,6 +1420,9 @@ inline void InitOperatorFunc() { DataTypeOper::data_type_oper_func_[t_time][oper_less_equal] = time_less_equal; DataTypeOper::data_type_oper_func_[t_time][oper_max] = time_agg_max; DataTypeOper::data_type_oper_func_[t_time][oper_min] = time_agg_min; + DataTypeOper::data_type_oper_func_[t_time][oper_is_null] = time_is_null; + DataTypeOper::data_type_oper_func_[t_time][oper_is_not_null] = + time_is_not_null; /*****************time********************/ /*****************datetime********************/ @@ -1327,6 +1437,10 @@ inline void InitOperatorFunc() { datetime_less_equal; DataTypeOper::data_type_oper_func_[t_datetime][oper_max] = datetime_agg_max; DataTypeOper::data_type_oper_func_[t_datetime][oper_min] = datetime_agg_min; + DataTypeOper::data_type_oper_func_[t_datetime][oper_is_null] = + datetime_is_null; + DataTypeOper::data_type_oper_func_[t_datetime][oper_is_not_null] = + datetime_is_not_null; /*****************datetime********************/ } inline void avg_error_divide(void *sum_value, int64_t tuple_number, @@ -1363,7 +1477,7 @@ inline void avg_decimal_divide(void *sum_value, int64_t tuple_number, *(Decimal *)result = *(Decimal *)sum_value; stringstream 
ss; ss << tuple_number; - Decimal tn(CLAIMS_COMMON_DECIMAL_PSUBS, 0, ss.str()); + Decimal tn(DECIMAL_PSUBS, 0, ss.str()); *(Decimal *)result = (*(Decimal *)result).op_divide(tn); } diff --git a/common/expression/expr_binary.cpp b/common/expression/expr_binary.cpp index ad4e71757..0200a41b7 100644 --- a/common/expression/expr_binary.cpp +++ b/common/expression/expr_binary.cpp @@ -35,22 +35,22 @@ ExprBinary::ExprBinary(ExprBinary* expr) data_type_oper_func_(expr->data_type_oper_func_), arg0_(expr->arg0_->ExprCopy()), arg1_(expr->arg1_->ExprCopy()) {} -void* ExprBinary::ExprEvaluate(void* tuple, Schema* schema) { + +void* ExprBinary::ExprEvaluate(ExprEvalCnxt& eecnxt) { OperFuncInfoData oper_info; - oper_info.args_[0] = arg0_->ExprEvaluate(tuple, schema); - oper_info.args_[1] = arg1_->ExprEvaluate(tuple, schema); + oper_info.args_[0] = arg0_->ExprEvaluate(eecnxt); + oper_info.args_[1] = arg1_->ExprEvaluate(eecnxt); oper_info.args_num_ = 2; oper_info.result_ = value_; data_type_oper_func_(&oper_info); return type_cast_func_(oper_info.result_, value_); } - -void ExprBinary::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; - arg0_->InitExprAtLogicalPlan(get_type_, column_index, schema); - arg1_->InitExprAtLogicalPlan(get_type_, column_index, schema); +void ExprBinary::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; + licnxt.return_type_ = get_type_; + arg0_->InitExprAtLogicalPlan(licnxt); + licnxt.return_type_ = get_type_; + arg1_->InitExprAtLogicalPlan(licnxt); value_size_ = std::max(arg0_->value_size_, arg1_->value_size_); is_null_ = (arg0_->is_null_ || arg1_->is_null_); } diff --git a/common/expression/expr_binary.h b/common/expression/expr_binary.h index 0397d7c61..46c235e25 100644 --- a/common/expression/expr_binary.h +++ b/common/expression/expr_binary.h @@ -36,10 +36,10 @@ class ExprBinary : public ExprNode { delete arg0_; delete arg1_; } - void* 
ExprEvaluate(void* tuple, Schema* schema); - void InitExprAtLogicalPlan(data_type return_type, - const std::map& column_index, - Schema* schema); + void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + void InitExprAtPhysicalPlan(); ExprNode* ExprCopy(); diff --git a/common/expression/expr_case_when.cpp b/common/expression/expr_case_when.cpp index 77cd51221..45be44571 100644 --- a/common/expression/expr_case_when.cpp +++ b/common/expression/expr_case_when.cpp @@ -34,32 +34,31 @@ ExprCaseWhen::ExprCaseWhen(ExprCaseWhen* expr) case_then_[i] = case_then_[i]->ExprCopy(); } } -void* ExprCaseWhen::ExprEvaluate(void* tuple, Schema* schema) { + +void* ExprCaseWhen::ExprEvaluate(ExprEvalCnxt& eecnxt) { ExprNode* then = case_then_[case_then_.size() - 1]; void* result; for (int i = 0; i < case_when_.size(); i++) { - if (*static_cast(case_when_[i]->ExprEvaluate(tuple, schema)) == - true) { + if (*static_cast(case_when_[i]->ExprEvaluate(eecnxt)) == true) { then = case_then_[i]; break; } } // case_then_ shouldn't be NULL, checked before - result = then->ExprEvaluate(tuple, schema); + result = then->ExprEvaluate(eecnxt); return type_cast_func_(result, value_); } -void ExprCaseWhen::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; +void ExprCaseWhen::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; value_size_ = 0; is_null_ = false; for (int i = 0; i < case_when_.size(); i++) { - case_when_[i]->InitExprAtLogicalPlan(t_boolean, column_index, schema); + licnxt.return_type_ = t_boolean; + case_when_[i]->InitExprAtLogicalPlan(licnxt); } for (int i = 0; i < case_then_.size(); i++) { - case_then_[i]->InitExprAtLogicalPlan(case_then_[i]->get_type_, column_index, - schema); + licnxt.return_type_ = case_then_[i]->get_type_; + case_then_[i]->InitExprAtLogicalPlan(licnxt); value_size_ = std::max(value_size_, 
case_then_[i]->value_size_); is_null_ = (is_null_ || case_then_[i]->is_null_); } diff --git a/common/expression/expr_case_when.h b/common/expression/expr_case_when.h index bd25202e2..5f987792b 100644 --- a/common/expression/expr_case_when.h +++ b/common/expression/expr_case_when.h @@ -38,10 +38,11 @@ class ExprCaseWhen : public ExprNode { case_when_.clear(); case_then_.clear(); } - void* ExprEvaluate(void* tuple, Schema* schema); - void InitExprAtLogicalPlan(data_type return_type, - const std::map& column_index, - Schema* schema); + + void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + void InitExprAtPhysicalPlan(); ExprNode* ExprCopy(); diff --git a/common/expression/expr_column.cpp b/common/expression/expr_column.cpp index 6eb14c6f7..c497f8a22 100644 --- a/common/expression/expr_column.cpp +++ b/common/expression/expr_column.cpp @@ -17,6 +17,7 @@ #include #include +#include "./expr_node.h" #include "./expr_type_cast.h" using std::string; namespace claims { @@ -36,33 +37,50 @@ ExprColumn::ExprColumn(ExprColumn* expr) table_id_(expr->table_id_), table_name_(expr->table_name_), column_name_(expr->column_name_) {} -void* ExprColumn::ExprEvaluate(void* tuple, Schema* schema) { - void* result = schema->getColumnAddess(attr_id_, tuple); + +void* ExprColumn::ExprEvaluate(ExprEvalCnxt& eecnxt) { + void* result = eecnxt.schema[table_id_]->getColumnAddess( + attr_id_, eecnxt.tuple[table_id_]); return type_cast_func_(result, value_); } - -void ExprColumn::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; - auto it = column_index.find(table_name_ + "." + column_name_); - if (it != column_index.end()) { +// checking the column belongs to witch table +void ExprColumn::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; + auto it = licnxt.column_id0_.find(table_name_ + "." 
+ column_name_); + if (it != licnxt.column_id0_.end()) { attr_id_ = it->second; + table_id_ = 0; + if (return_type_ == t_string) { + value_size_ = std::max(licnxt.schema0_->getcolumn(attr_id_).get_length(), + static_cast(BASE_DATA_SIZE)); + } else if (return_type_ == t_decimal) { + value_size_ = licnxt.schema0_->getcolumn(attr_id_).size; + } else { + value_size_ = licnxt.schema0_->getcolumn(attr_id_).get_length(); + } + is_null_ = false; } else { - LOG(ERROR) << "[ " << table_name_ + "." + column_name_ - << " ] doesn't exist in column_index_map during initalize " - "column at logical plan" << endl; - assert(false); - } - - // now column_name_ like A.a, but may be change to a. - if (return_type_ == t_string) { - value_size_ = std::max(schema->getcolumn(attr_id_).get_length(), - static_cast(BASE_DATA_SIZE)); - } else { - value_size_ = schema->getcolumn(attr_id_).get_length(); + auto it = licnxt.column_id1_.find(table_name_ + "." + column_name_); + if (it != licnxt.column_id1_.end()) { + attr_id_ = it->second; + table_id_ = 1; + if (return_type_ == t_string) { + value_size_ = + std::max(licnxt.schema1_->getcolumn(attr_id_).get_length(), + static_cast(BASE_DATA_SIZE)); + } else if (return_type_ == t_decimal) { + value_size_ = licnxt.schema1_->getcolumn(attr_id_).size; + } else { + value_size_ = licnxt.schema1_->getcolumn(attr_id_).get_length(); + } + is_null_ = false; + } else { + LOG(ERROR) << "[ " << table_name_ + "." 
+ column_name_ + << " ] doesn't exist in column_id_map during initalize " + "column at logical plan" << endl; + assert(false); + } } - is_null_ = false; } void ExprColumn::InitExprAtPhysicalPlan() { diff --git a/common/expression/expr_column.h b/common/expression/expr_column.h index 7ce3a061a..49c3f442b 100644 --- a/common/expression/expr_column.h +++ b/common/expression/expr_column.h @@ -30,10 +30,10 @@ class ExprColumn : public ExprNode { explicit ExprColumn(ExprColumn* expr_column); ExprColumn() {} ~ExprColumn() {} - void* ExprEvaluate(void* tuple, Schema* schema); - void InitExprAtLogicalPlan(data_type return_type, - const std::map& column_index, - Schema* schema); + void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + void InitExprAtPhysicalPlan(); ExprNode* ExprCopy(); diff --git a/common/expression/expr_const.cpp b/common/expression/expr_const.cpp index be6129ef4..08d7dca77 100644 --- a/common/expression/expr_const.cpp +++ b/common/expression/expr_const.cpp @@ -23,15 +23,14 @@ ExprConst::ExprConst(ExprNodeType expr_node_type, data_type actual_type, : ExprNode(expr_node_type, actual_type, alias), const_value_(const_value) {} ExprConst::ExprConst(ExprConst* expr) : ExprNode(expr), const_value_(expr->const_value_) {} -void* ExprConst::ExprEvaluate(void* tuple, Schema* schema) { return value_; } +void* ExprConst::ExprEvaluate(ExprEvalCnxt& eecnxt) { return value_; } -void ExprConst::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; +void ExprConst::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; value_size_ = max(static_cast(const_value_.size()), BASE_DATA_SIZE); is_null_ = false; } + /* * for const, the value has been type_casted at InitExprAtPhysicalPlan(), * so later, just to return the value_ is ok, because this can avoid diff --git a/common/expression/expr_const.h b/common/expression/expr_const.h 
index b74018544..9ed088d9f 100644 --- a/common/expression/expr_const.h +++ b/common/expression/expr_const.h @@ -28,10 +28,10 @@ class ExprConst : public ExprNode { explicit ExprConst(ExprConst* expr_const); ExprConst() {} ~ExprConst() {} - void* ExprEvaluate(void* tuple, Schema* schema); - void InitExprAtLogicalPlan(data_type return_type, - const std::map& column_index, - Schema* schema); + void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + void InitExprAtPhysicalPlan(); ExprNode* ExprCopy(); diff --git a/common/expression/expr_date.cpp b/common/expression/expr_date.cpp index 1e7e1453a..09245a748 100644 --- a/common/expression/expr_date.cpp +++ b/common/expression/expr_date.cpp @@ -36,23 +36,23 @@ ExprDate::ExprDate(ExprDate* expr) arg1_(expr->arg1_->ExprCopy()), arg1_return_type_(expr->arg1_return_type_), DataTypeOperFunc_(expr->DataTypeOperFunc_) {} -void* ExprDate::ExprEvaluate(void* tuple, Schema* schema) { + +void* ExprDate::ExprEvaluate(ExprEvalCnxt& eecnxt) { OperFuncInfoData oper_info; - oper_info.args_[0] = arg0_->ExprEvaluate(tuple, schema); - oper_info.args_[1] = arg1_->ExprEvaluate(tuple, schema); + oper_info.args_[0] = arg0_->ExprEvaluate(eecnxt); + oper_info.args_[1] = arg1_->ExprEvaluate(eecnxt); oper_info.args_num_ = 2; oper_info.result_ = value_; DataTypeOperFunc_(&oper_info); return type_cast_func_(oper_info.result_, value_); } -void ExprDate::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; - // difference from ExprBinary - arg0_->InitExprAtLogicalPlan(actual_type_, column_index, schema); - arg1_->InitExprAtLogicalPlan(arg1_return_type_, column_index, schema); +void ExprDate::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; + licnxt.return_type_ = actual_type_; + arg0_->InitExprAtLogicalPlan(licnxt); + licnxt.return_type_ = arg1_return_type_; + arg1_->InitExprAtLogicalPlan(licnxt); 
value_size_ = std::max(arg0_->value_size_, arg1_->value_size_); is_null_ = (arg0_->is_null_ || arg1_->is_null_); } diff --git a/common/expression/expr_date.h b/common/expression/expr_date.h index 64492ca2e..4cb1d533a 100644 --- a/common/expression/expr_date.h +++ b/common/expression/expr_date.h @@ -35,10 +35,10 @@ class ExprDate : public ExprNode { delete arg0_; delete arg1_; } - void* ExprEvaluate(void* tuple, Schema* schema); - void InitExprAtLogicalPlan(data_type return_type, - const std::map& column_index, - Schema* schema); + void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + void InitExprAtPhysicalPlan(); ExprNode* ExprCopy(); diff --git a/common/expression/expr_in.cpp b/common/expression/expr_in.cpp index 3f6db5a15..d5a9e2491 100644 --- a/common/expression/expr_in.cpp +++ b/common/expression/expr_in.cpp @@ -37,44 +37,46 @@ ExprIn::ExprIn(ExprIn* expr) } } -void* ExprIn::ExprItemEvaluate(void* tuple, Schema* schema, - ExprBinary* cmp_expr, ExprNode* right_node) { +void* ExprIn::ExprItemEvaluate(ExprEvalCnxt& eecnxt, ExprBinary* cmp_expr, + ExprNode* right_node) { OperFuncInfoData oper_info; - oper_info.args_[0] = cmp_expr->arg0_->ExprEvaluate(tuple, schema); - oper_info.args_[1] = right_node->ExprEvaluate(tuple, schema); + oper_info.args_[0] = cmp_expr->arg0_->ExprEvaluate(eecnxt); + oper_info.args_[1] = right_node->ExprEvaluate(eecnxt); oper_info.args_num_ = 2; oper_info.result_ = value_; cmp_expr->data_type_oper_func_(&oper_info); // return TypeCastFunc_(oper_info.result_, value_); return oper_info.result_; } -void* ExprIn::ExprEvaluate(void* tuple, Schema* schema) { + +void* ExprIn::ExprEvaluate(ExprEvalCnxt& eecnxt) { bool result = false; bool tmp_result = true; for (int i = 0; i < right_node_.size() && !result; ++i) { tmp_result = true; for (int j = 0; j < right_node_[i].size() && tmp_result; ++j) { tmp_result = *(static_cast( - ExprItemEvaluate(tuple, schema, cmp_expr_[j], right_node_[i][j]))); + 
ExprItemEvaluate(eecnxt, cmp_expr_[j], right_node_[i][j]))); } result = tmp_result; } return type_cast_func_(&result, value_); } -void ExprIn::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; +void ExprIn::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; value_size_ = BASE_DATA_SIZE; is_null_ = false; + + licnxt.return_type_ = t_boolean; for (int i = 0; i < cmp_expr_.size(); i++) { - cmp_expr_[i]->InitExprAtLogicalPlan(t_boolean, column_index, schema); + cmp_expr_[i]->InitExprAtLogicalPlan(licnxt); } + for (int i = 0; i < right_node_.size(); i++) { for (int j = 0; j < right_node_[i].size(); j++) { - right_node_[i][j]->InitExprAtLogicalPlan(cmp_expr_[j]->get_type_, - column_index, schema); + licnxt.return_type_ = cmp_expr_[j]->get_type_; + right_node_[i][j]->InitExprAtLogicalPlan(licnxt); } } } diff --git a/common/expression/expr_in.h b/common/expression/expr_in.h index 39ff976a5..1b542c781 100644 --- a/common/expression/expr_in.h +++ b/common/expression/expr_in.h @@ -42,12 +42,13 @@ class ExprIn : public ExprNode { cmp_expr_.clear(); right_node_.clear(); } - void* ExprItemEvaluate(void* tuple, Schema* schema, ExprBinary* cmp_expr, + void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + + void* ExprItemEvaluate(ExprEvalCnxt& eecnxt, ExprBinary* cmp_expr, ExprNode* right_node); - void* ExprEvaluate(void* tuple, Schema* schema); - void InitExprAtLogicalPlan(data_type return_type, - const std::map& column_index, - Schema* schema); + void InitExprAtPhysicalPlan(); ExprNode* ExprCopy(); diff --git a/common/expression/expr_node.cpp b/common/expression/expr_node.cpp index 3dcd867f0..ef485ee20 100644 --- a/common/expression/expr_node.cpp +++ b/common/expression/expr_node.cpp @@ -49,17 +49,18 @@ ExprNode::ExprNode(ExprNode* expr) is_null_(expr->is_null_), value_size_(expr->value_size_), 
type_cast_func_(expr->type_cast_func_), - value_(expr->value_) {} + value_(NULL) {} -bool ExprNode::MoreExprEvaluate(vector thread_condi, void* tuple, - Schema* schema) { +bool ExprNode::MoreExprEvaluate(vector thread_condi, + ExprEvalCnxt& eecnxt) { for (int i = 0; i < thread_condi.size(); ++i) { bool result = - *reinterpret_cast(thread_condi[i]->ExprEvaluate(tuple, schema)); + *reinterpret_cast(thread_condi[i]->ExprEvaluate(eecnxt)); if (!result) return false; } return true; } + bool ExprNode::IsEqualAttr(const Attribute& attr) { if (expr_node_type_ == t_qcolcumns) { ExprColumn* column = reinterpret_cast(this); diff --git a/common/expression/expr_node.h b/common/expression/expr_node.h index eda21f82c..a83d97790 100644 --- a/common/expression/expr_node.h +++ b/common/expression/expr_node.h @@ -94,6 +94,23 @@ enum OperType { oper_max, oper_min, oper_agg_count, + oper_is_not_null, + oper_is_null, +}; +struct ExprEvalCnxt { + void* tuple[2]; + Schema* schema[2]; +}; +struct LogicInitCnxt { + LogicInitCnxt() : return_type_(t_int), schema0_(NULL), schema1_(NULL) { + column_id0_.clear(); + column_id1_.clear(); + } + data_type return_type_; + std::map column_id0_; + std::map column_id1_; + Schema* schema0_; // shouldn't be deleted + Schema* schema1_; // shouldn't be deleted }; class ExprNode { public: @@ -101,7 +118,7 @@ class ExprNode { ExprNode(ExprNodeType expr_node_type, data_type actual_type, data_type get_type, string alias); explicit ExprNode(ExprNode* expr); - ExprNode() {} + ExprNode() : value_(NULL) {} virtual ~ExprNode() { if (value_ != NULL) { free(value_); @@ -115,11 +132,12 @@ class ExprNode { bool is_null_; TypeCastFunc type_cast_func_; std::string alias_; - bool MoreExprEvaluate(vector condi, void* tuple, Schema* schema); - virtual void* ExprEvaluate(void* tuple, Schema* schema) { return NULL; } - virtual void InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) {} + bool MoreExprEvaluate(vector condi, 
ExprEvalCnxt& eecnxt); + + virtual void* ExprEvaluate(ExprEvalCnxt& eecnxt) { return NULL; } + + virtual void InitExprAtLogicalPlan(LogicInitCnxt& licnxt) {} + virtual void InitExprAtPhysicalPlan() {} virtual ExprNode* ExprCopy() { return NULL; } bool IsEqualAttr(const Attribute& attr); diff --git a/common/expression/expr_ternary.cpp b/common/expression/expr_ternary.cpp index 727cf40c4..14eb4a140 100644 --- a/common/expression/expr_ternary.cpp +++ b/common/expression/expr_ternary.cpp @@ -35,24 +35,26 @@ ExprTernary::ExprTernary(ExprTernary* expr) arg0_(expr->arg0_->ExprCopy()), arg1_(expr->arg1_->ExprCopy()), arg2_(expr->arg2_->ExprCopy()) {} -void* ExprTernary::ExprEvaluate(void* tuple, Schema* schema) { + +void* ExprTernary::ExprEvaluate(ExprEvalCnxt& eecnxt) { OperFuncInfoData oper_info; - oper_info.args_[0] = arg0_->ExprEvaluate(tuple, schema); - oper_info.args_[1] = arg1_->ExprEvaluate(tuple, schema); - oper_info.args_[2] = arg2_->ExprEvaluate(tuple, schema); + oper_info.args_[0] = arg0_->ExprEvaluate(eecnxt); + oper_info.args_[1] = arg1_->ExprEvaluate(eecnxt); + oper_info.args_[2] = arg2_->ExprEvaluate(eecnxt); oper_info.args_num_ = 3; oper_info.result_ = value_; data_type_oper_func_(&oper_info); return type_cast_func_(oper_info.result_, value_); } -void ExprTernary::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; - arg0_->InitExprAtLogicalPlan(arg0_->actual_type_, column_index, schema); - arg1_->InitExprAtLogicalPlan(arg1_->actual_type_, column_index, schema); - arg2_->InitExprAtLogicalPlan(arg2_->actual_type_, column_index, schema); +void ExprTernary::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; + licnxt.return_type_ = arg0_->actual_type_; + arg0_->InitExprAtLogicalPlan(licnxt); + licnxt.return_type_ = arg1_->actual_type_; + arg1_->InitExprAtLogicalPlan(licnxt); + licnxt.return_type_ = arg2_->actual_type_; + 
arg2_->InitExprAtLogicalPlan(licnxt); value_size_ = std::max(arg0_->value_size_, std::max(arg1_->value_size_, arg2_->value_size_)); is_null_ = (arg0_->is_null_ || arg1_->is_null_ || arg2_->is_null_); diff --git a/common/expression/expr_ternary.h b/common/expression/expr_ternary.h index b7de0e324..6294c5e3a 100644 --- a/common/expression/expr_ternary.h +++ b/common/expression/expr_ternary.h @@ -36,10 +36,10 @@ class ExprTernary : public ExprNode { delete arg1_; delete arg2_; } - void* ExprEvaluate(void* tuple, Schema* schema); - void InitExprAtLogicalPlan(data_type return_type, - const std::map& column_index, - Schema* schema); + void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + void InitExprAtPhysicalPlan(); ExprNode* ExprCopy(); diff --git a/common/expression/expr_type_cast.h b/common/expression/expr_type_cast.h index dab70a662..ff534a459 100644 --- a/common/expression/expr_type_cast.h +++ b/common/expression/expr_type_cast.h @@ -33,37 +33,37 @@ class ExprTypeCast { /***************int****************************/ inline void *int_to_int(void *value, void *tovalue) { - if (*(int *)value == NULL_INT) // in order to judge the return result is - // NULL,so void * is NULL will be simple - return NULL; + // if (*(int *)value == NULL_INT) // in order to judge the return result is + // // NULL,so void * is NULL will be simple + // return NULL; *(int *)tovalue = *(int *)value; return tovalue; } inline void *int_to_smallint(void *value, void *tovalue) { - if (*(int *)value == NULL_INT) return NULL; + // if (*(int *)value == NULL_INT) return NULL; *(short int *)tovalue = *(int *)value; return tovalue; } inline void *int_to_float(void *value, void *tovalue) { - if (*(int *)value == NULL_INT) return NULL; + // if (*(int *)value == NULL_INT) return NULL; *(float *)tovalue = *(int *)value; return tovalue; } inline void *int_to_double(void *value, void *tovalue) { - if (*(int *)value == NULL_INT) return NULL; + // if (*(int 
*)value == NULL_INT) return NULL; *(double *)tovalue = *(int *)value; return tovalue; } inline void *int_to_ulong(void *value, void *tovalue) { - if (*(int *)value == NULL_INT) return NULL; + // if (*(int *)value == NULL_INT) return NULL; *(unsigned long *)tovalue = *(int *)value; return tovalue; } inline void *int_to_decimal(void *value, void *tovalue) { - if (*(int *)value == NULL_INT) return NULL; + // if (*(int *)value == NULL_INT) return NULL; stringstream va; va << *(int *)value; - *(Decimal *)tovalue = Decimal(CLAIMS_COMMON_DECIMAL_PSUBS-1, -1, va.str()); + *(Decimal *)tovalue = Decimal(DECIMAL_PSUBS, 0, va.str()); va.clear(); return tovalue; } @@ -113,8 +113,7 @@ inline void *string_to_string(void *value, void *tovalue) { } inline void *string_to_decimal(void *value, void *tovalue) { *(Decimal *)tovalue = - Decimal(CLAIMS_COMMON_DECIMAL_PSUBS-1, -1,/* Invailed scale */ - string((char *)value)); + Decimal(DECIMAL_MAXPRCISION, DECIMAL_MAXSCALE, (char *)value); return tovalue; } inline void *string_to_boolean(void *value, void *tovalue) { @@ -172,7 +171,7 @@ inline void *ulong_to_double(void *value, void *tovalue) { inline void *ulong_to_decimal(void *value, void *tovalue) { stringstream va; va << *(unsigned long *)value; - *(Decimal *)tovalue = Decimal(CLAIMS_COMMON_DECIMAL_PSUBS-1, -1, va.str()); + *(Decimal *)tovalue = Decimal(DECIMAL_PSUBS, 0, (char *)value); va.clear(); return tovalue; } @@ -219,7 +218,7 @@ inline void *smallInt_to_boolean(void *value, void *tovalue) { inline void *smallInt_to_decimal(void *value, void *tovalue) { stringstream va; va << *(short int *)value; - *(Decimal *)tovalue = Decimal(CLAIMS_COMMON_DECIMAL_PSUBS-1, -1, va.str()); + *(Decimal *)tovalue = Decimal(DECIMAL_PSUBS, 0, va.str()); va.clear(); return tovalue; } @@ -249,8 +248,8 @@ inline void *float_to_boolean(void *value, void *tovalue) { inline void *float_to_decimal(void *value, void *tovalue) { stringstream va; va << *(float *)value; - *(Decimal *)tovalue = 
Decimal(CLAIMS_COMMON_DECIMAL_MAXPRCISION, - CLAIMS_COMMON_DECIMAL_MAXSCALE, va.str()); + *(Decimal *)tovalue = + Decimal(DECIMAL_MAXPRCISION, DECIMAL_MAXSCALE, va.str()); va.clear(); return tovalue; } @@ -277,7 +276,8 @@ inline void *double_to_decimal(void *value, void *tovalue) { stringstream va; va.precision(30); va << *(double *)value; - *(Decimal *)tovalue = Decimal(CLAIMS_COMMON_DECIMAL_PSUBS-1, -1, va.str()); + *(Decimal *)tovalue = + Decimal(DECIMAL_MAXPRCISION, DECIMAL_MAXSCALE, va.str()); va.clear(); return tovalue; } @@ -311,8 +311,7 @@ inline void *boolean_to_ulong(void *value, void *tovalue) { inline void *boolean_to_decimal(void *value, void *tovalue) { stringstream va; va << *(bool *)value; - *(Decimal *)tovalue = Decimal(CLAIMS_COMMON_DECIMAL_PSUBS, - 0, va.str()); + *(Decimal *)tovalue = Decimal(DECIMAL_PSUBS, 0, va.str()); va.clear(); return tovalue; } @@ -324,9 +323,10 @@ inline void *decimal_to_decimal(void *value, void *tovalue) { *(Decimal *)tovalue = *(Decimal *)value; return tovalue; } +static Decimal zero(1, 0, "0"); inline void *decimal_to_boolean(void *value, void *tovalue) { Decimal tvalue = *(Decimal *)value; - *(bool *)tovalue = tvalue.op_equals(Decimal(1, 0, "0")); + *(bool *)tovalue = tvalue.op_equals(zero); return tovalue; } /***************decimal****************************/ diff --git a/common/expression/expr_unary.cpp b/common/expression/expr_unary.cpp index 15d6f820b..053a206ff 100644 --- a/common/expression/expr_unary.cpp +++ b/common/expression/expr_unary.cpp @@ -26,22 +26,22 @@ ExprUnary::ExprUnary(ExprNodeType expr_node_type, data_type actual_type, oper_type_(oper_type), arg0_(arg0), data_type_oper_func_(NULL) {} +ExprUnary::ExprUnary(ExprNodeType expr_node_type, data_type actual_type, + data_type get_type, string alias, OperType oper_type, + ExprNode* arg0) + : ExprNode(expr_node_type, actual_type, get_type, alias), + oper_type_(oper_type), + arg0_(arg0), + data_type_oper_func_(NULL) {} ExprUnary::ExprUnary(ExprUnary* 
expr) : ExprNode(expr), oper_type_(expr->oper_type_), arg0_(expr->arg0_->ExprCopy()), data_type_oper_func_(expr->data_type_oper_func_) {} -void* ExprUnary::ExprEvaluate(void* tuple, Schema* schema) { - OperFuncInfoData oper_info; - oper_info.args_[0] = arg0_->ExprEvaluate(tuple, schema); - oper_info.args_num_ = 1; - oper_info.result_ = value_; - data_type_oper_func_(&oper_info); - return type_cast_func_(oper_info.result_, value_); -} -void* ExprUnary::ExprEvaluate(void* tuple, Schema* schema, void* last_value) { + +void* ExprUnary::ExprEvaluate(ExprEvalCnxt& eecnxt, void* last_value) { OperFuncInfoData oper_info; - oper_info.args_[1] = arg0_->ExprEvaluate(tuple, schema); + oper_info.args_[1] = arg0_->ExprEvaluate(eecnxt); oper_info.args_[0] = last_value; oper_info.args_num_ = 2; oper_info.result_ = last_value; @@ -57,11 +57,19 @@ void* ExprUnary::ExprEvaluate(void* value, void* last_value) { data_type_oper_func_(&oper_info); return last_value; } -void ExprUnary::InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema) { - return_type_ = return_type; - arg0_->InitExprAtLogicalPlan(get_type_, column_index, schema); +void* ExprUnary::ExprEvaluate(ExprEvalCnxt& eecnxt) { + OperFuncInfoData oper_info; + oper_info.args_[0] = arg0_->ExprEvaluate(eecnxt); + oper_info.args_num_ = 1; + oper_info.result_ = value_; + data_type_oper_func_(&oper_info); + return type_cast_func_(oper_info.result_, value_); +} + +void ExprUnary::InitExprAtLogicalPlan(LogicInitCnxt& licnxt) { + return_type_ = licnxt.return_type_; + licnxt.return_type_ = get_type_; + arg0_->InitExprAtLogicalPlan(licnxt); value_size_ = arg0_->value_size_; is_null_ = arg0_->is_null_; } diff --git a/common/expression/expr_unary.h b/common/expression/expr_unary.h index be5c6bd0c..e69699372 100644 --- a/common/expression/expr_unary.h +++ b/common/expression/expr_unary.h @@ -21,15 +21,18 @@ class ExprUnary : public ExprNode { DataTypeOperFunc data_type_oper_func_; 
ExprUnary(ExprNodeType expr_node_type, data_type actual_type, string alias, OperType oper_type, ExprNode* arg0); + ExprUnary(ExprNodeType expr_node_type, data_type actual_type, + data_type get_type, string alias, OperType oper_type, + ExprNode* arg0); explicit ExprUnary(ExprUnary* expr); ExprUnary() {} ~ExprUnary() { delete arg0_; } - virtual void* ExprEvaluate(void* tuple, Schema* schema); - virtual void* ExprEvaluate(void* tuple, Schema* schema, void* last_value); + virtual void* ExprEvaluate(ExprEvalCnxt& eecnxt); + + virtual void InitExprAtLogicalPlan(LogicInitCnxt& licnxt); + virtual void* ExprEvaluate(ExprEvalCnxt& eecnxt, void* last_value); virtual void* ExprEvaluate(void* value, void* last_value); - virtual void InitExprAtLogicalPlan( - data_type return_type, const std::map& column_index, - Schema* schema); + virtual void InitExprAtPhysicalPlan(); virtual ExprNode* ExprCopy(); diff --git a/common/file_handle/Makefile.am b/common/file_handle/Makefile.am index 89b40275c..b49f8fdeb 100644 --- a/common/file_handle/Makefile.am +++ b/common/file_handle/Makefile.am @@ -1,5 +1,5 @@ AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -19,7 +19,8 @@ libfilehandle_a_SOURCES = \ disk_file_handle_imp.cpp disk_file_handle_imp.h \ file_handle_imp.cpp \ file_handle_imp.h file_handle_imp_factory.h \ - hdfs_file_handle_imp.cpp hdfs_file_handle_imp.h + hdfs_file_handle_imp.cpp hdfs_file_handle_imp.h \ + hdfs_connector.h hdfs_connector.cpp SUBDIRS = test diff --git a/common/file_handle/disk_file_handle_imp.cpp b/common/file_handle/disk_file_handle_imp.cpp index 0fd9d6f61..f7d1d31c7 100644 --- a/common/file_handle/disk_file_handle_imp.cpp +++ b/common/file_handle/disk_file_handle_imp.cpp @@ -49,51 +49,47 @@ using claims::utility::LockGuard; namespace claims { namespace common { DiskFileHandleImp::~DiskFileHandleImp() { - int ret = Close(); - if (ret != 0) LOG(ERROR) << 
"failed to close file fd. ret:" << ret << endl; + int ret = rSuccess; + EXEC_AND_ONLY_LOG_ERROR(ret, Close(), "failed to close "); } -RetCode DiskFileHandleImp::Open(string file_name, FileOpenFlag open_flag) { - file_name_ = file_name; - open_flag_ = open_flag; - - int ret = rSuccess; - if (kReadFile == open_flag_ && false == CanAccess(file_name_)) { - ret = rAccessDiskFileFail; - ELOG(ret, "File name:" << file_name_ - << " open mode:" << file_open_flag_info[open_flag_]); - return ret; - } - if (kCreateFile == open_flag_) { +RetCode DiskFileHandleImp::SwitchStatus(FileStatus status_to_be) { + int old_file_status = file_status_; + if (kInReading == status_to_be && kInReading != file_status_) { + Close(); + fd_ = FileOpen(file_name_.c_str(), O_RDONLY, S_IWUSR | S_IRUSR); + } else if (kInOverWriting == status_to_be) { + Close(); fd_ = FileOpen(file_name_.c_str(), O_RDWR | O_TRUNC | O_CREAT, S_IWUSR | S_IRUSR); - } else if (kAppendFile == open_flag_) { + } else if (kInAppending == status_to_be && kInAppending != file_status_) { + Close(); fd_ = FileOpen(file_name_.c_str(), O_RDWR | O_CREAT | O_APPEND, S_IWUSR | S_IRUSR); - } else if (kReadFile == open_flag_) { - fd_ = FileOpen(file_name_.c_str(), O_RDONLY, S_IWUSR | S_IRUSR); } else { - LOG(ERROR) << "parameter flag:" << open_flag_ << " is invalid" << endl; - return rParamInvalid; + return rSuccess; } - if (-1 == fd_) { - PLOG(ERROR) << "failed to open file :" << file_name_ << "."; + PLOG(ERROR) << "failed to reopen file:" << file_name_ << "(" + << file_status_info[old_file_status] << ") in mode " + << file_status_info[status_to_be] << " ."; return rOpenDiskFileFail; } else { - LOG(INFO) << "opened disk file:" << file_name_ << "with " - << (kCreateFile == open_flag_ - ? "kCreateFile" - : kAppendFile == open_flag_ ? 
"kAppendFile" : "kReadFile") - << endl; + // can_close_.set_value(1); + file_status_ = status_to_be; + LOG(INFO) << "disk file:" << file_name_ << "(" + << file_status_info[old_file_status] << ") is reopened for " + << file_status_info[file_status_] << endl; return rSuccess; } } RetCode DiskFileHandleImp::Write(const void* buffer, const size_t length) { assert(fd_ >= 3); - assert(open_flag_ != kReadFile && - "It's unavailable to write into a read-only file"); + assert((kInOverWriting == file_status_ || kInAppending == file_status_) && + " files is not opened in writing mode"); + // RefHolder holder(reference_count_); + size_t total_write_num = 0; while (total_write_num < length) { ssize_t write_num = @@ -101,7 +97,7 @@ RetCode DiskFileHandleImp::Write(const void* buffer, const size_t length) { length - total_write_num); if (-1 == write_num) { PLOG(ERROR) << "failed to write buffer(" << buffer << ") to file(" << fd_ - << "): " << file_name_ << endl; + << "): " << file_name_; return rWriteDiskFileFail; } total_write_num += write_num; @@ -118,47 +114,42 @@ RetCode DiskFileHandleImp::Write(const void* buffer, const size_t length) { return rSuccess; } -RetCode DiskFileHandleImp::AtomicWrite(const void* buffer, - const size_t length) { - assert(fd_ >= 3); - assert(open_flag_ != kReadFile && - "It's unavailable to write into a read-only file"); - size_t total_write_num = 0; - LockGuard guard(write_lock_); - while (total_write_num < length) { - ssize_t write_num = - write(fd_, static_cast(buffer) + total_write_num, - length - total_write_num); - if (-1 == write_num) { - PLOG(ERROR) << "failed to write buffer(" << buffer << ") to file(" << fd_ - << "): " << file_name_ << endl; - return rWriteDiskFileFail; - } - total_write_num += write_num; - } - return rSuccess; -} - RetCode DiskFileHandleImp::Close() { + // LOG(INFO) << "ref: " << can_close_.get_value(); + // if (-1 == fd_ || 0 != reference_count_.load() // someone are still using + // this + // // file descriptor + // || 
!i_win_to_close_.try_lock()) { // someone win the lock to close if (-1 == fd_) { return rSuccess; } else if (0 == FileClose(fd_)) { LOG(INFO) << "closed file: " << file_name_ << " whose fd is " << fd_ << endl; + // i_win_to_close_.release(); fd_ = -1; + file_status_ = kClosed; return rSuccess; } else { - return rCloseDiskFileFail; + // i_win_to_close_.release(); + int ret = rCloseDiskFileFail; + EXEC_AND_PLOG(ret, ret, "", "failed to close file:" << file_name_); + return ret; } } RetCode DiskFileHandleImp::ReadTotalFile(void*& buffer, size_t* length) { - assert(fd_ >= 3); int ret = rSuccess; + // RefHolder holder(reference_count_); + + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInReading), + "failed to switch status"); + + assert(fd_ >= 3); + assert(kInReading == file_status_ && " files is not opened in reading mode"); ssize_t file_length = lseek(fd_, 0, SEEK_END); if (-1 == file_length) { - PLOG(ERROR) << "lseek called on fd to set pos to the end of file " << fd_ - << " failed : "; + PLOG(ERROR) << "failed to set pos at the end of (fd: " << fd_ + << ", name: " << file_name_ << ")."; return rLSeekDiskFileFail; } LOG(INFO) << "The length of file " << file_name_ << "is " << file_length @@ -169,27 +160,24 @@ RetCode DiskFileHandleImp::ReadTotalFile(void*& buffer, size_t* length) { if (rSuccess != (ret = SetPosition(0))) { return ret; } + *length = file_length; - ssize_t read_num = read(fd_, buffer, file_length); - LOG(INFO) << "read " << read_num << " from disk file " << file_name_ << endl; - - if (read_num != file_length) { - LOG(ERROR) << "read file [" << file_name_ - << "] from disk failed, expected read " << file_length - << " , actually read " << read_num << endl; - return rReadDiskFileFail; - } - *length = read_num; - return rSuccess; + return Read(buffer, file_length); } RetCode DiskFileHandleImp::Read(void* buffer, size_t length) { + int ret = rSuccess; + // RefHolder holder(reference_count_); + + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInReading), + "failed to 
switch status"); + assert(fd_ >= 3); + assert(kInReading == file_status_ && " files is not opened in reading mode"); ssize_t total_read_num = 0; while (total_read_num < length) { ssize_t read_num = read(fd_, static_cast(buffer) + total_read_num, length - total_read_num); - if (-1 == read_num) { LOG(ERROR) << "read file [" << file_name_ << "] from disk failed, expected read " << length @@ -203,11 +191,13 @@ RetCode DiskFileHandleImp::Read(void* buffer, size_t length) { } LOG(INFO) << "read total " << total_read_num << " from disk file " << file_name_ << endl; - return rSuccess; + return ret; } RetCode DiskFileHandleImp::SetPosition(size_t pos) { assert(fd_ >= 3); + assert(kInReading == file_status_ && + "Seeking is only work for files opened in read-only mode"); if (-1 == lseek(fd_, pos, SEEK_SET)) { PLOG(ERROR) << "failed to lseek at " << pos << " in file(fd:" << fd_ << ", " << file_name_ << ")"; @@ -216,13 +206,64 @@ RetCode DiskFileHandleImp::SetPosition(size_t pos) { return rSuccess; } +RetCode DiskFileHandleImp::Append(const void* buffer, const size_t length) { + int ret = rSuccess; + // RefHolder holder(reference_count_); + + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInAppending), + "failed to switch status"); + assert(fd_ >= 3); + assert(kInAppending == file_status_ && + " files is not opened in appending mode"); + return Write(buffer, length); +} +RetCode DiskFileHandleImp::AtomicAppend(const void* buffer, const size_t length, + function lock_func, + function unlock_func) { + lock_func(); + RetCode ret = Append(buffer, length); + unlock_func(); + return ret; +} + +RetCode DiskFileHandleImp::OverWrite(const void* buffer, const size_t length) { + int ret = rSuccess; + // RefHolder holder(reference_count_); + + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInOverWriting), + "failed to switch status"); + assert(fd_ >= 3); + assert(kInOverWriting == file_status_ && + " files is not opened in overwriting mode"); + + return Write(buffer, length); +} + +RetCode 
DiskFileHandleImp::AtomicOverWrite(const void* buffer, + const size_t length, + function lock_func, + function unlock_func) { + lock_func(); + RetCode ret = OverWrite(buffer, length); + unlock_func(); + return ret; +} + RetCode DiskFileHandleImp::DeleteFile() { int ret = rSuccess; - EXEC_AND_ONLY_LOG_ERROR(ret, Close(), "file name: " << file_name_); - if (0 != remove(file_name_.c_str())) { - LOG(ERROR) << "Cannot delete disk file : [" + file_name_ + "] ! Reason: " + - strerror(errno) << std::endl; - return rFailure; + EXEC_AND_LOG(ret, Close(), "closed file name: " << file_name_, + "failed to close file:" << file_name_); + if (CanAccess(file_name_)) { + if (0 != remove(file_name_.c_str())) { + LOG(ERROR) << "Cannot delete disk file : [" + file_name_ + + "] ! Reason: " + strerror(errno) << std::endl; + return rFailure; + } else { + fd_ = -1; + file_status_ = kClosed; + LOG(WARNING) << "The file " << file_name_ << "is deleted successfully!\n" + << std::endl; + } } return rSuccess; } diff --git a/common/file_handle/disk_file_handle_imp.h b/common/file_handle/disk_file_handle_imp.h index a500a8fb7..459395087 100644 --- a/common/file_handle/disk_file_handle_imp.h +++ b/common/file_handle/disk_file_handle_imp.h @@ -42,15 +42,24 @@ class DiskFileHandleImp : public FileHandleImp { friend FileHandleImpFactory; private: - DiskFileHandleImp() : fd_(-1) {} + explicit DiskFileHandleImp(std::string file_name) + : fd_(-1), FileHandleImp(file_name) {} public: virtual ~DiskFileHandleImp(); - virtual RetCode Open(std::string file_name, FileOpenFlag open_flag); // see more in FileHandleImp class - virtual RetCode Write(const void* buffer, const size_t length); + virtual RetCode Append(const void* buffer, const size_t length); + + virtual RetCode AtomicAppend(const void* buffer, const size_t length, + function lock_func, + function unlock_func); + + virtual RetCode OverWrite(const void* buffer, const size_t length); + + virtual RetCode AtomicOverWrite(const void* buffer, const size_t 
length, + function lock_func, + function unlock_func); - virtual RetCode AtomicWrite(const void* buffer, const size_t length); virtual RetCode Close(); // see more in FileHandleImp class virtual RetCode ReadTotalFile(void*& buffer, size_t* length); @@ -59,13 +68,19 @@ class DiskFileHandleImp : public FileHandleImp { virtual bool CanAccess(std::string file_name) { return 0 == access(file_name.c_str(), 0); } - virtual RetCode SetPosition(size_t pos); virtual RetCode DeleteFile(); + virtual RetCode SwitchStatus(FileStatus status_to_be); + + protected: + virtual RetCode SetPosition(size_t pos); + + private: + RetCode Write(const void* buffer, const size_t length); + private: int fd_; - FileOpenFlag open_flag_ = kReadFile; }; } // namespace common diff --git a/common/file_handle/file_handle_imp.cpp b/common/file_handle/file_handle_imp.cpp index 1b4ae9e8c..c08a564cb 100644 --- a/common/file_handle/file_handle_imp.cpp +++ b/common/file_handle/file_handle_imp.cpp @@ -28,6 +28,41 @@ #include "./file_handle_imp.h" +#include +#include "../../common/error_define.h" + +using std::endl; namespace claims { -namespace common {} // namespace common +namespace common { +// RetCode FileHandleImp::AtomicAppend(const void* buffer, const size_t length, +// function lock_func, +// function unlock_func) { +// lock_func(); +// RetCode ret = Append(buffer, length); +// unlock_func(); +// return ret; +//} +// RetCode FileHandleImp::AtomicOverWrite(const void* buffer, const size_t +// length, +// function lock_func, +// function unlock_func) { +// lock_func(); +// RetCode ret = OverWrite(buffer, length); +// unlock_func(); +// // if (rSuccess != ret) { +// // return ret; +// // } else { +// // ret = Close(); +// // } +// return ret; +//} +RetCode FileHandleImp::PRead(void* buffer, size_t length, size_t start_pos) { + int ret = rSuccess; + EXEC_AND_RETURN_ERROR(ret, this->SwitchStatus(kInReading), + "failed to switch status"); + EXEC_AND_RETURN_ERROR(ret, this->SetPosition(start_pos), + 
"failed to set position to " << start_pos); + return Read(buffer, length); +} +} // namespace common } /* namespace claims */ diff --git a/common/file_handle/file_handle_imp.h b/common/file_handle/file_handle_imp.h index 74f024ef3..eaabfa492 100644 --- a/common/file_handle/file_handle_imp.h +++ b/common/file_handle/file_handle_imp.h @@ -28,25 +28,57 @@ #ifndef COMMON_FILE_HANDLE_FILE_HANDLE_IMP_H_ #define COMMON_FILE_HANDLE_FILE_HANDLE_IMP_H_ +#include +#include +#include #include #include "../../utility/lock.h" #include "../error_define.h" + +using std::function; namespace claims { namespace common { - +using std::string; +using std::atomic; class FileHandleImpFactory; -enum FileOpenFlag { kCreateFile = 0, kAppendFile, kReadFile }; -static const char* file_open_flag_info[3] = {"create file", "append file", - "read file"}; +enum FileOpenFlag { + kReadFile = 0, + kCreateFile, + kAppendFile, +}; +static const char* file_open_flag_info[3] = {"kReadFile", "kCreateFile", + "kAppendFile"}; + +static const char* file_status_info[4] = {"Reading", "Writing", "Appending", + "Closed"}; class FileHandleImp { friend FileHandleImpFactory; public: - FileHandleImp() {} + enum FileStatus { kInReading = 0, kInOverWriting, kInAppending, kClosed }; + + protected: + class RefHolder { + public: + explicit RefHolder(atomic& ref) : ref_(ref) { + ++ref_; + LOG(INFO) << "ref post:" << ref_ << std::endl; + } + ~RefHolder() { + --ref_; + LOG(INFO) << "ref wait:" << ref_; + } + + private: + atomic& ref_; + }; + + public: + explicit FileHandleImp(std::string file_name) : file_name_(file_name) {} virtual ~FileHandleImp() {} - virtual RetCode Open(std::string file_name, FileOpenFlag open_flag) = 0; + // virtual RetCode Open(std::string file_name, FileOpenFlag open_flag) = 0; /** * @brief Method description: write buffer into file and make sure write * length char @@ -54,9 +86,18 @@ class FileHandleImp { * @param length: the no. 
of bytes to write * @return rSuccess if wrote length bytes */ - virtual RetCode Write(const void* buffer, const size_t length) = 0; + virtual RetCode Append(const void* buffer, const size_t length) = 0; + + virtual RetCode AtomicAppend(const void* buffer, const size_t length, + function lock_func, + function unlock_func) = 0; + + virtual RetCode OverWrite(const void* buffer, const size_t length) = 0; + + virtual RetCode AtomicOverWrite(const void* buffer, const size_t length, + function lock_func, + function unlock_func) = 0; - virtual RetCode AtomicWrite(const void* buffer, const size_t length) = 0; virtual RetCode Close() = 0; /** * @brief Method description: read total file into memory, update length to @@ -65,7 +106,8 @@ class FileHandleImp { * @param length: hold the no. bytes of the all file * @return rSuccess if succeed */ - virtual RetCode ReadTotalFile(void*& buffer, size_t* length) = 0; + virtual RetCode ReadTotalFile(void*& buffer, size_t* length) = 0; // NOLINT + /** * @brief Method description: read length bytes from file into memory, usually * called after SetPosition() @@ -74,14 +116,25 @@ class FileHandleImp { * @return rSuccess if succeed */ virtual RetCode Read(void* buffer, size_t length) = 0; + RetCode PRead(void* buffer, size_t length, size_t start_pos); virtual bool CanAccess(std::string file_name) = 0; - virtual RetCode SetPosition(size_t pos) = 0; virtual RetCode DeleteFile() = 0; + const string& get_file_name() { return file_name_; } + + virtual RetCode SwitchStatus(FileStatus status_to_be) = 0; + + protected: + virtual RetCode SetPosition(size_t pos) = 0; + protected: std::string file_name_; - Lock write_lock_; + volatile FileStatus file_status_ = kClosed; + // Lock write_lock_; + // atomic reference_count_; + // SpineLock i_win_to_close_; + // semaphore can_close_; }; } // namespace common diff --git a/common/file_handle/file_handle_imp_factory.h b/common/file_handle/file_handle_imp_factory.h index 51d797877..0da863f27 100644 --- 
a/common/file_handle/file_handle_imp_factory.h +++ b/common/file_handle/file_handle_imp_factory.h @@ -50,11 +50,11 @@ class FileHandleImpFactory { static FileHandleImpFactory factory; return factory; } - FileHandleImp* CreateFileHandleImp(FilePlatform platform) { + FileHandleImp* CreateFileHandleImp(FilePlatform platform, string file_name) { if (kHdfs == platform) - return new HdfsFileHandleImp(); + return new HdfsFileHandleImp(file_name); else if (kDisk == platform) - return new DiskFileHandleImp(); + return new DiskFileHandleImp(file_name); assert(false && "FilePlatform flag is invalid"); return NULL; } diff --git a/common/file_handle/hdfs_connector.cpp b/common/file_handle/hdfs_connector.cpp new file mode 100644 index 000000000..daed94189 --- /dev/null +++ b/common/file_handle/hdfs_connector.cpp @@ -0,0 +1,37 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/common/file_handle/HdfsConnector.cpp + * + * Created on: Feb 29, 2016 + * Author: yukai + * Email: yukai2014@gmail.com + * + * Description: + * + */ + +#include "hdfs_connector.h" + +namespace claims { +namespace common { + +hdfsFS HdfsConnector::fs_ = NULL; + +} /* namespace common */ +} /* namespace claims */ diff --git a/common/file_handle/hdfs_connector.h b/common/file_handle/hdfs_connector.h new file mode 100644 index 000000000..b3036c5ba --- /dev/null +++ b/common/file_handle/hdfs_connector.h @@ -0,0 +1,78 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/common/file_handle/HdfsConnector.h + * + * Created on: Feb 29, 2016 + * Author: yukai + * Email: yukai2014@gmail.com + * + * Description: + * + */ + +#ifndef COMMON_FILE_HANDLE_HDFS_CONNECTOR_H_ +#define COMMON_FILE_HANDLE_HDFS_CONNECTOR_H_ + +#include +#include +#include +#include + +#include "../../common/rename.h" +#include "../../Config.h" +using std::endl; +namespace claims { +namespace common { + +class HdfsConnector { + public: + static hdfsFS Instance() { + if (NULL == fs_) { + LOG(INFO) << "start to connect to HDFS"; + Config::getInstance(); + fs_ = + hdfsConnect(Config::hdfs_master_ip.c_str(), Config::hdfs_master_port); + if (NULL == fs_) { + LOG(ERROR) << "failed to connect to HDFS(ip:" << Config::hdfs_master_ip + << ", port:" << Config::hdfs_master_port << ")" << std::endl; + assert(false); + } + LOG(INFO) << "connected to HDFS(ip:" << Config::hdfs_master_ip + << ", port:" << Config::hdfs_master_port << ")" << std::endl; + } + return fs_; + } + + NO_COPY_AND_ASSIGN(HdfsConnector); + + ~HdfsConnector() { + hdfsDisconnect(fs_); + fs_ = NULL; + std::cerr << "disconnected hdfs and fs_ was set to NULL" << std::endl; + } + + private: + HdfsConnector() {} + static hdfsFS fs_; +}; + +} /* namespace common */ +} /* namespace claims */ + +#endif // COMMON_FILE_HANDLE_HDFS_CONNECTOR_H_ diff --git a/common/file_handle/hdfs_file_handle_imp.cpp b/common/file_handle/hdfs_file_handle_imp.cpp index b591157d3..0617a75ce 100644 --- a/common/file_handle/hdfs_file_handle_imp.cpp +++ b/common/file_handle/hdfs_file_handle_imp.cpp @@ -45,56 +45,45 @@ using claims::utility::LockGuard; namespace claims { namespace common { -HdfsFileHandleImp::HdfsFileHandleImp() : read_start_pos_(-1) { - fs_ = hdfsConnect(Config::hdfs_master_ip.c_str(), Config::hdfs_master_port); - if (NULL == fs_) { - LOG(ERROR) << "failed to connect to HDFS(ip:" << Config::hdfs_master_ip - << ", port:" << Config::hdfs_master_port << ")" << endl; - assert(false); - } - LOG(INFO) << "connected to 
HDFS(ip:" << Config::hdfs_master_ip - << ", port:" << Config::hdfs_master_port << ")" << endl; -} - -HdfsFileHandleImp::~HdfsFileHandleImp() { - int ret = rSuccess; - EXEC_AND_ONLY_LOG_ERROR(ret, Close(), "failed to close "); - ret = hdfsDisconnect(fs_); - fs_ = NULL; - if (ret != 0) LOG(ERROR) << "failed to disconnect to hdfs" << endl; -} - -RetCode HdfsFileHandleImp::Open(std::string file_name, FileOpenFlag open_flag) { - assert(NULL != fs_ && "failed to connect hdfs"); - int ret = rSuccess; - open_flag_ = open_flag; - file_name_ = file_name; - if (kReadFile == open_flag && false == CanAccess(file_name_)) { - ret = rAccessHdfsFileFail; - ELOG(ret, "File name:" << file_name_ - << " open mode:" << file_open_flag_info[open_flag]); - return ret; - } - - if (kCreateFile == open_flag) { +RetCode HdfsFileHandleImp::SwitchStatus(FileStatus status_to_be) { + int old_file_status = file_status_; + if (kInReading == status_to_be && kInReading != file_status_) { + Close(); + file_ = hdfsOpenFile(fs_, file_name_.c_str(), O_RDONLY, 0, 0, 0); + } else if (kInOverWriting == status_to_be) { + Close(); file_ = hdfsOpenFile(fs_, file_name_.c_str(), O_WRONLY, 0, 0, 0); - } else if (kAppendFile == open_flag) { + } else if (kInAppending == status_to_be && kInAppending != file_status_) { + Close(); + if (!CanAccess(file_name_)) { // this file doesn't exist, create one + file_ = hdfsOpenFile(fs_, file_name_.c_str(), O_WRONLY, 0, 0, 0); + if (NULL == file_) { + PLOG(ERROR) << "failed to create hdfs file :" << file_name_; + return rOpenHdfsFileFail; + } else { + LOG(INFO) << "created hdfs file :" << file_name_ << endl; + if (0 != hdfsCloseFile(fs_, file_)) { + LOG(ERROR) << "failed to close hdfs file: " << file_name_ << endl; + return rCloseHdfsFileFail; + } + } + } file_ = hdfsOpenFile(fs_, file_name_.c_str(), O_WRONLY | O_APPEND, 0, 0, 0); - } else if (kReadFile == open_flag) { - file_ = hdfsOpenFile(fs_, file_name_.c_str(), O_RDONLY, 0, 0, 0); } else { - LOG(ERROR) << "parameter flag:" 
<< open_flag << " is invalid" << endl; - return rParamInvalid; + return rSuccess; } + if (NULL == file_) { - PLOG(ERROR) << "failed to open hdfs file :" << file_name_; + PLOG(ERROR) << "failed to reopen file:" << file_name_ << "(" + << file_status_info[old_file_status] << ") in mode " + << file_status_info[status_to_be] << " ."; return rOpenHdfsFileFail; } else { - LOG(INFO) << "opened hdfs file: " << file_name_ << " with " - << (kCreateFile == open_flag - ? "kCreateFile" - : kAppendFile == open_flag ? "kAppendFile" : "kReadFile") - << endl; + // can_close_.set_value(1); + file_status_ = status_to_be; + LOG(INFO) << "HDFS file:" << file_name_ << "(" + << file_status_info[old_file_status] << ") is reopened for " + << file_status_info[file_status_] << endl; return rSuccess; } } @@ -102,8 +91,8 @@ RetCode HdfsFileHandleImp::Open(std::string file_name, FileOpenFlag open_flag) { RetCode HdfsFileHandleImp::Write(const void* buffer, const size_t length) { assert(NULL != fs_ && "failed to connect hdfs"); assert(NULL != file_ && "make sure file is opened"); - assert(open_flag_ != kReadFile && - "It's unavailable to write into a read-only file"); + // RefHolder holder(reference_count_); + size_t total_write_num = 0; while (total_write_num < length) { int32_t write_num = hdfsWrite( @@ -128,80 +117,62 @@ RetCode HdfsFileHandleImp::Write(const void* buffer, const size_t length) { return rSuccess; } -RetCode HdfsFileHandleImp::AtomicWrite(const void* buffer, - const size_t length) { - assert(NULL != fs_ && "failed to connect hdfs"); - assert(NULL != file_ && "make sure file is opened"); - assert(open_flag_ != kReadFile && - "It's unavailable to write into a read-only file"); - size_t total_write_num = 0; - LockGuard gurad(write_lock_); - while (total_write_num < length) { - int32_t write_num = hdfsWrite( - fs_, file_, static_cast(buffer) + total_write_num, - length - total_write_num); - if (-1 == write_num) { - PLOG(ERROR) << "failed to write buffer(" << buffer - << ") to file: 
" << file_name_ << endl; - return rWriteDiskFileFail; - } - total_write_num += write_num; - } - return rSuccess; -} - RetCode HdfsFileHandleImp::Close() { if (NULL == file_) { - LOG(INFO) << "hdfs file have been closed " << endl; + LOG(INFO) << "hdfs file:" << file_name_ << " have been closed " << endl; return rSuccess; } assert(NULL != fs_ && "failed to connect hdfs"); - static char* hdfs_file_type[] = {"UNINITIALIZED", "INPUT", "OUTPUT"}; - LOG(INFO) << "the type of file_ is" << hdfs_file_type[file_->type] << endl; - if (0 != hdfsCloseFile(fs_, file_)) { - LOG(ERROR) << "failed to close hdfs file: " << file_name_ << endl; + PLOG(ERROR) << "failed to close hdfs file: " << file_name_; return rCloseHdfsFileFail; } file_ = NULL; - LOG(INFO) << "hdfs file is been closed " << endl; + file_status_ = kClosed; + LOG(INFO) << "hdfs file: " << file_name_ << " is closed " << endl; return rSuccess; } RetCode HdfsFileHandleImp::ReadTotalFile(void*& buffer, size_t* length) { + assert(NULL != fs_ && "failed to connect hdfs"); + // RefHolder holder(reference_count_); + int ret = rSuccess; + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInReading), + "failed to switch status"); + assert(NULL != fs_ && "failed to connect hdfs"); assert(NULL != file_ && "make sure file is opened"); - int ret = rSuccess; hdfsFileInfo* hdfsfile = hdfsGetPathInfo(fs_, file_name_.c_str()); + if (NULL == hdfsfile) { + PLOG(ERROR) << "failed to open file :" << file_name_ << " in mode" + << file_status_info[kInReading] << " ."; + return rOpenHdfsFileFail; + } int file_length = hdfsfile->mSize; LOG(INFO) << "The length of file " << file_name_ << " is " << file_length << endl; + buffer = Malloc(file_length + 1); // set position 0 if (rSuccess != (ret = SetPosition(0))) { return ret; } + *length = file_length; - buffer = Malloc(file_length + 1); - int read_num = hdfsRead(fs_, file_, buffer, file_length); - LOG(INFO) << "read " << read_num << " data from hdfs file " << file_name_ - << endl; - - if (read_num != 
file_length) { - LOG(ERROR) << "failed to read file [" << file_name_ - << "] from hdfs , expected read " << file_length - << " , actually read " << read_num << endl; - return rReadHdfsFileFail; - } - *length = read_num; - return ret; + return Read(buffer, file_length); } RetCode HdfsFileHandleImp::Read(void* buffer, size_t length) { assert(NULL != fs_ && "failed to connect hdfs"); - assert(NULL != file_ && "make sure file is opened"); + // RefHolder holder(reference_count_); + + int ret = rSuccess; + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInReading), + "failed to switch status"); + assert(NULL != fs_ && "failed to connect hdfs"); + assert(NULL != file_ && "make sure file is opened"); int total_read_num = 0; while (total_read_num < length) { int read_num = @@ -223,12 +194,12 @@ RetCode HdfsFileHandleImp::SetPosition(size_t pos) { assert(NULL != fs_ && "failed to connect hdfs"); assert(NULL != file_ && "make sure file is opened"); - assert(kReadFile == open_flag_ && + assert(kInReading == file_status_ && "Seeking is only work for files opened in read-only mode"); int ret = hdfsSeek(fs_, file_, pos); if (0 != ret) { - LOG(ERROR) << "failed to seek to " << pos << " in " << file_name_ << " file" - << endl; + PLOG(ERROR) << "failed to seek to " << pos << " in " << file_name_ + << " file" << endl; return rLSeekHdfsFileFail; } @@ -236,16 +207,69 @@ RetCode HdfsFileHandleImp::SetPosition(size_t pos) { return rSuccess; } +RetCode HdfsFileHandleImp::Append(const void* buffer, const size_t length) { + // RefHolder holder(reference_count_); + assert(NULL != fs_ && "failed to connect hdfs"); + int ret = rSuccess; + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInAppending), + "failed to switch status"); + + return Write(buffer, length); +} + +RetCode HdfsFileHandleImp::AtomicAppend(const void* buffer, const size_t length, + function lock_func, + function unlock_func) { + assert(NULL != fs_ && "failed to connect hdfs"); + lock_func(); + RetCode ret = Append(buffer, length); + // 
must close because another imp may want to open this file + RetCode ret2 = Close(); + unlock_func(); + if (ret == rSuccess) ret = ret2; + return ret; +} + +RetCode HdfsFileHandleImp::OverWrite(const void* buffer, const size_t length) { + // RefHolder holder(reference_count_); + assert(NULL != fs_ && "failed to connect hdfs"); + int ret = rSuccess; + EXEC_AND_RETURN_ERROR(ret, SwitchStatus(kInOverWriting), + "failed to switch status"); + return Write(buffer, length); +} + +RetCode HdfsFileHandleImp::AtomicOverWrite(const void* buffer, + const size_t length, + function lock_func, + function unlock_func) { + assert(NULL != fs_ && "failed to connect hdfs"); + + lock_func(); + RetCode ret = OverWrite(buffer, length); + // must close because another imp may want to open this file + RetCode ret2 = Close(); + unlock_func(); + if (ret == rSuccess) ret = ret2; + return ret; +} + RetCode HdfsFileHandleImp::DeleteFile() { + assert(NULL != fs_ && "failed to connect hdfs"); + int ret = rSuccess; EXEC_AND_ONLY_LOG_ERROR(ret, Close(), "file name: " << file_name_); - if (0 == hdfsExists(fs_, file_name_.c_str())) { - if (0 != hdfsDelete(fs_, file_name_.c_str())) { + if (CanAccess(file_name_)) { + if (0 != hdfsDelete(fs_, file_name_.c_str(), 0)) { LOG(ERROR) << "Failed to delete file : [" + file_name_ + "]." 
<< std::endl; return rFailure; + } else { + LOG(INFO) << "The file " << file_name_ << " is deleted successfully"; } } else { + file_ = NULL; + file_status_ = kClosed; LOG(WARNING) << "The file " << file_name_ << "is not exits!\n" << std::endl; } return rSuccess; diff --git a/common/file_handle/hdfs_file_handle_imp.h b/common/file_handle/hdfs_file_handle_imp.h index 7e45e5796..79bc222a2 100644 --- a/common/file_handle/hdfs_file_handle_imp.h +++ b/common/file_handle/hdfs_file_handle_imp.h @@ -33,6 +33,7 @@ #include #include "./file_handle_imp.h" +#include "./hdfs_connector.h" #include "../../common/rename.h" namespace claims { @@ -44,7 +45,11 @@ class HdfsFileHandleImp : public FileHandleImp { friend FileHandleImpFactory; private: - HdfsFileHandleImp(); + explicit HdfsFileHandleImp(std::string file_name) + : read_start_pos_(-1), + file_(NULL), + FileHandleImp(file_name), + fs_(HdfsConnector::Instance()) {} NO_COPY_AND_ASSIGN(HdfsFileHandleImp); @@ -52,11 +57,24 @@ class HdfsFileHandleImp : public FileHandleImp { /** * @brief Method description: call Close() and disconnect HDFS */ - virtual ~HdfsFileHandleImp(); - virtual RetCode Open(std::string file_name, FileOpenFlag open_flag); + virtual ~HdfsFileHandleImp() { + int ret = rSuccess; + EXEC_AND_ONLY_LOG_ERROR(ret, Close(), "failed to close "); + } + // virtual RetCode Open(std::string file_name, FileOpenFlag open_flag); // see more in FileHandleImp class - virtual RetCode Write(const void* buffer, const size_t length); - virtual RetCode AtomicWrite(const void* buffer, const size_t length); + virtual RetCode Append(const void* buffer, const size_t length); + + virtual RetCode AtomicAppend(const void* buffer, const size_t length, + function lock_func, + function unlock_func); + + virtual RetCode OverWrite(const void* buffer, const size_t length); + + virtual RetCode AtomicOverWrite(const void* buffer, const size_t length, + function lock_func, + function unlock_func); + virtual RetCode Close(); // see more in 
FileHandleImp class virtual RetCode ReadTotalFile(void*& buffer, size_t* length); @@ -66,15 +84,21 @@ class HdfsFileHandleImp : public FileHandleImp { assert(fs_ != NULL && "failed to connect hdfs"); return 0 == hdfsExists(fs_, file_name.c_str()); } - virtual RetCode SetPosition(size_t pos); virtual RetCode DeleteFile(); + virtual RetCode SwitchStatus(FileStatus status_to_be); + + protected: + virtual RetCode SetPosition(size_t pos); + + private: + RetCode Write(const void* buffer, const size_t length); + private: - hdfsFS fs_ = NULL; - hdfsFile file_ = NULL; + hdfsFS fs_; + hdfsFile file_; int64_t read_start_pos_; - FileOpenFlag open_flag_ = kReadFile; }; } // namespace common } /* namespace claims */ diff --git a/common/file_handle/test/Makefile.am b/common/file_handle/test/Makefile.am index 61a06fe4e..0da1a2aeb 100644 --- a/common/file_handle/test/Makefile.am +++ b/common/file_handle/test/Makefile.am @@ -1,5 +1,5 @@ AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ -I${GTEST_HOME}/include diff --git a/common/file_handle/test/disk_file_handle_imp_test.h b/common/file_handle/test/disk_file_handle_imp_test.h index 1e71537c1..0b483ba4d 100644 --- a/common/file_handle/test/disk_file_handle_imp_test.h +++ b/common/file_handle/test/disk_file_handle_imp_test.h @@ -45,8 +45,8 @@ namespace common { class DiskFileHandleImpTest : public ::testing::Test { public: static void SetUpTestCase() { - // file_name_ = "DiskFileHandleImpTest"; - imp_ = FileHandleImpFactory::Instance().CreateFileHandleImp(kDisk); + imp_ = + FileHandleImpFactory::Instance().CreateFileHandleImp(kDisk, file_name_); std::cout << "=============" << std::endl; } static void TearDownTestCase() { DELETE_PTR(imp_); } @@ -55,109 +55,116 @@ class DiskFileHandleImpTest : public ::testing::Test { static FileHandleImp* imp_; static string file_name_; + char* buffer = "abc"; }; -string 
DiskFileHandleImpTest::file_name_ = "DiskFileHandleImpTest"; +string DiskFileHandleImpTest::file_name_ = + "/home/imdb/data/yk/DiskFileHandleImpTest"; FileHandleImp* DiskFileHandleImpTest::imp_ = NULL; - TEST_F(DiskFileHandleImpTest, TestAccess1) { - bool ret = imp_->CanAccess(file_name_); + int ret = rSuccess; + EXEC_AND_LOG(ret, imp_->DeleteFile(), "deleted file ", + "failed to delete file"); + ret = imp_->CanAccess(file_name_); EXPECT_FALSE(ret); } TEST_F(DiskFileHandleImpTest, TestAccess2) { - imp_->Open(file_name_, kCreateFile); + imp_->Append(buffer, 3); bool ret = imp_->CanAccess(file_name_); EXPECT_TRUE(ret); - int res = imp_->Close(); - EXPECT_EQ(rSuccess, res); +} + +TEST_F(DiskFileHandleImpTest, Delete) { + imp_->OverWrite(buffer, 3); + bool ret = imp_->CanAccess(file_name_); + EXPECT_TRUE(ret); + EXPECT_EQ(rSuccess, imp_->DeleteFile()); + EXPECT_FALSE(imp_->CanAccess(file_name_)); +} + +TEST_F(DiskFileHandleImpTest, DeleteNonExistFile) { + EXPECT_EQ(rSuccess, imp_->DeleteFile()); +} + +TEST_F(DiskFileHandleImpTest, ReadNonExistFile) { + void* data = NULL; + size_t length = 0; + EXPECT_EQ(rSuccess, imp_->DeleteFile()); + EXPECT_EQ(rOpenDiskFileFail, imp_->ReadTotalFile(data, &length)); } TEST_F(DiskFileHandleImpTest, Write) { - imp_->Open(file_name_, kCreateFile); - char* buffer = "abc"; char* data = static_cast(Malloc(4)); int ret = rSuccess; - if (rSuccess != (imp_->Write(buffer, 3)) || - rSuccess != (imp_->SetPosition(0)) || - rSuccess != (ret = imp_->Read(data, 3)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + EXPECT_EQ(rSuccess, imp_->OverWrite(buffer, 3)); + EXPECT_EQ(rSuccess, imp_->PRead(data, 3, 0)); + EXPECT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("abc", data); } TEST_F(DiskFileHandleImpTest, Append) { - imp_->Open(file_name_, kAppendFile); - char* buffer = "abc"; - char* data = static_cast(Malloc(7)); + void* data = static_cast(Malloc(7)); + uint64_t length; int ret = rSuccess; - if (rSuccess != (imp_->Write(buffer, 3)) || 
rSuccess != (imp_->Close()) || - rSuccess != (imp_->Open(file_name_, kReadFile)) || - rSuccess != (imp_->SetPosition(0)) || - rSuccess != (ret = imp_->Read(data, 6)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } - EXPECT_STREQ("abcabc", data); + EXPECT_EQ(rSuccess, imp_->Append(buffer, 3)); + EXPECT_EQ(rSuccess, imp_->ReadTotalFile(data, &length)); + EXPECT_EQ(rSuccess, imp_->Close()); + EXPECT_STREQ("abcabc", (char*)data); } TEST_F(DiskFileHandleImpTest, Read) { - imp_->Open(file_name_, kReadFile); char* data = static_cast(Malloc(7)); int ret = rSuccess; - if (rSuccess != (ret = imp_->Read(data, 6)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + EXPECT_EQ(rSuccess, imp_->Read(data, 6)); + EXPECT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("abcabc", data); } TEST_F(DiskFileHandleImpTest, ReadTotalFile) { - imp_->Open(file_name_, kAppendFile); - char* buffer = "abc"; void* data = NULL; size_t a = 0; int ret = rSuccess; - if (rSuccess != (imp_->Write(buffer, 3)) || rSuccess != (imp_->Close()) || - rSuccess != (imp_->Open(file_name_, kReadFile)) || - rSuccess != (ret = imp_->ReadTotalFile(data, &a)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + EXPECT_EQ(rSuccess, imp_->Append(buffer, 3)); + EXPECT_EQ(rSuccess, imp_->ReadTotalFile(data, &a)); + EXPECT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("abcabcabc", static_cast(data)); } TEST_F(DiskFileHandleImpTest, PositionalRead) { - imp_->Open(file_name_, kReadFile); - char* buffer = "abc"; char* data = static_cast(Malloc(4)); size_t a = 0; int ret = rSuccess; - if (rSuccess != (ret = imp_->SetPosition(5)) || - rSuccess != (ret = imp_->Read(data, 3)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + EXPECT_EQ(rSuccess, imp_->PRead(data, 3, 5)); + EXPECT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("cab", data); } TEST_F(DiskFileHandleImpTest, OverWrite) { - imp_->Open(file_name_, kCreateFile); - char* buffer = "abc"; void* data = NULL; size_t a = 0; int ret = rSuccess; - if (rSuccess != 
(imp_->Write(buffer, 3)) || rSuccess != (imp_->Close()) || - rSuccess != (imp_->Open(file_name_, kReadFile)) || - rSuccess != (ret = imp_->ReadTotalFile(data, &a)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + EXPECT_EQ(rSuccess, imp_->OverWrite(buffer, 3)); + EXPECT_EQ(rSuccess, imp_->ReadTotalFile(data, &a)); + EXPECT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("abc", static_cast(data)); } -} // namespace common +TEST_F(DiskFileHandleImpTest, AppendIntoNonExistFile) { + void* data = NULL; + size_t a = 0; + int ret = rSuccess; + EXPECT_EQ(rSuccess, imp_->DeleteFile()); + EXPECT_EQ(rSuccess, imp_->Append(buffer, 3)); + EXPECT_EQ(rSuccess, imp_->ReadTotalFile(data, &a)); + EXPECT_STREQ("abc", static_cast(data)); +} + +TEST_F(DiskFileHandleImpTest, GetName) { + EXPECT_STREQ(file_name_.c_str(), imp_->get_file_name().c_str()); +} +TEST_F(DiskFileHandleImpTest, AtomicAppend) {} +} // namespace common } // namespace claims #endif // COMMON_FILE_HANDLE_TEST_DISK_FILE_HANDLE_IMP_TEST_H_ diff --git a/common/file_handle/test/hdfs_file_handle_imp_test.h b/common/file_handle/test/hdfs_file_handle_imp_test.h index b4d7072ca..b9f39e0b7 100644 --- a/common/file_handle/test/hdfs_file_handle_imp_test.h +++ b/common/file_handle/test/hdfs_file_handle_imp_test.h @@ -46,7 +46,8 @@ class HdfsFileHandleImpTest : public ::testing::Test { public: static void SetUpTestCase() { Config::getInstance(); - imp_ = FileHandleImpFactory::Instance().CreateFileHandleImp(kHdfs); + imp_ = + FileHandleImpFactory::Instance().CreateFileHandleImp(kHdfs, file_name_); std::cout << "=============" << std::endl; } static void TearDownTestCase() { DELETE_PTR(imp_); } @@ -54,110 +55,113 @@ class HdfsFileHandleImpTest : public ::testing::Test { public: static FileHandleImp* imp_; static string file_name_; + char* buffer = "abc"; }; -string HdfsFileHandleImpTest::file_name_ = "HdfsFileHandleImpTest"; +string HdfsFileHandleImpTest::file_name_ = // NOLINT + "/home/imdb/data/yk/HdfsFileHandleImpTest"; 
FileHandleImp* HdfsFileHandleImpTest::imp_ = NULL; - TEST_F(HdfsFileHandleImpTest, TestAccess1) { - bool ret = imp_->CanAccess(file_name_); - EXPECT_FALSE(ret); + ASSERT_EQ(rSuccess, imp_->DeleteFile()); + ASSERT_FALSE(imp_->CanAccess(file_name_)); } TEST_F(HdfsFileHandleImpTest, TestAccess2) { - imp_->Open(file_name_, kCreateFile); - bool ret = imp_->CanAccess(file_name_); - EXPECT_TRUE(ret); - int res = imp_->Close(); - EXPECT_EQ(rSuccess, res); + assert(rSuccess == imp_->Append(buffer, 3)); + ASSERT_TRUE(imp_->CanAccess(file_name_)); +} + +TEST_F(HdfsFileHandleImpTest, Delete) { + ASSERT_EQ(rSuccess, imp_->OverWrite(buffer, 3)); + ASSERT_TRUE(imp_->CanAccess(file_name_)); + ASSERT_EQ(rSuccess, imp_->DeleteFile()); + ASSERT_FALSE(imp_->CanAccess(file_name_)); +} + +TEST_F(HdfsFileHandleImpTest, DeleteNonExistFile) { + ASSERT_EQ(rSuccess, imp_->DeleteFile()); +} + +TEST_F(HdfsFileHandleImpTest, ReadNonExistFile) { + void* data = NULL; + size_t length = 0; + ASSERT_EQ(rSuccess, imp_->DeleteFile()); + ASSERT_EQ(rOpenHdfsFileFail, imp_->ReadTotalFile(data, &length)); } TEST_F(HdfsFileHandleImpTest, Write) { - imp_->Open(file_name_, kCreateFile); - char* buffer = "abc"; char* data = static_cast(Malloc(4)); int ret = rSuccess; - if (rSuccess != (imp_->Write(buffer, 3)) || rSuccess != (imp_->Close()) || - rSuccess != (imp_->Open(file_name_, kReadFile)) || - rSuccess != (imp_->SetPosition(0)) || - rSuccess != (ret = imp_->Read(data, 3)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + ASSERT_EQ(rSuccess, imp_->OverWrite(buffer, 3)); + ASSERT_EQ(rSuccess, imp_->PRead(data, 3, 0)); + ASSERT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("abc", data); } TEST_F(HdfsFileHandleImpTest, Append) { - imp_->Open(file_name_, kAppendFile); - char* buffer = "abc"; - char* data = static_cast(Malloc(7)); + void* data = static_cast(Malloc(7)); + uint64_t length; int ret = rSuccess; - if (rSuccess != (imp_->Write(buffer, 3)) || rSuccess != (imp_->Close()) || - rSuccess != 
(imp_->Open(file_name_, kReadFile)) || - rSuccess != (imp_->SetPosition(0)) || - rSuccess != (ret = imp_->Read(data, 6)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } - EXPECT_STREQ("abcabc", data); + ASSERT_EQ(rSuccess, imp_->Append(buffer, 3)); + ASSERT_EQ(rSuccess, imp_->ReadTotalFile(data, &length)); + ASSERT_EQ(rSuccess, imp_->Close()); + EXPECT_STREQ("abcabc", (char*)data); } TEST_F(HdfsFileHandleImpTest, Read) { - imp_->Open(file_name_, kReadFile); char* data = static_cast(Malloc(7)); int ret = rSuccess; - if (rSuccess != (ret = imp_->Read(data, 6)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + ASSERT_EQ(rSuccess, imp_->Read(data, 6)); + ASSERT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("abcabc", data); } TEST_F(HdfsFileHandleImpTest, ReadTotalFile) { - imp_->Open(file_name_, kAppendFile); - char* buffer = "abc"; void* data = NULL; size_t a = 0; int ret = rSuccess; - if (rSuccess != (imp_->Write(buffer, 3)) || rSuccess != (imp_->Close()) || - rSuccess != (imp_->Open(file_name_, kReadFile)) || - rSuccess != (ret = imp_->ReadTotalFile(data, &a)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + ASSERT_EQ(rSuccess, imp_->Append(buffer, 3)); + ASSERT_EQ(rSuccess, imp_->ReadTotalFile(data, &a)); + ASSERT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("abcabcabc", static_cast(data)); } TEST_F(HdfsFileHandleImpTest, PositionalRead) { - imp_->Open(file_name_, kReadFile); - char* buffer = "abc"; char* data = static_cast(Malloc(4)); size_t a = 0; int ret = rSuccess; - if (rSuccess != (ret = imp_->SetPosition(5)) || - rSuccess != (ret = imp_->Read(data, 3)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + ASSERT_EQ(rSuccess, imp_->PRead(data, 3, 5)); + ASSERT_EQ(rSuccess, imp_->Close()); EXPECT_STREQ("cab", data); } TEST_F(HdfsFileHandleImpTest, OverWrite) { - imp_->Open(file_name_, kCreateFile); - char* buffer = "abc"; void* data = NULL; size_t a = 0; int ret = rSuccess; - if (rSuccess != (imp_->Write(buffer, 3)) || rSuccess != 
(imp_->Close()) || - rSuccess != (imp_->Open(file_name_, kReadFile)) || - rSuccess != (ret = imp_->ReadTotalFile(data, &a)) || - rSuccess != (ret = imp_->Close())) { - FAIL(); - } + ASSERT_EQ(rSuccess, imp_->OverWrite(buffer, 3)); + ASSERT_EQ(rSuccess, imp_->ReadTotalFile(data, &a)); + ASSERT_EQ(rSuccess, imp_->Close()); + EXPECT_STREQ("abc", static_cast(data)); +} + +TEST_F(HdfsFileHandleImpTest, AppendIntoNonExistFile) { + void* data = NULL; + size_t a = 0; + int ret = rSuccess; + ASSERT_EQ(rSuccess, imp_->DeleteFile()); + ASSERT_EQ(rSuccess, imp_->Append(buffer, 3)); + ASSERT_EQ(rSuccess, imp_->ReadTotalFile(data, &a)); EXPECT_STREQ("abc", static_cast(data)); } -} // namespace common +TEST_F(HdfsFileHandleImpTest, GetName) { + EXPECT_STREQ(file_name_.c_str(), imp_->get_file_name().c_str()); +} + +TEST_F(HdfsFileHandleImpTest, AtomicAppend) {} + +} // namespace common } // namespace claims #endif // COMMON_FILE_HANDLE_TEST_HDFS_FILE_HANDLE_IMP_TEST_H_ diff --git a/common/ids.cpp b/common/ids.cpp index 47aab3a62..1973f4f65 100644 --- a/common/ids.cpp +++ b/common/ids.cpp @@ -7,11 +7,15 @@ #include "ids.h" #include "../Config.h" +#include "string.h" std::string PartitionID::getPathAndName() const { - - std::ostringstream str; -// str<<"/home/claims/data/tpc-h/4_partitions/SF-20/T"< @@ -75,6 +76,7 @@ void Register_Block_Stream_Iterator(Archive& ar) { ar.register_type(static_cast(NULL)); ar.register_type(static_cast(NULL)); ar.register_type(static_cast(NULL)); + ar.register_type(static_cast(NULL)); ar.register_type(static_cast(NULL)); ar.register_type(static_cast(NULL)); ar.register_type(static_cast(NULL)); diff --git a/common/types/decimal.cpp b/common/types/decimal.cpp index a7a447de0..c48e53fd3 100644 --- a/common/types/decimal.cpp +++ b/common/types/decimal.cpp @@ -28,6 +28,13 @@ #include "./decimal.h" +#include +#include +#include +#include +#include +#include + #include #include #include @@ -36,450 +43,217 @@ namespace claims { namespace common { -inline 
string& ltrim(string& ss, char c) { - while (ss.size() > 0 && ss[0] == c) - ss.erase(0, 1); - return ss; -} -inline string& rtrim(string& ss, char c) { - while (ss.size() > 0 && ss[ss.size() - 1] == c) - ss.erase(ss.size() - 1, 1); - return ss; -} -inline string& trim(string& st, char c) { - ltrim(rtrim(st, c), c); - return st; -} - -inline bool isAllDigit(string &ss) { - for (int ii = 0; ii < static_cast(ss.size()); ii++) { - if (!isdigit(ss[ii])) { - return false; - } - } - return true; -} - -inline bool DoSign(string & ss) { - bool isSign = false; - if ((isSign = (ss[0] == '-')) || (ss[0] == '+')) ss.erase(0, 1); - return isSign; -} - // 30 '0' 1000000000000000000000000000000 #define KMAXSCALEFACTOR "1000000000000000000000000000000" -// 45+30 -#define NULLTTINTSTRING "9999999999\ -9999999999\ -9999999999\ -9999999999\ -99999\ -9999999999\ -9999999999\ -9999999999" - -/* - // 42+30 - #define MAXTTINTSTRING "9999999999\ -9999999999\ -9999999999\ -9999999999\ -99\ -9999999999\ -9999999999\ -9999999999" - - // -, 42+30 - #define MINTTINTSTRING "-9999999999\ -9999999999\ -9999999999\ -9999999999\ -99\ -9999999999\ -9999999999\ -9999999999" - */ const TTInt Decimal::kMaxScaleFactor = KMAXSCALEFACTOR; -//const TTInt Decimal::kMaxTTIntValue = MAXTTINTSTRING; -//const TTInt Decimal::kMinTTIntValue = MINTTINTSTRING; Decimal::Decimal() - : precision_(10), scale_(0) { + : decimal_sign_(DECIMAL_POS) { memset(word, 0, sizeof(word)); } Decimal::Decimal(int precision, int scale, string valuestr) - : precision_(precision), scale_(scale) { - // TODO Auto-generated constructor stub - memset(word, 0, sizeof(word)); - + : decimal_sign_(DECIMAL_POS) { + memset(word, 0, sizeof(word)); + StrtoDecimal(precision, scale, valuestr.c_str()); +} - DEBUGOUT("precision:" << precision << ",scale:" << scale); - bool issign = false; - string whole = ""; - string fractinal = ""; - if (StringToDecimal(precision, scale, valuestr, &issign, &whole, &fractinal)) - SetTTInt(issign, whole, fractinal); 
+Decimal::Decimal(int precision, int scale, const char * valstr) + : decimal_sign_(DECIMAL_POS) { + memset(word, 0, sizeof(word)); + StrtoDecimal(precision, scale, valstr); } Decimal::~Decimal() { // TODO Auto-generated destructor stub } -bool Decimal::StringToDecimal(int p, int s, string strdec, bool * pissign, - string * pwhole, string * pfractinal) { - DecimalString decstr; - if (!StringToDecimal(strdec, decstr)) { - LOG(ERROR) << "Invalid string during convert to decimal:\"" << strdec << "\"." - << endl; - return false; - } - DEBUGOUT("To decimal:--------------------------------------"); - const int psubs = p - s; - if (psubs > CLAIMS_COMMON_DECIMAL_PSUBS || psubs <= 0) { - LOG(ERROR) << "Invalid precision and scale:\"" << p << "\", \"" << s << "\"." - << endl; - return false; - } - string whole_part(decstr.whole_part_); - string fractional_part(decstr.fractional_part_); - - DEBUGOUT("1 whole_part:" << whole_part); - DEBUGOUT("2 fractional_part:" << fractional_part); - - if (decstr.e_power_ != "") { - int e_int_power = atoi(decstr.e_power_.c_str()); - DEBUGOUT("epower:" << e_int_power); - if (decstr.e_sign_) // - - { - if ((int) whole_part.size() - e_int_power > psubs) { - LOG(ERROR) << "Too Large Decimal value nearly:\"" << whole_part - << "\" and \"-" << decstr.e_power_ << "\"." 
<< endl; - return false; - } - if (e_int_power <= (int) whole_part.size()) { - fractional_part.insert( - 0, whole_part.substr(whole_part.size() - e_int_power, e_int_power)); - whole_part.erase(whole_part.size() - e_int_power, e_int_power); - } - else { - fractional_part.insert(0, whole_part); - for (unsigned int ii = 0; ii < e_int_power - whole_part.size(); ii++) { - fractional_part.insert(0, "0"); - } - whole_part = ""; - } - } - else //+ - { - if (e_int_power <= (int) fractional_part.size()) { - whole_part.append(fractional_part.substr(0, e_int_power)); - fractional_part.erase(0, e_int_power); - } - else { - whole_part.append(fractional_part); - for (unsigned int ii = 0; ii < e_int_power - fractional_part.size(); - ii++) { - whole_part.append("0"); - } - fractional_part = ""; - } - - } - } - - ltrim(whole_part, '0'); - - DEBUGOUT("3 whole_part:" << whole_part); - DEBUGOUT("4 whole_part size:" << whole_part.size()); - DEBUGOUT("5 fractional_part:" << fractional_part); - DEBUGOUT("6 fractional_part size:" << fractional_part.size()); - - if ((int) whole_part.size() > psubs) { - LOG(ERROR) << "Too Large Decimal value nearly:\"" << strdec << "\"." 
<< endl; - return false; - } - - DEBUGOUT("7 fractional_part:" << fractional_part); - - if ((NULL != pissign) && (NULL != pwhole) && (NULL != pfractinal)) { - *pissign = decstr.is_sign_; - *pwhole = whole_part; - *pfractinal = fractional_part; - } - - return true; -} - -bool Decimal::StringToDecimal(string strdec, DecimalString & decstr) { - bool is_sign = false; - size_t epos = string::npos; - size_t dot_pos = string::npos; - string numstr = ""; - string whole_part = ""; - string fractional_part = ""; - string e_str_power = ""; - bool e_sign = false; - - DEBUGOUT("enter strdec..........................................."); - DEBUGOUT("1:" + strdec); - trim(strdec, ' '); - DEBUGOUT("2:" + strdec); - - // true is negative, false is postive - is_sign = DoSign(strdec); - DEBUGOUT("3:" + strdec); - epos = ((epos = strdec.find('e')) == string::npos) ? strdec.find('E') : epos; - DEBUGOUT("epos:" << epos); - - numstr = strdec.substr(0, epos); - DEBUGOUT("4:" + numstr); - dot_pos = numstr.find('.', 0); - - whole_part = numstr.substr(0, dot_pos); - ltrim(whole_part, '0'); - if (!isAllDigit(whole_part)) { - LOG(ERROR) << "Invalid characters in decimal whole part:\"" << whole_part << "\"." - << endl; - return false; - } - if (dot_pos != string::npos) { - fractional_part = numstr.substr(dot_pos + 1, numstr.size() - (dot_pos + 1)); - // rtrim(fractional_part, '0'); - if (!isAllDigit(fractional_part)) { - LOG(ERROR) << "Invalid characters in decimal fractional part:\"" - << fractional_part << "\"." << endl; - return false; - } - } - - DEBUGOUT("5:" + whole_part); - DEBUGOUT("6:" + fractional_part); - - if (epos != string::npos) { - e_str_power = strdec.substr(epos + 1, strdec.size() - (epos + 1)); - DEBUGOUT("e_power:" + e_str_power); - e_sign = DoSign(e_str_power); - ltrim(e_str_power, '0'); - if (!isAllDigit(e_str_power)) { - LOG(ERROR) << "Invalid ePower:\"" << e_str_power << "\"." 
<< endl; - return false; - } - DEBUGOUT("7:" + e_str_power); - } - - if (NULL != &decstr) { - decstr.is_sign_ = is_sign; - decstr.whole_part_ = whole_part; - decstr.fractional_part_ = fractional_part; - decstr.e_sign_ = e_sign; - decstr.e_power_ = e_str_power; - } - - return true; -} - -void Decimal::SetTTInt(bool issign, string wholestr, string fractinalstr) { - string whole_part(wholestr); - string fractional_part(fractinalstr); - - string digitend = "0"; - string num1 = "1"; - - /* Invailed scale. convert to Decimal during calculate, Set scale by num self */ - if(this->scale_ < 0) - { - this->SetScale( MINVAL(Decimal::kMaxDecScale, (int)fractional_part.size())); - } - - DEBUGOUT("this->scale_:" << this->scale_); - DEBUGOUT(fractional_part.size() << ":" << fractional_part); - - if(this->scale_ < (int)fractional_part.size()) - { - digitend = fractional_part[this->scale_]; - fractional_part.erase(this->scale_, - (int)fractional_part.size() - (this->scale_)); - } - - while (fractional_part.size() < Decimal::kMaxDecScale) { - fractional_part.push_back('0'); - num1.push_back('0'); - } - - TTInt whole(whole_part); - TTInt fractional(fractional_part); - whole *= Decimal::kMaxScaleFactor; - whole += fractional; - if(digitend[0]-'5'>=0) - { - whole += TTInt(num1); - } - if (issign) { - whole.SetSign(); - } - - SetTTInt(whole); -} - -TTInt Decimal::Round(unsigned num) const +bool Decimal::StrtoDecimal(int p, int s, const char *cp) { - TTInt out_value = this->word[0]; - if(num >= 30) return out_value; - string sfrafive = "5"; - while(Decimal::kMaxDecScale - (int)num- (int)sfrafive.size() > 0 ) - sfrafive.append("0"); - //DEBUGOUT("sfrafive: " << sfrafive); - TTInt frafive_value(sfrafive.c_str()); - if(out_value.IsSign()) - out_value -= frafive_value; - else - out_value += frafive_value; - return out_value; + bool have_dp = false; + int i = 0; + char decdigits[DECIMAL_MAXPRCISION+1]; + char num1[31]; + int dsign = DECIMAL_POS; + int dweight = -1; + const char *str = cp; + + 
memset(decdigits, '0', DECIMAL_MAXPRCISION+1); + num1[0] = '1'; + memset(num1+1, '0', 30); + + while (isspace(*cp)) + cp++; + //printf("source :%s\n",cp); + switch (*cp) + { + case '+': + cp++; + break; + case '-': + dsign = DECIMAL_NEG; + cp++; + break; + } + + while (*cp) + { + if (isdigit((unsigned char) *cp)) + { + decdigits[i++] = *cp++; + if (!have_dp) + dweight++; + } + else if (*cp == '.') + { + if (have_dp) + { + printf("invalid input syntax for type numeric: \"%s\"\n", str); + return false; + } + have_dp = true; + cp++; + } + else + break; + } + if (*cp == 'e' || *cp == 'E') + { + long exponent; + char *endptr; + cp++; + exponent = strtol(cp, &endptr, 10); + if (endptr == cp) + { + printf("invalid input syntax for type numeric: \"%s\"\n", str); + return false; + } + cp = endptr; + dweight += (int) exponent; + } + if (dweight > p - s ) + { + printf("invalid input syntax for type numeric: \"%s\"\n", str); + return false; + } + while (*cp) + { + if (isspace((unsigned char) *cp)) + { + cp++; + } + else + { + printf("invalid input syntax for type numeric: \"%s\"\n", str); + return false; + } + } + + decdigits[DECIMAL_MAXSCALE + dweight + 1] = 0; + char c = decdigits[s + dweight + 1]; + memset(decdigits + s + dweight + 1, '0', DECIMAL_MAXSCALE - s); + TTInt whole(decdigits); + + num1[30 - s + 1] = 0; + TTInt fra(num1); + + if (DECIMAL_NEG == dsign) + { + fra.SetSign(); + whole.SetSign(); + } + SetTTInt((c>='5'?whole+fra:whole)); + return true; } -string Decimal::ToString(unsigned number_of_fractinal_digits) const { +string Decimal::toString(unsigned number_of_fractinal_digits) const { if (isNull()) return "NULL"; - assert(number_of_fractinal_digits <= Decimal::kMaxDecScale); string ress = ""; - TTInt rest = Round(number_of_fractinal_digits); + TTInt rest = this->word[0]; rest.ToString(ress); - if (rest.IsSign()) ress.erase(0, 1); - while ((Decimal::kMaxDecScale - (int) ress.length()) >= 0) - ress.insert(0, "0"); + int sign = 0; + if (rest.IsSign()) sign = 1; + 
while ((Decimal::kMaxDecScale + sign - (int) ress.length()) >= 0) + ress.insert(sign, "0"); if(number_of_fractinal_digits > 0) ress.insert(ress.length() - Decimal::kMaxDecScale, "."); ress.erase(ress.size() - Decimal::kMaxDecScale + number_of_fractinal_digits, Decimal::kMaxDecScale - number_of_fractinal_digits); - if (rest.IsSign()) ress.insert(0, "-"); return ress; } Decimal Decimal::CreateNullDecimal() { - Decimal NDecimal; - TTInt NTTInt(NULLTTINTSTRING); - NDecimal.SetTTInt(NTTInt); + static Decimal NDecimal; + const_cast(NDecimal.decimal_sign_) = DECIMAL_NAN; return NDecimal; } bool Decimal::isNull() const { - TTInt NTTInt(NULLTTINTSTRING); - return NTTInt == this->GetTTInt(); + return decimal_sign_ == DECIMAL_NAN; } Decimal & Decimal::operator=(const Decimal &rhs) { if (this == &rhs) return *this; this->word[0] = rhs.GetTTInt(); - const_cast(this->precision_) = rhs.precision_; - const_cast(this->scale_) = rhs.scale_; + const_cast(this->decimal_sign_) = rhs.decimal_sign_; + //PrintValue(10); return *this; } void Decimal::PrintValue(int ifra) { - cout << "value : [:" << this->scale_ << ":],"<<"[:"<< this->word[0] <<":][" <word[0].ToString().c_str(), toString(ifra).c_str()); } ///////////////////////////////////////////////////////////////////// Decimal Decimal::op_add(const Decimal rhs) const { - if ((this->isNull()) && (!rhs.isNull())) { - return rhs; - } - if ((!this->isNull()) && (rhs.isNull())) { - return *this; - } - if ((this->isNull()) && (rhs.isNull())) { - return Decimal::CreateNullDecimal(); - } TTInt rett; rett = this->GetTTInt(); rett.Add(rhs.GetTTInt()); Decimal ret; - ret.SetPrecsion(CLAIMS_COMMON_DECIMAL_PSUBS+MAXVAL(this->scale_, rhs.scale_)); - DEBUGOUT("+,this->scale_:" << this->scale_); - DEBUGOUT("+,rhs.scale_:" << rhs.scale_); - ret.SetScale(MAXVAL(this->scale_, rhs.scale_)); ret.SetTTInt(rett); return ret; } Decimal Decimal::op_subtract(const Decimal rhs) const { - if ((this->isNull()) && (!rhs.isNull())) { - return rhs; - } - if 
((!this->isNull()) && (rhs.isNull())) { - return *this; - } - if ((this->isNull()) && (rhs.isNull())) { - return Decimal(1, 0, "0"); - } TTInt rett; rett = this->GetTTInt(); rett.Sub(rhs.GetTTInt()); Decimal ret; - ret.SetPrecsion(CLAIMS_COMMON_DECIMAL_PSUBS+MAXVAL(this->scale_, rhs.scale_)); - DEBUGOUT("-,this->scale_:" << this->scale_); - DEBUGOUT("-,rhs.scale_:" << rhs.scale_); - ret.SetScale(MAXVAL(this->scale_, rhs.scale_)); ret.SetTTInt(rett); return ret; } Decimal Decimal::op_multiply(const Decimal rhs) const { - if ((this->isNull()) && (!rhs.isNull())) { - return rhs; - } - if ((!this->isNull()) && (rhs.isNull())) { - return *this; - } - if ((this->isNull()) && (rhs.isNull())) { - return Decimal::CreateNullDecimal(); - } + TTLInt rett; rett = this->GetTTInt(); rett *= rhs.GetTTInt(); rett /= Decimal::kMaxScaleFactor; Decimal ret; - ret.SetPrecsion(CLAIMS_COMMON_DECIMAL_PSUBS+MINVAL(this->scale_+ rhs.scale_, CLAIMS_COMMON_DECIMAL_MAXSCALE)); - DEBUGOUT("x,this->scale_:" << this->scale_); - DEBUGOUT("x,rhs.scale_:" << rhs.scale_); - ret.SetScale(MINVAL(this->scale_+ rhs.scale_, CLAIMS_COMMON_DECIMAL_MAXSCALE)); ret.SetTTInt(rett); return ret; } Decimal Decimal::op_divide(const Decimal rhs) const { - TTInt zero("0"); - if (rhs.isNull() || zero == rhs.GetTTInt() || this->isNull()) { - return Decimal::CreateNullDecimal(); - } - + TTLInt rett; rett = this->GetTTInt(); rett *= Decimal::kMaxScaleFactor; rett /= rhs.GetTTInt(); + + Decimal ret; + ret.SetTTInt(rett); - string ress; - rett.ToString(ress); - if (rett.IsSign()) ress.erase(0, 1); - while ((Decimal::kMaxDecScale - (int)ress.length()) >= 0) ress.insert(0, "0"); - ress.insert(ress.length() - Decimal::kMaxDecScale, "."); - if (rett.IsSign()) ress.insert(0, "-"); - int counttail = ress.length(); - while(ress[--counttail]=='0'); - DEBUGOUT("/:" << counttail << "," << ress); - int scale = MAXVAL(this->scale_, rhs.scale_); - scale = MAXVAL(scale, (kMaxDecScale - ((int)ress.length() - (counttail+1)))); - Decimal 
ret(CLAIMS_COMMON_DECIMAL_PSUBS + scale, scale, ress); return ret; } diff --git a/common/types/decimal.h b/common/types/decimal.h index 353f32694..19d79f442 100644 --- a/common/types/decimal.h +++ b/common/types/decimal.h @@ -36,20 +36,23 @@ using namespace std; namespace claims { namespace common { -#define CLAIMS_COMMON_DECIMAL_TTSIZE 4 -#define CLAIMS_COMMON_DECIMAL_TTLSIZE 8 -#define CLAIMS_COMMON_DECIMAL_MAXSCALE 30 -#define CLAIMS_COMMON_DECIMAL_MAXPRCISION 72 -#define CLAIMS_COMMON_DECIMAL_PSUBS \ - (CLAIMS_COMMON_DECIMAL_MAXPRCISION - CLAIMS_COMMON_DECIMAL_MAXSCALE) +#define DECIMAL_TTSIZE 4 +#define DECIMAL_TTLSIZE 8 +#define DECIMAL_MAXSCALE 30 +#define DECIMAL_MAXPRCISION 72 +#define DECIMAL_PSUBS \ + (DECIMAL_MAXPRCISION - DECIMAL_MAXSCALE) #define NWORDS 1 +#define DECIMAL_POS 0x00 +#define DECIMAL_NEG 0x01 +#define DECIMAL_NAN 0x02 // The int used for storage and return values -typedef ttmath::Int TTInt; +typedef ttmath::Int TTInt; // Long integer with space for multiplication and division without // carry/overflow -typedef ttmath::Int TTLInt; +typedef ttmath::Int TTLInt; //#define DECIM_DEBUG @@ -62,86 +65,18 @@ typedef ttmath::Int TTLInt; #define DEBUGOUT(A) #endif -#define MAXVAL(A,B) ((A)>(B)?(A):(B)) -#define MINVAL(A,B) ((A)<(B)?(A):(B)) - -/**----------------------------------------- - e/E - / \ - . 
esign - / \ \ - / \ ePower - whole fractinal - / - issign - -------------------------------------------- - -123.456e-7 - =>{ - is_sign = true; - whole="123"; - fractinal = "456"; - esign = true; - ePower = "7"; - } - */ -class DecimalString { - public: - DecimalString(bool isSign = false, string wholePart = "", - string fractinalPart = "", bool eSign = false, - string ePower = "") - : is_sign_(isSign), - whole_part_(wholePart), - fractional_part_(fractinalPart), - e_sign_(eSign), - e_power_(ePower) {} - - DecimalString& operator=(const DecimalString& rhs) { - if (this == &rhs) return *this; - this->is_sign_ = rhs.is_sign_; - this->whole_part_ = rhs.whole_part_; - this->fractional_part_ = rhs.fractional_part_; - this->e_sign_ = rhs.e_sign_; - this->e_power_ = rhs.e_power_; - return *this; - } - - void PrintValue() { - cout << setw(20) << "is_sign_:[" << is_sign_ << "]" << endl; - cout << setw(20) << "whole_part_:[" << whole_part_ << "]" << endl; - cout << setw(20) << "fractional_part_:[" << fractional_part_ << "]" << endl; - cout << setw(20) << "e_sign_:[" << e_sign_ << "]" << endl; - cout << setw(20) << "e_power_:[" << e_power_ << "]" << endl; - } - - void clear() { - is_sign_ = false; - whole_part_ = ""; - fractional_part_ = ""; - e_sign_ = false; - e_power_ = ""; - } - - public: - bool is_sign_; - string whole_part_; - string fractional_part_; - bool e_sign_; - string e_power_; -}; /* * */ class Decimal { public: - Decimal(); + Decimal(int precision, int scale, string valstr); + Decimal(int precision, int scale, const char * valstr); virtual ~Decimal(); - static bool StringToDecimal(int p, int s, string strdec, bool* pissign = NULL, - string* pwhole = NULL, string* pfractinal = NULL); - static bool StringToDecimal(string strdec, DecimalString& decstr); - string ToString(unsigned number_of_fractinal_digits = - CLAIMS_COMMON_DECIMAL_MAXSCALE) const; + bool StrtoDecimal(int p, int s, const char *cp); + string toString(unsigned number_of_fractinal_digits) const; 
static Decimal CreateNullDecimal(); bool isNull() const; @@ -163,10 +98,6 @@ class Decimal { Decimal& operator=(const Decimal& rhs); - void SetPrecsion(int p){ const_cast(precision_) = p; } - void SetScale(int s){ const_cast(scale_) = s; } - int GetScale(){return scale_;} - void PrintValue(int ifra); const TTInt& GetTTInt() const { @@ -175,20 +106,18 @@ class Decimal { } private: + Decimal(); void SetTTInt(TTInt value) { this->word[0] = value; } - void SetTTInt(bool issign, string whole, string fractinal); - TTInt Round(unsigned num) const; private: static const TTInt kMaxScaleFactor; - static const int kMaxDecScale = CLAIMS_COMMON_DECIMAL_MAXSCALE; + static const int kMaxDecScale = DECIMAL_MAXSCALE; - const int precision_; - const int scale_; + // Mark decimal NULL or not + const char decimal_sign_; TTInt word[NWORDS]; }; - inline int Decimal::compare(const Decimal rhs) const { const TTInt l = this->GetTTInt(); const TTInt r = rhs.GetTTInt(); diff --git a/conf/config b/conf/config index 665548de3..e815823a5 100755 --- a/conf/config +++ b/conf/config @@ -1,24 +1,24 @@ #本机IP地址 -ip = "127.0.0.1"; +ip = "219.228.147.162"; #端口范围(调试用) PortManager: { - start = 19000; - end = 19500; + start = 27000; + end = 27500; } #master的IP地址和端口 coordinator: { - ip="127.0.0.1" - port="11001" + ip="219.228.147.162" + port="12012" } #是否为master. 
若为master,则master=1,否则master=0 master=1 -client_listener_port = 10000 +client_listener_port = 10012 #hadoop上的数据目录 @@ -27,20 +27,25 @@ client_listener_port = 10000 #data="/home/imdb/data/wangli/" #data="/home/imdb/data/POC/sample/" #data="/home/minqi/git/Data/data/tpc-h/1-partition/sf-1/" -data="/home/minqi/git/Data/data/tpc-h/18-partition/sf-1/" +#data="/home/minqi/git/Data/data/tpc-h/18-partition/sf-1/" #data="/home/imdb/data/SF-1/" #data="/home/imdb/data/SF-1/" #data="/home/imdb/data/stock/" #data="/home/imdb/data/stock/" - - +#data="/home/zzh/data/1partition/" +#data="/claimsdata/" +#data="/home/zzh/data/sf-1-p4/" + +#data = "/home/claims/hcs/data/" +data = "/home/imdb/data/" +#data = "/test/claims/" #data="/home/fish/data/test/" #data="/home/imdb/data/POC/" #data="/home/imdb/data/POC/" #hdfs主节点 -hdfs_master_ip="127.0.0.1" +hdfs_master_ip="219.228.147.162" #hdfs主节点端口 hdfs_master_port=9000 diff --git a/configure.ac b/configure.ac index 4222ea980..266f1411d 100644 --- a/configure.ac +++ b/configure.ac @@ -4,7 +4,7 @@ AM_PROG_AR AC_PROG_LIBTOOL CPPFLAGS="-w -O2 -DTHERON_XS -D__STDC_CONSTANT_MACROS -D__STDC_LIMIT_MACROS" AM_CONDITIONAL(OPT_TCMALLOC, true) -CXXFLAGS=${CXXFLAGS="-g -std=c++11"} +CXXFLAGS=${CXXFLAGS="-g -std=c++11"} AC_PROG_CXX AC_CONFIG_HEADERS([config.h]) AC_CONFIG_FILES([ @@ -27,6 +27,7 @@ AC_CONFIG_FILES([ common/types/Makefile common/types/Test/Makefile common/types/ttmath/Makefile + node_manager/Makefile Daemon/Makefile Executor/Makefile Executor/Test/Makefile @@ -39,6 +40,7 @@ AC_CONFIG_FILES([ logical_operator/Makefile Resource/Makefile storage/Makefile + exec_tracker/Makefile sql_parser/Makefile sql_parser/ast_node/Makefile sql_parser/parser/Makefile diff --git a/doc/html/graph_legend.html b/doc/html/graph_legend.html new file mode 100644 index 000000000..e8897b691 --- /dev/null +++ b/doc/html/graph_legend.html @@ -0,0 +1,174 @@ + + + + + + +My Project: Graph Legend + + + + + + + + + + + + + +
+
+ + + + + + +
+
My Project +
+
+
+ + + + +
+
+ +
+
+
+ +
+ +
+
+ + +
+ +
+ +
+
+
Graph Legend
+
+
+

This page explains how to interpret the graphs that are generated by doxygen.

+

Consider the following example:

+
/*! Invisible class because of truncation */
+
class Invisible { };
+
+
/*! Truncated class, inheritance relation is hidden */
+
class Truncated : public Invisible { };
+
+
/* Class not documented with doxygen comments */
+
class Undocumented { };
+
+
/*! Class that is inherited using public inheritance */
+
class PublicBase : public Truncated { };
+
+
/*! A template class */
+
template<class T> class Templ { };
+
+
/*! Class that is inherited using protected inheritance */
+
class ProtectedBase { };
+
+
/*! Class that is inherited using private inheritance */
+
class PrivateBase { };
+
+
/*! Class that is used by the Inherited class */
+
class Used { };
+
+
/*! Super class that inherits a number of other classes */
+
class Inherited : public PublicBase,
+
protected ProtectedBase,
+
private PrivateBase,
+
public Undocumented,
+
public Templ<int>
+
{
+
private:
+
Used *m_usedClass;
+
};
+

This will result in the following graph:

+
+ +
+

The boxes in the above graph have the following meaning:

+
    +
  • +A filled gray box represents the struct or class for which the graph is generated.
  • +
  • +A box with a black border denotes a documented struct or class.
  • +
  • +A box with a grey border denotes an undocumented struct or class.
  • +
  • +A box with a red border denotes a documented struct or class forwhich not all inheritance/containment relations are shown. A graph is truncated if it does not fit within the specified boundaries.
  • +
+

The arrows have the following meaning:

+
    +
  • +A dark blue arrow is used to visualize a public inheritance relation between two classes.
  • +
  • +A dark green arrow is used for protected inheritance.
  • +
  • +A dark red arrow is used for private inheritance.
  • +
  • +A purple dashed arrow is used if a class is contained or used by another class. The arrow is labeled with the variable(s) through which the pointed class or struct is accessible.
  • +
  • +A yellow dashed arrow denotes a relation between a template instance and the template class it was instantiated from. The arrow is labeled with the template parameters of the instance.
  • +
+
+
+ + + + diff --git a/doc/html/graph_legend.md5 b/doc/html/graph_legend.md5 new file mode 100644 index 000000000..a06ed050c --- /dev/null +++ b/doc/html/graph_legend.md5 @@ -0,0 +1 @@ +387ff8eb65306fa251338d3c9bd7bfff \ No newline at end of file diff --git a/doc/html/graph_legend.png b/doc/html/graph_legend.png new file mode 100644 index 000000000..573ebdcaa Binary files /dev/null and b/doc/html/graph_legend.png differ diff --git a/doc/latex/doxygen.sty b/doc/latex/doxygen.sty new file mode 100644 index 000000000..199abf8d5 --- /dev/null +++ b/doc/latex/doxygen.sty @@ -0,0 +1,464 @@ +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{doxygen} + +% Packages used by this style file +\RequirePackage{alltt} +\RequirePackage{array} +\RequirePackage{calc} +\RequirePackage{float} +\RequirePackage{ifthen} +\RequirePackage{verbatim} +\RequirePackage[table]{xcolor} +\RequirePackage{xtab} + +%---------- Internal commands used in this style file ---------------- + +\newcommand{\ensurespace}[1]{% + \begingroup% + \setlength{\dimen@}{#1}% + \vskip\z@\@plus\dimen@% + \penalty -100\vskip\z@\@plus -\dimen@% + \vskip\dimen@% + \penalty 9999% + \vskip -\dimen@% + \vskip\z@skip% hide the previous |\vskip| from |\addvspace| + \endgroup% +} + +\newcommand{\DoxyLabelFont}{} +\newcommand{\entrylabel}[1]{% + {% + \parbox[b]{\labelwidth-4pt}{% + \makebox[0pt][l]{\DoxyLabelFont#1}% + \vspace{1.5\baselineskip}% + }% + }% +} + +\newenvironment{DoxyDesc}[1]{% + \ensurespace{4\baselineskip}% + \begin{list}{}{% + \settowidth{\labelwidth}{20pt}% + \setlength{\parsep}{0pt}% + \setlength{\itemsep}{0pt}% + \setlength{\leftmargin}{\labelwidth+\labelsep}% + \renewcommand{\makelabel}{\entrylabel}% + }% + \item[#1]% +}{% + \end{list}% +} + +\newsavebox{\xrefbox} +\newlength{\xreflength} +\newcommand{\xreflabel}[1]{% + \sbox{\xrefbox}{#1}% + \setlength{\xreflength}{\wd\xrefbox}% + \ifthenelse{\xreflength>\labelwidth}{% + \begin{minipage}{\textwidth}% + \setlength{\parindent}{0pt}% + \hangindent=15pt\bfseries 
#1\vspace{1.2\itemsep}% + \end{minipage}% + }{% + \parbox[b]{\labelwidth}{\makebox[0pt][l]{\textbf{#1}}}% + }% +} + +%---------- Commands used by doxygen LaTeX output generator ---------- + +% Used by
 ... 
+\newenvironment{DoxyPre}{% + \small% + \begin{alltt}% +}{% + \end{alltt}% + \normalsize% +} + +% Used by @code ... @endcode +\newenvironment{DoxyCode}{% + \par% + \scriptsize% + \begin{alltt}% +}{% + \end{alltt}% + \normalsize% +} + +% Used by @example, @include, @includelineno and @dontinclude +\newenvironment{DoxyCodeInclude}{% + \DoxyCode% +}{% + \endDoxyCode% +} + +% Used by @verbatim ... @endverbatim +\newenvironment{DoxyVerb}{% + \footnotesize% + \verbatim% +}{% + \endverbatim% + \normalsize% +} + +% Used by @verbinclude +\newenvironment{DoxyVerbInclude}{% + \DoxyVerb% +}{% + \endDoxyVerb% +} + +% Used by numbered lists (using '-#' or
    ...
) +\newenvironment{DoxyEnumerate}{% + \enumerate% +}{% + \endenumerate% +} + +% Used by bullet lists (using '-', @li, @arg, or
    ...
) +\newenvironment{DoxyItemize}{% + \itemize% +}{% + \enditemize% +} + +% Used by description lists (using
...
) +\newenvironment{DoxyDescription}{% + \description% +}{% + \enddescription% +} + +% Used by @image, @dotfile, @dot ... @enddot, and @msc ... @endmsc +% (only if caption is specified) +\newenvironment{DoxyImage}{% + \begin{figure}[H]% + \begin{center}% +}{% + \end{center}% + \end{figure}% +} + +% Used by @image, @dotfile, @dot ... @enddot, and @msc ... @endmsc +% (only if no caption is specified) +\newenvironment{DoxyImageNoCaption}{% +}{% +} + +% Used by @attention +\newenvironment{DoxyAttention}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @author and @authors +\newenvironment{DoxyAuthor}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @date +\newenvironment{DoxyDate}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @invariant +\newenvironment{DoxyInvariant}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @note +\newenvironment{DoxyNote}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @post +\newenvironment{DoxyPostcond}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @pre +\newenvironment{DoxyPrecond}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @copyright +\newenvironment{DoxyCopyright}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @remark +\newenvironment{DoxyRemark}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @return and @returns +\newenvironment{DoxyReturn}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @since +\newenvironment{DoxySince}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @see +\newenvironment{DoxySeeAlso}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @version +\newenvironment{DoxyVersion}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @warning +\newenvironment{DoxyWarning}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @internal 
+\newenvironment{DoxyInternal}[1]{% + \paragraph*{#1}% +}{% +} + +% Used by @par and @paragraph +\newenvironment{DoxyParagraph}[1]{% + \begin{list}{}{% + \settowidth{\labelwidth}{40pt}% + \setlength{\leftmargin}{\labelwidth}% + \setlength{\parsep}{0pt}% + \setlength{\itemsep}{-4pt}% + \renewcommand{\makelabel}{\entrylabel}% + }% + \item[#1]% +}{% + \end{list}% +} + +% Used by parameter lists +\newenvironment{DoxyParams}[2][]{% + \par% + \tabletail{\hline}% + \tablelasttail{\hline}% + \tablefirsthead{}% + \tablehead{}% + \ifthenelse{\equal{#1}{}}% + {\tablefirsthead{\multicolumn{2}{l}{\hspace{-6pt}\bfseries\fontseries{bc}\selectfont\color{darkgray} #2}\\[1ex]}% + \begin{xtabular}{|>{\raggedleft\hspace{0pt}}p{0.15\textwidth}|% + p{0.805\textwidth}|}}% + {\ifthenelse{\equal{#1}{1}}% + {\tablefirsthead{\multicolumn{2}{l}{\hspace{-6pt}\bfseries\fontseries{bc}\selectfont\color{darkgray} #2}\\[1ex]}% + \begin{xtabular}{|>{\centering}p{0.10\textwidth}|% + >{\raggedleft\hspace{0pt}}p{0.15\textwidth}|% + p{0.678\textwidth}|}}% + {\tablefirsthead{\multicolumn{2}{l}{\hspace{-6pt}\bfseries\fontseries{bc}\selectfont\color{darkgray} #2}\\[1ex]}% + \begin{xtabular}{|>{\centering}p{0.10\textwidth}|% + >{\centering\hspace{0pt}}p{0.15\textwidth}|% + >{\raggedleft\hspace{0pt}}p{0.15\textwidth}|% + p{0.501\textwidth}|}}% + }\hline% +}{% + \end{xtabular}% + \tablefirsthead{}% + \vspace{6pt}% +} + +% Used for fields of simple structs +\newenvironment{DoxyFields}[1]{% + \par% + \tabletail{\hline}% + \tablelasttail{\hline}% + \tablehead{}% + \tablefirsthead{\multicolumn{2}{l}{\hspace{-6pt}\bfseries\fontseries{bc}\selectfont\color{darkgray} #1}\\[1ex]}% + \begin{xtabular}{|>{\raggedleft\hspace{0pt}}p{0.15\textwidth}|% + p{0.15\textwidth}|% + p{0.63\textwidth}|}% + \hline% +}{% + \end{xtabular}% + \tablefirsthead{}% + \vspace{6pt}% +} + +% Used for parameters within a detailed function description +\newenvironment{DoxyParamCaption}{% + \renewcommand{\item}[2][]{##1 {\em ##2}}% +}{% +} + +% 
Used by return value lists +\newenvironment{DoxyRetVals}[1]{% + \par% + \tabletail{\hline}% + \tablelasttail{\hline}% + \tablehead{}% + \tablefirsthead{\multicolumn{2}{l}{\hspace{-6pt}\bfseries\fontseries{bc}\selectfont\color{darkgray} #1}\\[1ex]}% + \begin{xtabular}{|>{\raggedleft\hspace{0pt}}p{0.25\textwidth}|% + p{0.705\textwidth}|}% + \hline% +}{% + \end{xtabular}% + \tablefirsthead{}% + \vspace{6pt}% +} + +% Used by exception lists +\newenvironment{DoxyExceptions}[1]{% + \par% + \tabletail{\hline}% + \tablelasttail{\hline}% + \tablehead{}% + \tablefirsthead{\multicolumn{2}{l}{\hspace{-6pt}\bfseries\fontseries{bc}\selectfont\color{darkgray} #1}\\[1ex]}% + \begin{xtabular}{|>{\raggedleft\hspace{0pt}}p{0.25\textwidth}|% + p{0.705\textwidth}|}% + \hline% +}{% + \end{xtabular}% + \tablefirsthead{}% + \vspace{6pt}% +} + +% Used by template parameter lists +\newenvironment{DoxyTemplParams}[1]{% + \par% + \tabletail{\hline}% + \tablelasttail{\hline}% + \tablehead{}% + \tablefirsthead{\multicolumn{2}{l}{\hspace{-6pt}\bfseries\fontseries{bc}\selectfont\color{darkgray} #1}\\[1ex]}% + \begin{xtabular}{|>{\raggedleft\hspace{0pt}}p{0.25\textwidth}|% + p{0.705\textwidth}|}% + \hline% +}{% + \end{xtabular}% + \tablefirsthead{}% + \vspace{6pt}% +} + +% Used for member lists +\newenvironment{DoxyCompactItemize}{% + \begin{itemize}% + \setlength{\itemsep}{-3pt}% + \setlength{\parsep}{0pt}% + \setlength{\topsep}{0pt}% + \setlength{\partopsep}{0pt}% +}{% + \end{itemize}% +} + +% Used for member descriptions +\newenvironment{DoxyCompactList}{% + \begin{list}{}{% + \setlength{\leftmargin}{0.5cm}% + \setlength{\itemsep}{0pt}% + \setlength{\parsep}{0pt}% + \setlength{\topsep}{0pt}% + \renewcommand{\makelabel}{\hfill}% + }% +}{% + \end{list}% +} + +% Used for reference lists (@bug, @deprecated, @todo, etc.) 
+\newenvironment{DoxyRefList}{% + \begin{list}{}{% + \setlength{\labelwidth}{10pt}% + \setlength{\leftmargin}{\labelwidth}% + \addtolength{\leftmargin}{\labelsep}% + \renewcommand{\makelabel}{\xreflabel}% + }% +}{% + \end{list}% +} + +% Used by @bug, @deprecated, @todo, etc. +\newenvironment{DoxyRefDesc}[1]{% + \begin{list}{}{% + \renewcommand\makelabel[1]{\textbf{##1}}% + \settowidth\labelwidth{\makelabel{#1}}% + \setlength\leftmargin{\labelwidth+\labelsep}% + }% +}{% + \end{list}% +} + +% Used by parameter lists and simple sections +\newenvironment{Desc} +{\begin{list}{}{% + \settowidth{\labelwidth}{40pt}% + \setlength{\leftmargin}{\labelwidth}% + \setlength{\parsep}{0pt}% + \setlength{\itemsep}{-4pt}% + \renewcommand{\makelabel}{\entrylabel}% + } +}{% + \end{list}% +} + +% Used by tables +\newcommand{\PBS}[1]{\let\temp=\\#1\let\\=\temp}% +\newlength{\tmplength}% +\newenvironment{TabularC}[1]% +{% +\setlength{\tmplength}% + {\linewidth/(#1)-\tabcolsep*2-\arrayrulewidth*(#1+1)/(#1)}% + \par\begin{xtabular*}{\linewidth}% + {*{#1}{|>{\PBS\raggedright\hspace{0pt}}p{\the\tmplength}}|}% +}% +{\end{xtabular*}\par}% + +% Used for member group headers +\newenvironment{Indent}{% + \begin{list}{}{% + \setlength{\leftmargin}{0.5cm}% + }% + \item[]\ignorespaces% +}{% + \unskip% + \end{list}% +} + +% Used when hyperlinks are turned off +\newcommand{\doxyref}[3]{% + \textbf{#1} (\textnormal{#2}\,\pageref{#3})% +} + +% Used for syntax highlighting +\definecolor{comment}{rgb}{0.5,0.0,0.0} +\definecolor{keyword}{rgb}{0.0,0.5,0.0} +\definecolor{keywordtype}{rgb}{0.38,0.25,0.125} +\definecolor{keywordflow}{rgb}{0.88,0.5,0.0} +\definecolor{preprocessor}{rgb}{0.5,0.38,0.125} +\definecolor{stringliteral}{rgb}{0.0,0.125,0.25} +\definecolor{charliteral}{rgb}{0.0,0.5,0.5} +\definecolor{vhdldigit}{rgb}{1.0,0.0,1.0} +\definecolor{vhdlkeyword}{rgb}{0.43,0.0,0.43} +\definecolor{vhdllogic}{rgb}{1.0,0.0,0.0} +\definecolor{vhdlchar}{rgb}{0.0,0.0,0.0} diff --git a/doc/latex/refman.tex 
b/doc/latex/refman.tex new file mode 100644 index 000000000..2f243de93 --- /dev/null +++ b/doc/latex/refman.tex @@ -0,0 +1,141 @@ +\documentclass[twoside]{book} + +% Packages required by doxygen +\usepackage{calc} +\usepackage{doxygen} +\usepackage{graphicx} +\usepackage[utf8]{inputenc} +\usepackage{makeidx} +\usepackage{multicol} +\usepackage{multirow} +\usepackage{textcomp} +\usepackage[table]{xcolor} + +% Font selection +\usepackage[T1]{fontenc} +\usepackage{mathptmx} +\usepackage[scaled=.90]{helvet} +\usepackage{courier} +\usepackage{amssymb} +\usepackage{sectsty} +\renewcommand{\familydefault}{\sfdefault} +\allsectionsfont{% + \fontseries{bc}\selectfont% + \color{darkgray}% +} +\renewcommand{\DoxyLabelFont}{% + \fontseries{bc}\selectfont% + \color{darkgray}% +} + +% Page & text layout +\usepackage{geometry} +\geometry{% + a4paper,% + top=2.5cm,% + bottom=2.5cm,% + left=2.5cm,% + right=2.5cm% +} +\tolerance=750 +\hfuzz=15pt +\hbadness=750 +\setlength{\emergencystretch}{15pt} +\setlength{\parindent}{0cm} +\setlength{\parskip}{0.2cm} +\makeatletter +\renewcommand{\paragraph}{% + \@startsection{paragraph}{4}{0ex}{-1.0ex}{1.0ex}{% + \normalfont\normalsize\bfseries\SS@parafont% + }% +} +\renewcommand{\subparagraph}{% + \@startsection{subparagraph}{5}{0ex}{-1.0ex}{1.0ex}{% + \normalfont\normalsize\bfseries\SS@subparafont% + }% +} +\makeatother + +% Headers & footers +\usepackage{fancyhdr} +\pagestyle{fancyplain} +\fancyhead[LE]{\fancyplain{}{\bfseries\thepage}} +\fancyhead[CE]{\fancyplain{}{}} +\fancyhead[RE]{\fancyplain{}{\bfseries\leftmark}} +\fancyhead[LO]{\fancyplain{}{\bfseries\rightmark}} +\fancyhead[CO]{\fancyplain{}{}} +\fancyhead[RO]{\fancyplain{}{\bfseries\thepage}} +\fancyfoot[LE]{\fancyplain{}{}} +\fancyfoot[CE]{\fancyplain{}{}} +\fancyfoot[RE]{\fancyplain{}{\bfseries\scriptsize Generated on Mon Sep 21 2015 16\-:30\-:09 for My Project by Doxygen }} +\fancyfoot[LO]{\fancyplain{}{\bfseries\scriptsize Generated on Mon Sep 21 2015 16\-:30\-:09 for My Project 
by Doxygen }} +\fancyfoot[CO]{\fancyplain{}{}} +\fancyfoot[RO]{\fancyplain{}{}} +\renewcommand{\footrulewidth}{0.4pt} +\renewcommand{\chaptermark}[1]{% + \markboth{#1}{}% +} +\renewcommand{\sectionmark}[1]{% + \markright{\thesection\ #1}% +} + +% Indices & bibliography +\usepackage{natbib} +\usepackage[titles]{tocloft} +\setcounter{tocdepth}{3} +\setcounter{secnumdepth}{5} +\makeindex + +% Hyperlinks (required, but should be loaded last) +\usepackage{ifpdf} +\ifpdf + \usepackage[pdftex,pagebackref=true]{hyperref} +\else + \usepackage[ps2pdf,pagebackref=true]{hyperref} +\fi +\hypersetup{% + colorlinks=true,% + linkcolor=blue,% + citecolor=blue,% + unicode% +} + +% Custom commands +\newcommand{\clearemptydoublepage}{% + \newpage{\pagestyle{empty}\cleardoublepage}% +} + + +%===== C O N T E N T S ===== + +\begin{document} + +% Titlepage & ToC +\hypersetup{pageanchor=false} +\pagenumbering{roman} +\begin{titlepage} +\vspace*{7cm} +\begin{center}% +{\Large My Project }\\ +\vspace*{1cm} +{\large Generated by Doxygen 1.8.5}\\ +\vspace*{0.5cm} +{\small Mon Sep 21 2015 16:30:09}\\ +\end{center} +\end{titlepage} +\clearemptydoublepage +\tableofcontents +\clearemptydoublepage +\pagenumbering{arabic} +\hypersetup{pageanchor=true} + +%--- Begin generated contents --- +%--- End generated contents --- + +% Index +\newpage +\phantomsection +\addcontentsline{toc}{part}{Index} +\printindex + +\end{document} diff --git a/exec_tracker/Makefile.am b/exec_tracker/Makefile.am new file mode 100644 index 000000000..73ddb55b9 --- /dev/null +++ b/exec_tracker/Makefile.am @@ -0,0 +1,36 @@ +AM_CPPFLAGS= -fPIC -fpermissive -DTHERON_XS\ +-I${BOOST_HOME} \ +-I${BOOST_HOME}/boost/serialization \ +-I${HADOOP_HOME}/include\ +-I${JAVA_HOME}/include\ +-I${JAVA_HOME}/include/linux + +AM_LDFLAGS=-lc -lm -lrt -lconfig++ -lpthread -lboost_serialization -lxs + +if OPT_TCMALLOC +AM_CPPFLAGS+=-fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free +AM_LDFLAGS+=-ltcmalloc +endif + +LDADD 
= ../BlockStreamIterator/libblockstreamiterator.a \ + ../common/Block/libblock.a \ + ../common/libcommon.a \ + ../utility/libutility.a \ + ${CAF_HOME}/build/lib/libcaf_core.so \ + ${CAF_HOME}/build/lib/libcaf_io.so \ + ${BOOST_HOME}/stage/lib/libboost_serialization.a \ + ${BOOST_HOME}/stage/lib/libboost_serialization.so + +noinst_LIBRARIES=libexec_tracker.a + +libexec_tracker_a_SOURCES = \ + segment_exec_status.h segment_exec_status.cpp \ + stmt_exec_status.h stmt_exec_status.cpp \ + segment_exec_tracker.h segment_exec_tracker.cpp \ + stmt_exec_tracker.h stmt_exec_tracker.cpp + +libexec_tracker_a_LIBADD = \ + ${CAF_HOME}/build/lib/libcaf_core.so \ + ${CAF_HOME}/build/lib/libcaf_io.so \ + ${BOOST_HOME}/stage/lib/libboost_serialization.a \ + ${BOOST_HOME}/stage/lib/libboost_serialization.so diff --git a/exec_tracker/segment_exec_status.cpp b/exec_tracker/segment_exec_status.cpp new file mode 100644 index 000000000..2b2472c01 --- /dev/null +++ b/exec_tracker/segment_exec_status.cpp @@ -0,0 +1,155 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/exec_tracker/segment_exec_status.cpp + * + * Created on: Apr 3, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#include "./segment_exec_status.h" +#include +#include +#include +#include "../exec_tracker/stmt_exec_tracker.h" +#include "../Environment.h" +#include "caf/io/all.hpp" + +#include "caf/all.hpp" +using caf::io::remote_actor; +using std::string; +using std::endl; +using claims::common::rNetworkError; +using claims::common::rSendingTimeout; +namespace claims { + +SegmentExecStatus::SegmentExecStatus(NodeSegmentID node_segment_id, + unsigned int coor_node_id) + : node_segment_id_(node_segment_id), + coor_node_id_(coor_node_id), + exec_info_("ok"), + exec_status_(ExecStatus::kOk), + ret_code_(0), + logic_time_(0), + stop_report_(false), + ReportErrorTimes(0) { + // RegisterToTracker(); + coor_actor_ = + Environment::getInstance()->get_slave_node()->GetNodeActorFromId( + coor_node_id); +} +SegmentExecStatus::SegmentExecStatus(NodeSegmentID node_segment_id) + : node_segment_id_(node_segment_id), + exec_info_("ok"), + exec_status_(ExecStatus::kOk), + ret_code_(0), + stop_report_(false), + ReportErrorTimes(0), + logic_time_(Environment::getInstance() + ->get_stmt_exec_tracker() + ->get_logic_time()) {} +SegmentExecStatus::~SegmentExecStatus() { + // ostringstream exec_info; + // exec_info << "query (" << node_segment_id_.first << " , " + // << node_segment_id_.second / kMaxNodeNum << " ) at node " + // << node_segment_id_.second % kMaxNodeNum << " execution + // succeed"; + // UpdateStatus(SegmentExecStatus::ExecStatus::kDone, exec_info.str(), 0, + // true); + // ReportStatus(SegmentExecStatus::ExecStatus::kDone, exec_info.str()); + // UnRegisterFromTracker(); +} + +RetCode SegmentExecStatus::CancelSegExec() { + stop_report_ = true; + lock_.acquire(); + exec_status_ = kCancelled; + lock_.release(); + LOG(INFO) << node_segment_id_.first << " , " << node_segment_id_.second + << " has been cancelled!" 
<< endl; + return rSuccess; +} + +bool SegmentExecStatus::UpdateStatus(ExecStatus exec_status, string exec_info, + u_int64_t logic_time, bool need_report) { + lock_.acquire(); + if (exec_status_ == ExecStatus::kCancelled) { + LOG(INFO) << node_segment_id_.first << " , " << node_segment_id_.second + << " update status failed!"; + lock_.release(); + return false; + } else if (ExecStatus::kOk == exec_status_) { + if (0 != logic_time) { + logic_time_ = logic_time; + } + exec_status_ = exec_status; + exec_info_ = exec_info; + lock_.release(); + LOG(INFO) << node_segment_id_.first << " , " << node_segment_id_.second + << " update logic_time= " << logic_time + << " exec_status_= " << exec_status + << " exec_info_= " << exec_info; + need_report = false; // for debug + if (need_report) { + ++logic_time_; + caf::scoped_actor self; + self->send(Environment::getInstance() + ->get_segment_exec_tracker() + ->segment_exec_tracker_actor_, + ReportSAtom::value, this); + } + } else { + lock_.release(); + LOG(WARNING) << "segment's status shouldn't be updated!!"; + } + return true; +} +RetCode SegmentExecStatus::RegisterToTracker() { + return Environment::getInstance()->get_segment_exec_tracker()->RegisterSegES( + node_segment_id_, this); +} + +RetCode SegmentExecStatus::UnRegisterFromTracker() { + while (true) { + if (exec_status_ == kOk) { + LOG(INFO) << node_segment_id_.first << " , " << node_segment_id_.second + << " " << exec_status_ << " should be 3"; + UpdateStatus(SegmentExecStatus::ExecStatus::kDone, "finished", 0, true); + } else { + break; + } + } + return Environment::getInstance() + ->get_segment_exec_tracker() + ->UnRegisterSegES(node_segment_id_); +} +bool SegmentExecStatus::HaveErrorCase(u_int64_t logic_time) { + LOG(INFO) << node_segment_id_.first << " , " << node_segment_id_.second << " " + << exec_status_ << " " << logic_time << " - " << logic_time_ + << " = " << logic_time - logic_time_; + // if the exection status is normal, but logic time falls behind + // 
TryReportTimes* kCheckIntervalTime + return (exec_status_ == kOk) && + (logic_time - logic_time_ > TryReportTimes + 1); +} + +} // namespace claims diff --git a/exec_tracker/segment_exec_status.h b/exec_tracker/segment_exec_status.h new file mode 100644 index 000000000..a882ad213 --- /dev/null +++ b/exec_tracker/segment_exec_status.h @@ -0,0 +1,97 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/exec_tracker/segment_exec_status.h + * + * Created on: Apr 3, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef EXEC_TRACKER_SEGMENT_EXEC_STATUS_H_ +#define EXEC_TRACKER_SEGMENT_EXEC_STATUS_H_ +#include "../common/error_define.h" +#include "../exec_tracker/segment_exec_tracker.h" +#include "../node_manager/base_node.h" +#include + +#include "../utility/lock.h" +#include "caf/all.hpp" +#include +using std::string; +namespace claims { +const int TryReportTimes = 20; +// due to the conflict between deleting SegmentExecStatus and reporting the +// last message (deleting is faster than reporting, so the last message doesn't +// been sent successfully), so all instance of SegmentExecStatus should be +// governed by SegmentExecTracker, new it and register it, then after the last +// message (e.t kDone of kCancelled), unregister it and delete it(may controlled +// by object poor). +class SegmentExecStatus { + public: + enum ExecStatus { kError, kOk, kCancelled, kDone }; + SegmentExecStatus(NodeSegmentID node_segment_id, unsigned int coor_node_id); + SegmentExecStatus(NodeSegmentID node_segment_id); + virtual ~SegmentExecStatus(); + // first cancel data source, e.t. 
exchange merger + RetCode CancelSegExec(); + + bool UpdateStatus(ExecStatus exec_status, string exec_info, + u_int64_t logic_time = 0, bool need_report = false); + RetCode RegisterToTracker(); + RetCode UnRegisterFromTracker(); + NodeSegmentID get_node_segment_id() { return node_segment_id_; } + bool HaveErrorCase(u_int64_t logic_time); + ExecStatus get_exec_status() { return exec_status_; } + void set_exec_status(ExecStatus exec_status) { exec_status_ = exec_status; } + string get_exec_info() { return exec_info_; } + void set_exec_info(string exec_info) { exec_info_ = exec_info; } + bool is_cancelled() { return kCancelled == exec_status_; } + + actor coor_actor_; + Lock lock_; + std::atomic_bool stop_report_; + + std::atomic_int ReportErrorTimes; + NodeSegmentID node_segment_id_; + unsigned int coor_node_id_; + u_int64_t logic_time_; + + private: + ExecStatus exec_status_; + RetCode ret_code_; + string exec_info_; +}; +#define UNLIKELY(expr) __builtin_expect(!!(expr), 0) + +#define RETURN_IF_CANCELLED(exec_status) \ + do { \ + if (UNLIKELY((exec_status)->is_cancelled())) { \ + LOG(WARNING) << exec_status->get_node_segment_id().first << " , " \ + << exec_status->get_node_segment_id().second \ + << " is cancelled and exited execution!"; \ + return false; \ + } \ + } while (false) + +} // namespace claims + +#endif // EXEC_TRACKER_SEGMENT_EXEC_STATUS_H_ diff --git a/exec_tracker/segment_exec_tracker.cpp b/exec_tracker/segment_exec_tracker.cpp new file mode 100644 index 000000000..ce81ddebb --- /dev/null +++ b/exec_tracker/segment_exec_tracker.cpp @@ -0,0 +1,230 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/Executor/segment_exec_tracker.cpp + * + * Created on: Mar 24, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#include "./segment_exec_tracker.h" +#include +#include +#include "caf/all.hpp" +#include "../Environment.h" +#include "caf/io/all.hpp" + +#include "./stmt_exec_status.h" +#include "../exec_tracker/segment_exec_status.h" +using caf::actor_pool; +using caf::event_based_actor; +using caf::io::remote_actor; +using caf::time_unit; +using std::string; +namespace claims { + +SegmentExecTracker::SegmentExecTracker() { + segment_exec_tracker_actor_ = caf::spawn(ReportAllSegStatus, this); +} + +SegmentExecTracker::~SegmentExecTracker() { + caf::scoped_actor self; + assert(node_segment_id_to_status_.size() == 0); + self->send(segment_exec_tracker_actor_, ExitAtom::value); +} + +RetCode SegmentExecTracker::CancelSegExec(NodeSegmentID node_segment_id) {} + +RetCode SegmentExecTracker::RegisterSegES(NodeSegmentID node_segment_id, + SegmentExecStatus* seg_exec_status) { + map_lock_.acquire(); + if (node_segment_id_to_status_.find(node_segment_id) == + node_segment_id_to_status_.end()) { + node_segment_id_to_status_.insert( + make_pair(node_segment_id, seg_exec_status)); + LOG(INFO) << node_segment_id.first << " , " << node_segment_id.second + << " register to segment tracker successfully!"; + map_lock_.release(); + } else { + LOG(ERROR) << 
"node_segment_id < " << node_segment_id.first << " , " + << node_segment_id.second << " >already in segment tracker"; + map_lock_.release(); + assert(false); + } + return rSuccess; +} + +RetCode SegmentExecTracker::UnRegisterSegES(NodeSegmentID node_segment_id) { + map_lock_.acquire(); + auto it = node_segment_id_to_status_.find(node_segment_id); + if (it != node_segment_id_to_status_.end()) { + node_segment_id_to_status_.erase(it); + LOG(INFO) << node_segment_id.first << " , " << node_segment_id.second + << " has been erased from segment tracker! then left segment= " + << node_segment_id_to_status_.size(); + map_lock_.release(); + } else { + LOG(ERROR) << node_segment_id.first << " , " << node_segment_id.second + << " couldn't be found when unregister segment status"; + map_lock_.release(); + assert(false); + } + return rSuccess; +} +// report all status of all segment that locate at this node, but if just one +// thread occur error, how to catch it and report it? +void SegmentExecTracker::ReportAllSegStatus( + caf::event_based_actor* self, SegmentExecTracker* seg_exec_tracker) { + self->become( + + [=](ReportSegESAtom) { + seg_exec_tracker->map_lock_.acquire(); + if (seg_exec_tracker->node_segment_id_to_status_.size() > 0) { + auto it = seg_exec_tracker->node_segment_id_to_status_.begin(); + for (; it != seg_exec_tracker->node_segment_id_to_status_.end();) { + assert(it->second != NULL); + if (it->second->stop_report_) { + // every sending message sent before has been received, so you can + // delete it now + if (it->second->logic_time_ == 0) { + LOG(INFO) << it->second->node_segment_id_.first << " , " + << it->second->node_segment_id_.second + << " has been deleted from tracker"; + DELETE_PTR(it->second); + it = seg_exec_tracker->node_segment_id_to_status_.erase(it); + } else { + LOG(WARNING) << it->second->node_segment_id_.first << " , " + << it->second->node_segment_id_.second + << "segment report status out of order0!"; + ++it; + } + } else { + 
++it->second->logic_time_; + self->send(self, ReportSAtom::value, it->second); + ++it; + } + } + } + seg_exec_tracker->map_lock_.release(); + self->delayed_send(self, std::chrono::milliseconds(kReportIntervalTime), + ReportSegESAtom::value); + }, + [=](ReportSAtom, SegmentExecStatus* seg_exec_status) { + // get the status of the corresponding segment + seg_exec_status->lock_.acquire(); + int exec_status = seg_exec_status->get_exec_status(); + string exec_info = seg_exec_status->get_exec_info(); + seg_exec_status->lock_.release(); + if (seg_exec_status->stop_report_ == true) { + // shouldn't report + + LOG(WARNING) << seg_exec_status->node_segment_id_.first << " , " + << seg_exec_status->node_segment_id_.second + << "segment report status out of order!"; + + } else { + try { + LOG(INFO) << seg_exec_status->node_segment_id_.first << " , " + << seg_exec_status->node_segment_id_.second + << " before send: " << exec_status << " , " << exec_info; + self->sync_send( + seg_exec_status->coor_actor_, ReportSegESAtom::value, + seg_exec_status->node_segment_id_, exec_status, exec_info) + .then( + + [=](OkAtom) { + seg_exec_status->ReportErrorTimes = 0; + if (SegmentExecStatus::kCancelled == exec_status || + SegmentExecStatus::kDone == exec_status) { + seg_exec_status->stop_report_ = true; + } + LOG(INFO) + << seg_exec_status->node_segment_id_.first << " , " + << seg_exec_status->node_segment_id_.second + << " report: " << exec_status << " , " << exec_info + << " successfully!"; + }, + [=](CancelPlanAtom) { + seg_exec_status->ReportErrorTimes = 0; + seg_exec_status->CancelSegExec(); + LOG(INFO) << seg_exec_status->node_segment_id_.first + << " , " + << seg_exec_status->node_segment_id_.second + << " receive cancel signal and cancel self"; + }, + caf::others >> + [=]() { + LOG(WARNING) + << "segment report receives unknown message" + << endl; + }, + // if timeout, then ReportErrorTimes+1,if ReportErrorTimes > + // TryReportTimes, then the network may be error, so cancel + // it 
+ caf::after(std::chrono::seconds(kTimeout)) >> + [=]() { + + ++seg_exec_status->ReportErrorTimes; + LOG(WARNING) + << seg_exec_status->node_segment_id_.first + << " , " + << seg_exec_status->node_segment_id_.second + << " segment report status timeout! times= " + << seg_exec_status->ReportErrorTimes; + + if (seg_exec_status->ReportErrorTimes > + TryReportTimes) { + LOG(ERROR) + << seg_exec_status->node_segment_id_.first + << " , " + << seg_exec_status->node_segment_id_.second + << " report status error over 20 times, " + "pleas check the error " + "and this segment will be cancelled!"; + seg_exec_status->CancelSegExec(); + } + } + + ); + } catch (caf::network_error& e) { + LOG(ERROR) << seg_exec_status->node_segment_id_.first << " , " + << seg_exec_status->node_segment_id_.second + << " cann't connect to node ( " + << seg_exec_status->coor_node_id_ + << " ) when report status"; + } + } + // guarantee it's the last action!!! + --seg_exec_status->logic_time_; + }, + [=](ExitAtom) { self->quit(); }, + caf::others >> [=]() { + LOG(WARNING) + << "segment tracker receives unknown message" + << endl; + } + + ); + self->send(self, ReportSegESAtom::value); +} + +} // namespace claims diff --git a/exec_tracker/segment_exec_tracker.h b/exec_tracker/segment_exec_tracker.h new file mode 100644 index 000000000..515293f58 --- /dev/null +++ b/exec_tracker/segment_exec_tracker.h @@ -0,0 +1,70 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/Executor/segment_exec_tracker.h + * + * Created on: Mar 24, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef EXEC_TRACKER_SEGMENT_EXEC_TRACKER_H_ +#define EXEC_TRACKER_SEGMENT_EXEC_TRACKER_H_ +#include "../exec_tracker/segment_exec_tracker.h" + +#include +#include +#include +#include "../common/error_define.h" +#include "../utility/lock.h" +#include "caf/all.hpp" +using caf::actor; +using std::pair; +using std::string; + +namespace claims { +// first=query_id, second=segment_id*kMaxNodeNum + node_id +#define kReportIntervalTime 3000 +typedef std::pair NodeSegmentID; +class SegmentExecStatus; +class SegmentExecTracker { + public: + SegmentExecTracker(); + virtual ~SegmentExecTracker(); + RetCode CancelSegExec(NodeSegmentID node_segment_id); + RetCode RegisterSegES(NodeSegmentID node_segment_id, + SegmentExecStatus* seg_exec_status); + RetCode UnRegisterSegES(NodeSegmentID node_segment_id); + // report all remote_segment_status located at slave node + static void ReportAllSegStatus(caf::event_based_actor* self, + SegmentExecTracker* seg_exec_tracker); + + actor segment_exec_tracker_actor_; + + private: + boost::unordered_map + node_segment_id_to_status_; + Lock map_lock_; +}; + +} // namespace claims + +#endif // EXEC_TRACKER_SEGMENT_EXEC_TRACKER_H_ diff --git a/exec_tracker/stmt_exec_status.cpp b/exec_tracker/stmt_exec_status.cpp new file mode 100644 index 000000000..713bc28e7 --- /dev/null +++ b/exec_tracker/stmt_exec_status.cpp @@ -0,0 +1,196 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * 
Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/exec_tracker/stmt_exec_status.cpp + * + * Created on: Apr 3, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#include "./stmt_exec_status.h" + +#include +#include +#include +#include + +#include "../Environment.h" +#include "../node_manager/base_node.h" +#include "caf/all.hpp" + +#include "caf/io/all.hpp" +using boost::chrono::seconds; +using caf::io::remote_actor; +using std::make_pair; +using std::string; +namespace claims { + +StmtExecStatus::StmtExecStatus(string sql_stmt) + : sql_stmt_(sql_stmt), + query_result_(NULL), + exec_info_("OK"), + exec_status_(ExecStatus::kOk), + segment_id_gen_(1) {} + +StmtExecStatus::~StmtExecStatus() { + // due to the query result should return, so shouldn't be deleted here + // if (NULL != query_result_) delete query_result_; + lock_.acquire(); + for (auto it = node_seg_id_to_seges_.begin(); + it != node_seg_id_to_seges_.end(); ++it) { + delete it->second; + it->second = NULL; + } + node_seg_id_to_seges_.clear(); + lock_.release(); +} +/// just mark exec_status be kCancelled +RetCode StmtExecStatus::CancelStmtExec(bool locked) { + if (!locked) { + lock_.acquire(); + } + if (ExecStatus::kCancelled 
== exec_status_) { + if (!locked) { + lock_.release(); + } + return rSuccess; + } + exec_status_ = kCancelled; + LOG(INFO) << query_id_ << " query should be cancelled!"; + if (!locked) { + lock_.release(); + } + return rSuccess; +} +// check every segment status +bool StmtExecStatus::CouldBeDeleted(u_int64_t logic_time) { + // exec_status_ is set kDone or kError when the stmt is over in + // select_exec.cpp, and then it could be deleted + lock_.acquire(); + if (!(exec_status_ == kDone || exec_status_ == kError)) { + lock_.release(); + return false; + } + // then check every segment of this stmt; if one is kOk (i.e. neither + // kCancelled nor kDone), then this stmt shouldn't be deleted + for (auto it = node_seg_id_to_seges_.begin(); + it != node_seg_id_to_seges_.end(); ++it) { + if (it->second->get_exec_status() == SegmentExecStatus::ExecStatus::kOk + && !(it->second->HaveErrorCase(logic_time))) { + lock_.release(); + return false; + } + } + LOG(INFO) << query_id_ + << " query can be deleted and status= " << exec_status_; + lock_.release(); + return true; +} +bool StmtExecStatus::HaveErrorCase(u_int64_t logic_time) { + lock_.acquire(); + int error_count = 0; + int count = 0; + int max_lost = 0; + for (auto it = node_seg_id_to_seges_.begin(); + it != node_seg_id_to_seges_.end(); ++it) { + if (it->second->HaveErrorCase(logic_time)) { + LOG(INFO) << "change status to kCancelled" << endl; + it->second->set_exec_status(SegmentExecStatus::ExecStatus::kCancelled); + error_count++; + } + // If a segment has finished, track the maximum lag between its finish + // time and the current logic time; when that lag exceeds 1000 and the + // ratio of finished segments is over 50%, the stmt is treated as stuck. 
+ if (it->second->get_exec_status() == kDone) { + count++; + if (max_lost < logic_time - it->second->logic_time_) { + max_lost = logic_time - it->second->logic_time_; + } + } + if (it->second->get_exec_status() == kOk) { + if (count * 100 / node_seg_id_to_seges_.size() > 50) { + if (max_lost > 1000) { + LOG(ERROR) + << "This segment in loop, Error.Need to send the sql again." + << endl; + lock_.release(); + return true; + } + } + } + } + lock_.release(); + if (error_count > 0) { + return true; + } else { + return false; + } +} +RetCode StmtExecStatus::RegisterToTracker() { + return Environment::getInstance()->get_stmt_exec_tracker()->RegisterStmtES( + this); +} + +RetCode StmtExecStatus::UnRegisterFromTracker() { + return Environment::getInstance()->get_stmt_exec_tracker()->UnRegisterStmtES( + query_id_); +} +void StmtExecStatus::AddSegExecStatus(SegmentExecStatus* seg_exec_status) { + lock_.acquire(); + node_seg_id_to_seges_.insert( + make_pair(seg_exec_status->get_node_segment_id(), seg_exec_status)); + lock_.release(); +} +// if remote segment status is error, then cancel corresponding statement +// once the statement is cancelled, update the segment's status to be cancelled, +// and returning false for canceling remote segment +bool StmtExecStatus::UpdateSegExecStatus( + NodeSegmentID node_segment_id, SegmentExecStatus::ExecStatus exec_status, + string exec_info, u_int64_t logic_time) { + lock_.acquire(); + if (SegmentExecStatus::ExecStatus::kError == exec_status) { + CancelStmtExec(true); + } + auto it = node_seg_id_to_seges_.find(node_segment_id); + if (it != node_seg_id_to_seges_.end()) { + if (ExecStatus::kCancelled == exec_status_ || + ExecStatus::kError == exec_status_) { + it->second->UpdateStatus(SegmentExecStatus::ExecStatus::kCancelled, + exec_info, logic_time); + lock_.release(); + return false; + } else { + it->second->UpdateStatus(exec_status, exec_info, logic_time); + lock_.release(); + return true; + } + } else { + LOG(ERROR) << "query id = < " 
<< it->first.first << " , " + << it->first.second + << " >couldn't be found when updating segment execution status!"; + lock_.release(); + assert(false); + return false; + } +} + +} // namespace claims diff --git a/exec_tracker/stmt_exec_status.h b/exec_tracker/stmt_exec_status.h new file mode 100644 index 000000000..5dcb6b1bc --- /dev/null +++ b/exec_tracker/stmt_exec_status.h @@ -0,0 +1,89 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/exec_tracker/stmt_exec_status.h + * + * Created on: Apr 3, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef EXEC_TRACKER_STMT_EXEC_STATUS_H_ +#define EXEC_TRACKER_STMT_EXEC_STATUS_H_ +#include +#include +#include +#include +#include "./segment_exec_status.h" +#include "../common/Block/ResultSet.h" +#include "../exec_tracker/segment_exec_tracker.h" +#include "../utility/lock.h" +using std::string; +namespace claims { +/// for monitoring the execution status of every segment, slave node has +/// remote_segment_status, coordinator has local_segment_status, and synchronize +/// the underlying 2 status at every reporting time +class StmtExecStatus { + public: + enum ExecStatus { kError, kOk, kCancelled, kDone }; + StmtExecStatus(string sql_stmt); + virtual ~StmtExecStatus(); + u_int64_t get_query_id() { return query_id_; } + void set_query_id(u_int64_t query_id) { query_id_ = query_id; } + RetCode CancelStmtExec(bool locked = false); + RetCode RegisterToTracker(); + RetCode UnRegisterFromTracker(); + short GenSegmentId() { return segment_id_gen_++; } + void AddSegExecStatus(SegmentExecStatus* seg_exec_status); + + // update remote_segment_status and local_segment_status + bool UpdateSegExecStatus(NodeSegmentID node_segment_id, + SegmentExecStatus::ExecStatus exec_status, + string exec_info, u_int64_t logic_time); + bool CouldBeDeleted(u_int64_t logic_time); + bool HaveErrorCase(u_int64_t logic_time); + void set_exec_status(ExecStatus exec_status) { exec_status_ = exec_status; } + ExecStatus get_exec_status() { return exec_status_; } + string get_exec_info() { return exec_info_; } + void set_exec_info(string exec_info) { + lock_.acquire(); + exec_info_ = exec_info; + lock_.release(); + } + ResultSet* get_query_result() { return query_result_; } + void set_query_result(ResultSet* query_result) { + query_result_ = query_result; + } + bool IsCancelled() { return exec_status_ == ExecStatus::kCancelled; } + + private: + 
string exec_info_; + ExecStatus exec_status_; + ResultSet* query_result_; + boost::unordered_map node_seg_id_to_seges_; + u_int64_t query_id_; + string sql_stmt_; + std::atomic_short segment_id_gen_; + Lock lock_; +}; +} // namespace claims + +#endif // EXEC_TRACKER_STMT_EXEC_STATUS_H_ diff --git a/exec_tracker/stmt_exec_tracker.cpp b/exec_tracker/stmt_exec_tracker.cpp new file mode 100644 index 000000000..4ab53ac74 --- /dev/null +++ b/exec_tracker/stmt_exec_tracker.cpp @@ -0,0 +1,154 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/Executor/query_exec_tracker.cpp + * + * Created on: Mar 24, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#include "../exec_tracker/stmt_exec_tracker.h" + +#include +#include +#include +#include "../common/error_define.h" +#include "../Environment.h" +#include "../node_manager/base_node.h" +#include "caf/all.hpp" +#include "caf/local_actor.hpp" +using std::make_pair; +using std::string; +using claims::common::rCouldnotFindCancelQueryId; +namespace claims { + +StmtExecTracker::StmtExecTracker() : query_id_gen_(0), logic_time_(0) { + stmt_exec_tracker_actor_ = caf::spawn(CheckStmtExecStatus, this); +} + +StmtExecTracker::~StmtExecTracker() { + caf::scoped_actor self; + self->send(stmt_exec_tracker_actor_, ExitAtom::value); + assert(query_id_to_stmtes_.size() == 0); +} + +RetCode StmtExecTracker::RegisterStmtES(StmtExecStatus* stmtes) { + lock_.acquire(); + stmtes->set_query_id(GenQueryId()); + query_id_to_stmtes_.insert(make_pair(stmtes->get_query_id(), stmtes)); + lock_.release(); + return rSuccess; +} + +RetCode StmtExecTracker::UnRegisterStmtES(u_int64_t query_id) { + lock_.acquire(); + auto it = query_id_to_stmtes_.find(query_id); + if (it == query_id_to_stmtes_.end()) { + LOG(WARNING) << "invalide query id = " << it->first + << " at UnRegisterStmtES" << endl; + } + query_id_to_stmtes_.erase(it); + + LOG(INFO) << "query id= " << query_id + << " has erased from StmtEs! 
then left stmt = " + << query_id_to_stmtes_.size() << endl; + lock_.release(); + return rSuccess; +} +// for invoking from outside, so should add lock +RetCode StmtExecTracker::CancelStmtExec(u_int64_t query_id) { + lock_.acquire(); + auto it = query_id_to_stmtes_.find(query_id); + if (it == query_id_to_stmtes_.end()) { + LOG(WARNING) << "inval query id at cancel query of stmt exec tracker" + << endl; + lock_.release(); + assert(false); + return rCouldnotFindCancelQueryId; + } + it->second->CancelStmtExec(); + lock_.release(); + return rSuccess; +} + +void StmtExecTracker::CheckStmtExecStatus(caf::event_based_actor* self, + StmtExecTracker* stmtes) { + self->become( + + [=](CheckStmtESAtom) { + stmtes->lock_.acquire(); + for (auto it = stmtes->query_id_to_stmtes_.begin(); + it != stmtes->query_id_to_stmtes_.end();) { + if (it->second->CouldBeDeleted((u_int64_t)stmtes->logic_time_)) { + LOG(INFO) << "query id = " << it->first << " will be deleted!"; + delete it->second; + it->second = NULL; + // pay attention to erase() + it = stmtes->query_id_to_stmtes_.erase(it); + } else { + if (it->second->HaveErrorCase(stmtes->logic_time_)) { + LOG(ERROR) << "query id = " << it->first + << " occur error and will be cancelled!"; + // assert(false); + it->second->CancelStmtExec(); + } + ++it; + } + } + stmtes->lock_.release(); + stmtes->logic_time_++; + self->delayed_send(self, std::chrono::milliseconds(kCheckIntervalTime), + CheckStmtESAtom::value); + }, + [=](ExitAtom) { self->quit(); }, + caf::others >> [=]() { + LOG(WARNING) << "stmt checking receives unkown message" + << endl; + }); + self->send(self, CheckStmtESAtom::value); +} + +// first find stmt_exec_status, then update status +bool StmtExecTracker::UpdateSegExecStatus( + NodeSegmentID node_segment_id, SegmentExecStatus::ExecStatus exec_status, + string exec_info) { + lock_.acquire(); + auto it = query_id_to_stmtes_.find(node_segment_id.first); + if (it != query_id_to_stmtes_.end()) { + StmtExecStatus::ExecStatus 
stmt_exec_status = it->second->get_exec_status(); + bool ret = it->second->UpdateSegExecStatus(node_segment_id, exec_status, + exec_info, logic_time_); + LOG(INFO) << node_segment_id.first << " , " << node_segment_id.second + << " receive : " << exec_status << " , " << exec_info + << " stmt status before: " << stmt_exec_status + << " ,after: " << it->second->get_exec_status(); + lock_.release(); + return ret; + } else { // maybe receive delayed message + LOG(WARNING) << "query id = " << node_segment_id.first + << " couldn't be found in tracker!"; + lock_.release(); + } + return false; +} + +} // namespace claims diff --git a/exec_tracker/stmt_exec_tracker.h b/exec_tracker/stmt_exec_tracker.h new file mode 100644 index 000000000..539493afa --- /dev/null +++ b/exec_tracker/stmt_exec_tracker.h @@ -0,0 +1,77 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/Executor/query_exec_tracker.h + * + * Created on: Mar 24, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef EXEC_TRACKER_STMT_EXEC_TRACKER_H_ +#define EXEC_TRACKER_STMT_EXEC_TRACKER_H_ +#include "../exec_tracker/stmt_exec_tracker.h" +#include +#include +#include +#include "./segment_exec_status.h" +#include "./stmt_exec_status.h" +#include "../common/Block/ResultSet.h" +#include "../common/error_define.h" +#include "../exec_tracker/segment_exec_tracker.h" +#include "../utility/lock.h" +using std::string; +namespace claims { +#define kMaxNodeNum 10000 +#define kCheckIntervalTime 5000 +/// Tracks the execution status of every statement; each StmtExecStatus must +/// register with it +class StmtExecTracker { + public: + StmtExecTracker(); + virtual ~StmtExecTracker(); + u_int64_t GenQueryId() { return query_id_gen_++; } + RetCode RegisterStmtES(StmtExecStatus* stmtes); + RetCode UnRegisterStmtES(u_int64_t query_id); + RetCode CancelStmtExec(u_int64_t query_id); + + // Checks every StmtExecStatus in caf's thread; must be static because it + // is used to spawn a caf thread + static void CheckStmtExecStatus(caf::event_based_actor* self, + StmtExecTracker* stmtes); + // Finds the corresponding stmt_status in this tracker, then updates the + // segment_status + bool UpdateSegExecStatus(NodeSegmentID node_segment_id, + SegmentExecStatus::ExecStatus exec_status, + string exec_info); + u_int64_t get_logic_time() { return logic_time_; } + + private: + std::atomic_ullong logic_time_; + actor stmt_exec_tracker_actor_; + std::atomic_ullong query_id_gen_; + std::unordered_map query_id_to_stmtes_; + Lock lock_; +}; + +} // namespace claims + +#endif // EXEC_TRACKER_STMT_EXEC_TRACKER_H_ diff --git a/loader/Makefile.am b/loader/Makefile.am index 0c85d2ddd..7a5f5a864 100644 --- a/loader/Makefile.am +++ b/loader/Makefile.am @@ -2,10 +2,9 @@ AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ 
-I${BOOST_HOME}/boost/serialization \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ --I${JAVA_HOME}/include/linux \ --I${THERON_HOME}/Include +-I${JAVA_HOME}/include/linux AM_LDFLAGS=-lc -lm -lrt -lxs -lboost_serialization @@ -19,19 +18,20 @@ LDADD = ../catalog/libcatalog.a \ ../common/Block/libblock.a \ ../common/file_handle/libfilehandle.a \ ../common/Schema/libschema.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libloader.a libloader_a_SOURCES = \ - data_injector.cpp \ - data_injector.h file_connector.h \ - single_file_connector.cpp single_file_connector.h \ + data_injector.cpp data_injector.h \ + single_file_connector.h single_file_connector.cpp \ + single_thread_single_file_connector.h single_thread_single_file_connector.cpp \ table_file_connector.cpp table_file_connector.h \ - validity.h validity.cpp + validity.h validity.cpp \ + hdfs_loader.cpp hdfs_loader.h + SUBDIRS = test DIST_SUBDIRS = test diff --git a/loader/data_injector.cpp b/loader/data_injector.cpp index 18e7183c8..7c522184a 100644 --- a/loader/data_injector.cpp +++ b/loader/data_injector.cpp @@ -22,6 +22,9 @@ * Author: yukai * Email: yukai2014@gmail.com * + * Add the load_from_hdfs by hurry.huang + * Email: hurry.huang@infosys.com + * * Description: * */ @@ -67,6 +70,9 @@ #include "../utility/thread_pool.h" #include "../utility/Timer.h" #include "./table_file_connector.h" +#include "hdfs_loader.h" +#include "../common/memory_handle.h" +#define hdfsreadlength 64*1024*1024 using claims::common::FileOpenFlag; using claims::common::FilePlatform; @@ -79,7 +85,7 @@ using claims::catalog::Partitioner; using claims::catalog::ProjectionDescriptor; using 
claims::catalog::Catalog; using boost::lexical_cast; -using namespace claims::common; +using namespace claims::common; // NOLINT /* #define DEFINE_DEBUG_LOG(FLAG, log) \ #ifdef CLAIMS_DEBUG_LOG \ @@ -147,12 +153,17 @@ uint64_t DataInjector::total_unread_sem_time_ = 0; uint64_t DataInjector::total_read_sem_fail_count_ = 0; uint64_t DataInjector::total_unread_sem_fail_count_ = 0; uint64_t DataInjector::total_append_warning_time_ = 0; +string hdfs_name = "HDFS:"; DataInjector::DataInjector(TableDescriptor* table, const string col_separator, const string row_separator) : table_(table), col_separator_(col_separator), - row_separator_(row_separator) { + row_separator_(row_separator), + row_id_in_table_(table_->row_number_), + connector_(table_->get_connector()), + + hdfsloader_ (new HdfsLoader()){ sub_tuple_generator_.clear(); table_schema_ = table_->getSchema(); for (int i = 0; i < table_->getNumberOfProjection(); i++) { @@ -192,16 +203,14 @@ DataInjector::DataInjector(TableDescriptor* table, const string col_separator, sblock_ = new Block(BLOCK_SIZE); -#ifdef DATA_DO_LOAD - connector_ = new TableFileConnector( - Config::local_disk_mode ? 
FilePlatform::kDisk : FilePlatform::kHdfs, - write_path_); -#endif + // #ifdef DATA_DO_LOAD + // connector_ = table_->get_connector(); + // #endif } DataInjector::~DataInjector() { DELETE_PTR(table_schema_); - DELETE_PTR(connector_); + // DELETE_PTR(connector_); DELETE_PTR(sblock_); for (auto it : pj_buffer_) { for (auto iter : it) { @@ -240,10 +249,8 @@ RetCode DataInjector::PrepareInitInfo(FileOpenFlag open_flag) { tmp_tuple_count.push_back(0); tmp_block_num.push_back(0); } - LOG(INFO) - << "init number of partitions " << i << "\t" << j << "\t:" - << table_->getProjectoin(i)->getPartitioner()->getPartitionBlocks(j) - << endl; + LOG(INFO) << "init number of partitions (" << i << "," << j + << "):" << tmp_block_num[j]; } pj_buffer_.push_back(temp_v); blocks_per_partition_.push_back(tmp_block_num); @@ -269,7 +276,7 @@ RetCode DataInjector::LoadFromFileSingleThread(vector input_file_names, cout << endl; EXEC_AND_RETURN_ERROR( - ret, PrepareEverythingForLoading(input_file_names, open_flag, result), + ret, PrepareEverythingForLoading(input_file_names, open_flag, result, hdfsloader_), "failed to prepare everything for loading"); GETCURRENTTIME(start_read_time); @@ -334,7 +341,7 @@ RetCode DataInjector::LoadFromFileSingleThread(vector input_file_names, << row_id_in_file << ". 
ret:" << ret); total_insert_time_ += GetElapsedTimeInUs(start_insert_time); - ++row_id_in_table_; + __sync_add_and_fetch(&row_id_in_table_, 1L); tuple_record.clear(); } @@ -394,23 +401,45 @@ RetCode DataInjector::SetTableState(FileOpenFlag open_flag, } LOG(INFO) << "\n--------------------Load Begin!------------------------\n"; } else { - row_id_in_table_ = table_->getRowNumber(); LOG(INFO) << "\n------------------Append Begin!-----------------------\n"; } return ret; } RetCode DataInjector::CheckFiles(vector input_file_names, - ExecutedResult* result) { + ExecutedResult* result, HdfsLoader * hdfsloader_) { int ret = rSuccess; - for (auto file_name : input_file_names) { - ifstream input_file(file_name.c_str()); - if (!input_file.good()) { - ret = rOpenDiskFileFail; - PLOG(ERROR) << "[ " << ret << ", " << CStrError(ret) << " ]" - << "File name:" << file_name << ". Reason"; - result->SetError("Can't access file :" + file_name); - return ret; + for (auto &file_name : input_file_names) { + //add the load from hdfs by hurry.huang 22/feb/2017 + if(file_name.find(hdfs_name) == 0){ + file_name = file_name.substr(hdfs_name.length()); + ret = hdfsloader_->CheckHdfsFile(file_name); + if(ret != rSuccess){ + ret = rOpenHdfsFileFail; + PLOG(ERROR) << "[ " << ret << ", " << CStrError(ret) << " ]" + << "File name:" << file_name << ". Reason"; + result->SetError("Can't access file :" + file_name); + return ret; + } + /* + hdfsFileInfo* hdfsfile = hdfsGetPathInfo(fs_,file_name.c_str()); + if(NULL == hdfsfile){ + PLOG(ERROR) << "failed to open file :" << file_name << " in mode" + << file_status_info[FileHandleImp::kInReading] << " ."; + return rOpenHdfsFileFail; + } +*/ + } + else{ + ifstream input_file(file_name.c_str()); + if (!input_file.good()) { + ret = rOpenDiskFileFail; + PLOG(ERROR) << "[ " << ret << ", " << CStrError(ret) << " ]" + << "File name:" << file_name << ". 
Reason"; + result->SetError("Can't access file :" + file_name); + return ret; + + } } } return ret; @@ -418,19 +447,23 @@ RetCode DataInjector::CheckFiles(vector input_file_names, RetCode DataInjector::PrepareEverythingForLoading( vector input_file_names, FileOpenFlag open_flag, - ExecutedResult* result) { + ExecutedResult* result, HdfsLoader * hdfsloader_) { int ret = rSuccess; GET_TIME_DI(prepare_start_time); EXEC_AND_RETURN_ERROR(ret, PrepareInitInfo(open_flag), "failed to prepare initialization info"); + EXEC_AND_RETURN_ERROR(ret, hdfsloader_->PrepareForLoadFromHdfs(), + "failed to prepare load_from_hdfs ."); PLOG_DI("prepare time: " << GetElapsedTimeInUs(prepare_start_time) / 1000000.0); // open files GET_TIME_DI(open_start_time); #ifdef DATA_DO_LOAD - EXEC_AND_RETURN_ERROR(ret, connector_->Open(open_flag), - " failed to open connector"); + if (kCreateFile == open_flag) + EXEC_AND_LOG(ret, connector_.DeleteAllTableFiles(), + "deleted all table files", "failed to delete all table files"); + EXEC_AND_RETURN_ERROR(ret, connector_.Open(), " failed to open connector"); #endif PLOG_DI("open connector time: " << GetElapsedTimeInUs(open_start_time) / 1000000.0); @@ -443,7 +476,7 @@ RetCode DataInjector::PrepareEverythingForLoading( // check files GET_TIME_DI(start_check_file_time); - EXEC_AND_RETURN_ERROR(ret, CheckFiles(input_file_names, result), + EXEC_AND_RETURN_ERROR(ret, CheckFiles(input_file_names, result, hdfsloader_), "some files are unaccessible"); PLOG_DI("used " << GetElapsedTimeInUs(start_check_file_time) / 1000000.0 << " time to check file "); @@ -454,7 +487,7 @@ RetCode DataInjector::FinishJobAfterLoading(FileOpenFlag open_flag) { int ret = rSuccess; #ifdef DATA_DO_LOAD - EXEC_AND_LOG(ret, connector_->Close(), "closed connector.", + EXEC_AND_LOG(ret, connector_.Close(), "closed connector.", "Failed to close connector. 
ret:" << ret); #endif @@ -475,6 +508,8 @@ RetCode DataInjector::LoadFromFileMultiThread(vector input_file_names, double sample_rate) { int ret = rSuccess; int file_count = 0; + //HdfsLoader* hdfsloader_ = new HdfsLoader(); + LOG(INFO)<<"enter the loadfromfilemultithread and create the hdfsloader_"< input_file_names, cout << endl; EXEC_AND_RETURN_ERROR( - ret, PrepareEverythingForLoading(input_file_names, open_flag, result), + ret, PrepareEverythingForLoading(input_file_names, open_flag, result, hdfsloader_), "failed to prepare everything for loading"); // create threads handling tuples @@ -503,43 +538,93 @@ RetCode DataInjector::LoadFromFileMultiThread(vector input_file_names, // start to read every raw data file GETCURRENTTIME(start_read_time); for (auto file_name : input_file_names) { - ifstream input_file(file_name.c_str()); - DLOG_DI("Now handle file :" << file_name); - // read every tuple - while (GetTupleTerminatedBy(input_file, tuple_record, row_separator_) || - tuple_record != "") { - if (tuple_record == "\r") - tuple_record = ""; // eliminate the effect of '\r' - DLOG_DI("---------------read tuple " - << tuple_record << "tuple size is " << tuple_record.length() - << ". 
input file's eof is " << input_file.eof()); + if(file_name.find(hdfs_name) == 0){ + file_name = file_name.substr(hdfs_name.length()); + //hdfsFileInfo* hdfsfile = hdfsGetPathInfo(fs_, file_name.c_str()) ; + DLOG_DI("Now handle hdfs_file :" << file_name); + void * buffer; + buffer = Malloc(hdfsreadlength + 1); + int pos = 0; int read_num = 0; + const int length = hdfsreadlength; +// hdfsloader_->file_ = +// hdfsOpenFile(hdfsloader_->fs_, file_name.c_str(),O_RDONLY,0,0,0); + ret = hdfsloader_->OpenHdfsFile(file_name); + if (ret != rSuccess) { + cout << "open file error" << endl; + } + while(GetTupleTerminatedByFromHdfs(buffer,hdfsloader_, file_name, tuple_record, + row_separator_, pos, read_num, length) || tuple_record != ""){ + if(tuple_record == "\r") + tuple_record = ""; // eliminate the effect of '\r' + //std::cout<<"get the tuple_record is"<= sample_rate) continue; // sample + + int list_index = row_id_in_file % thread_count; + { // push into one thread local tuple pool + GET_TIME_DI(start_tuple_buffer_lock_time); + LockGuard guard( + task_list_access_lock_[list_index]); /// lock/sem + ATOMIC_ADD(total_lock_tuple_buffer_time_, + GetElapsedTimeInUs(start_tuple_buffer_lock_time)); + task_lists_[list_index].push_back( + std::move(LoadTask(tuple_record, file_name, row_id_in_file))); + } + + tuple_count_sem_in_lists_[list_index].post(); + + } + free(buffer); + buffer = NULL; + hdfsloader_->CloseHdfsFile(); + } + //the function for load_from_hdfs by hurry. + else{ + ifstream input_file(file_name.c_str()); + DLOG_DI("Now handle file :" << file_name); + // read every tuple + while (GetTupleTerminatedBy(input_file, tuple_record, row_separator_) || + tuple_record != "") { + if (tuple_record == "\r") + tuple_record = ""; // eliminate the effect of '\r' + DLOG_DI("---------------read tuple " + << tuple_record << "tuple size is " << tuple_record.length() + << ". input file's eof is " << input_file.eof()); + + // just to tell everyone "i am alive!!!" 
+ if (0 == row_id_in_file % 50000) AnnounceIAmLoading(); + ++row_id_in_file; + + if (GetRandomDecimal() >= sample_rate) continue; // sample + + int list_index = row_id_in_file % thread_count; + { // push into one thread local tuple pool + GET_TIME_DI(start_tuple_buffer_lock_time); + LockGuard guard( + task_list_access_lock_[list_index]); /// lock/sem + ATOMIC_ADD(total_lock_tuple_buffer_time_, + GetElapsedTimeInUs(start_tuple_buffer_lock_time)); + task_lists_[list_index].push_back( + std::move(LoadTask(tuple_record, file_name, row_id_in_file))); + } + + tuple_count_sem_in_lists_[list_index].post(); + } + + DLOG_DI("--------------- input file's eof is " << input_file.eof()); + LOG(INFO) << "insert all " << row_id_in_file + << " line into tuple pool from " << file_name << " into blocks" + << endl; + input_file.close(); + ++file_count; + } - // just to tell everyone "i am alive!!!" - if (0 == row_id_in_file % 50000) AnnounceIAmLoading(); - ++row_id_in_file; - - if (GetRandomDecimal() >= sample_rate) continue; // sample - - int list_index = row_id_in_file % thread_count; - { // push into one thread local tuple pool - GET_TIME_DI(start_tuple_buffer_lock_time); - LockGuard guard( - task_list_access_lock_[list_index]); /// lock/sem - ATOMIC_ADD(total_lock_tuple_buffer_time_, - GetElapsedTimeInUs(start_tuple_buffer_lock_time)); - task_lists_[list_index].push_back( - std::move(LoadTask(tuple_record, file_name, row_id_in_file))); - } - - tuple_count_sem_in_lists_[list_index].post(); - } - - DLOG_DI("--------------- input file's eof is " << input_file.eof()); - LOG(INFO) << "insert all " << row_id_in_file - << " line into tuple pool from " << file_name << " into blocks" - << endl; - input_file.close(); - ++file_count; } __sync_add_and_fetch(&all_tuple_read_, 1); LOG(INFO) << "used " @@ -587,6 +672,7 @@ RetCode DataInjector::LoadFromFileMultiThread(vector input_file_names, DELETE_ARRAY(task_list_access_lock_); DELETE_ARRAY(tuple_count_sem_in_lists_); + return ret; } @@ -836,11 
+922,8 @@ RetCode DataInjector::InsertFromString(const string tuples, EXEC_AND_RETURN_ERROR(ret, PrepareInitInfo(kAppendFile), "failed to prepare initialization info"); #ifdef DATA_DO_LOAD - EXEC_AND_RETURN_ERROR(ret, connector_->Open(kAppendFile), - " failed to open connector"); + EXEC_AND_RETURN_ERROR(ret, connector_.Open(), " failed to open connector"); #endif - - row_id_in_table_ = table_->getRowNumber(); LOG(INFO) << "\n------------------Insert Begin!-----------------------\n"; string::size_type cur = 0; @@ -854,7 +937,6 @@ RetCode DataInjector::InsertFromString(const string tuples, EXEC_AND_ONLY_LOG_ERROR(ret, AddRowIdColumn(tuple_record), "failed to add row_id column for tuple."); - --row_id_in_table_; // it will be added in line 894 LOG(INFO) << "row " << line << ": " << tuple_record << endl; vector columns_validities; @@ -866,9 +948,6 @@ RetCode DataInjector::InsertFromString(const string tuples, (ret = CheckAndToValue(tuple_record, tuple_buffer, RawDataSource::kSQL, columns_validities))) { // contain data error, which is stored in the end of columns_validities - - // eliminate the side effect in row_id_in_table_ - row_id_in_table_ -= correct_tuple_buffer.size(); for (auto it : correct_tuple_buffer) DELETE_PTR(it); correct_tuple_buffer.clear(); @@ -891,7 +970,6 @@ RetCode DataInjector::InsertFromString(const string tuples, if (rSuccess != ret) return ret; correct_tuple_buffer.push_back(tuple_buffer); - ++row_id_in_table_; ++line; prev_cur = cur + 1; } @@ -908,7 +986,7 @@ RetCode DataInjector::InsertFromString(const string tuples, "flush all last block that are not full", "failed to flush all last block"); #ifdef DATA_DO_LOAD - EXEC_AND_LOG(ret, connector_->Close(), "closed connector.", + EXEC_AND_LOG(ret, connector_.Close(), "closed connector.", "Failed to close connector."); #endif EXEC_AND_ONLY_LOG_ERROR(ret, UpdateCatalog(kAppendFile), @@ -921,6 +999,7 @@ RetCode DataInjector::InsertFromString(const string tuples, // flush the last block which is not 
full of 64*1024Byte RetCode DataInjector::FlushNotFullBlock( Block* block_to_write, vector>& pj_buffer) { + TableDescriptor* table = table_; int ret = rSuccess; for (int i = 0; i < table_->getNumberOfProjection(); i++) { for ( @@ -931,8 +1010,8 @@ RetCode DataInjector::FlushNotFullBlock( pj_buffer[i][j]->serialize(*block_to_write); #ifdef DATA_DO_LOAD EXEC_AND_LOG(ret, - connector_->AtomicFlush(i, j, block_to_write->getBlock(), - block_to_write->getsize()), + connector_.AtomicFlush(i, j, block_to_write->getBlock(), + block_to_write->getsize()), "flushed the last block from buffer(" << i << "," << j << ") into file", "failed to flush the last block from buffer(" @@ -948,8 +1027,6 @@ RetCode DataInjector::FlushNotFullBlock( RetCode DataInjector::UpdateCatalog(FileOpenFlag open_flag) { int ret = rSuccess; - // register the number of rows in table to catalog - table_->setRowNumber(row_id_in_table_); // register the partition information to catalog for (int i = 0; i < table_->getNumberOfProjection(); i++) { for ( @@ -1018,14 +1095,16 @@ RetCode DataInjector::InsertTupleIntoProjection( << " tuples"); void* block_tuple_addr = local_pj_buffer[i][part]->allocateTuple(tuple_max_length); + TableDescriptor* table = table_; + if (NULL == block_tuple_addr) { // if buffer is full, write buffer(64K) to HDFS/disk local_pj_buffer[i][part]->serialize(*block_to_write); #ifdef DATA_DO_LOAD EXEC_AND_ONLY_LOG_ERROR( - ret, connector_->AtomicFlush(i, part, block_to_write->getBlock(), - block_to_write->getsize()), - "failed to write to data file. ret:" << ret); + ret, connector_.AtomicFlush(i, part, block_to_write->getBlock(), + block_to_write->getsize()), + /* "written to data file", */ "failed to write to data file. 
"); #endif __sync_add_and_fetch(&blocks_per_partition_[i][part], 1); local_pj_buffer[i][part]->setEmpty(); @@ -1095,6 +1174,72 @@ istream& DataInjector::GetTupleTerminatedBy(ifstream& ifs, string& res, return ifs; } +bool DataInjector::GetTupleTerminatedByFromHdfs(void*& buffer, HdfsLoader* hdfsloader_, string & file_name, + string & res, const string & terminator,int & pos, int & read_num, + const int & length){ + res.clear(); + int c = 0; + int total_read_num = 0; + //cout<<"terminator is "<GetCharFromBuffer(buffer, pos, read_num, length, total_read_num); + //std::cout<<"get the char c:"<GetCharFromBuffer(buffer, pos, read_num, length, hdfsloader_, file_name, total_read_num); + if(c == -1)break; + res += c; + if (terminator[coincide_length] == c) { + if (++coincide_length == terminator.length()) { + // don't read terminator into string, same as getline() + res = res.substr(0, res.length() - terminator.length()); + return true; + } + } else { + break; + } + }*/ + + + } + } + while (true){ + c = hdfsloader_->GetCharFromBuffer(buffer, pos, read_num, length, total_read_num); + std::cout<<"get the char c:"<GetCharFromBuffer(buffer, pos, read_num, length, total_read_num); + if(c == -1)break; + res += c; + if (terminator[coincide_length] == c) { + if (++coincide_length == terminator.length()) { + // don't read terminator into string, same as getline() + res = res.substr(0, res.length() - terminator.length()); + return true; + } + } else { + break; + } + } + } + } + return false; + + + + //return true; +} + void DataInjector::AnnounceIAmLoading() { static char* load_output_info[7] = { "Loading \r", "Loading.\r", "Loading..\r", "Loading...\r", @@ -1183,12 +1328,12 @@ string DataInjector::GenerateDataValidityInfo(const Validity& vali, "input columns\n"; break; } - case rInvalidInsertData: { - oss << "Data value is invalid for column '" - << table_->getAttribute(vali.column_index_).attrName - << "' at line: " << line; - if ("" != file) oss << " in file: " << file; - oss << 
"\n"; + case rInvalidInsertData: { + oss << "Data value is invalid for column '" + << table_->getAttribute(vali.column_index_).attrName + << "' at line: " << line; + if ("" != file) oss << " in file: " << file; + oss << "\n"; break; } default: diff --git a/loader/data_injector.h b/loader/data_injector.h index b6b6e2415..858b25150 100644 --- a/loader/data_injector.h +++ b/loader/data_injector.h @@ -33,10 +33,11 @@ #include #include "../common/error_define.h" -#include "../common/file_handle/file_handle_imp.h" #include "../common/hash.h" #include "../catalog/table.h" #include "./validity.h" +#include "../common/file_handle/file_handle_imp.h" +#include "hdfs_loader.h" using claims::common::FileOpenFlag; using claims::catalog::TableDescriptor; @@ -51,8 +52,7 @@ class ExecutedResult; namespace claims { namespace loader { - -class FileConnector; +class TableFileConnector; class DataInjector { public: struct LoadTask { @@ -165,10 +165,11 @@ class DataInjector { static void* HandleTuple(void* ptr); RetCode SetTableState(FileOpenFlag open_flag, ExecutedResult* result); - RetCode CheckFiles(vector input_file_names, ExecutedResult* result); + RetCode CheckFiles(vector input_file_names, ExecutedResult* result + , HdfsLoader * hdfsloader_); RetCode PrepareEverythingForLoading(vector input_file_names, FileOpenFlag open_flag, - ExecutedResult* result); + ExecutedResult* result,HdfsLoader * hdfsloader_); RetCode FinishJobAfterLoading(FileOpenFlag open_flag); RetCode PrepareLocalPJBuffer(vector>& pj_buffer); @@ -178,10 +179,12 @@ class DataInjector { public: static istream& GetTupleTerminatedBy(ifstream& ifs, string& res, const string& terminator); + static bool GetTupleTerminatedByFromHdfs(void*& buffer, HdfsLoader* hdfsloader_, string & file_name, string& res, + const string& terminator, int & pos, int & read_num, const int & length); private: TableDescriptor* table_; - FileConnector* connector_; + TableFileConnector& connector_; Schema* table_schema_; vector projections_schema_; 
@@ -199,7 +202,7 @@ class DataInjector { string col_separator_; string row_separator_; - uint64_t row_id_in_table_; + uint64_t& row_id_in_table_; // support multi-thread std::list* task_lists_ = NULL; @@ -215,6 +218,9 @@ class DataInjector { int all_tuple_read_ = 0; RetCode multi_thread_status_ = rSuccess; ExecutedResult* result_; + HdfsLoader* hdfsloader_; + + /******************debug********************/ public: static uint64_t total_get_substr_time_; diff --git a/loader/hdfs_loader.cpp b/loader/hdfs_loader.cpp new file mode 100644 index 000000000..dda78ab05 --- /dev/null +++ b/loader/hdfs_loader.cpp @@ -0,0 +1,111 @@ +/* + * Copyright [2012-2017] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/stmt_handler/load_exec.cpp + * + * Created on: Feb 22, 2017 + * Author: hurry.huang + * Email: hurry.huang@infosys.com + * + * Description: + * this file is the function body of class LoadExec. 
+ */ + +//#include +#include "hdfs_loader.h" +#include "../common/error_define.h" +#include "../common/file_handle/hdfs_connector.h" +#include +using namespace std; +using namespace claims::common; + +namespace claims{ +namespace loader{ + + HdfsLoader::HdfsLoader(){ + + } + HdfsLoader::~HdfsLoader(){ + if(NULL == file_){ + LOG(INFO) <<"hdfs file for load from hdfs has been closed."<(buffer),length); + pos = 0; + if(read_num == 0){ + pos = 0; + + return -1; + + } + } + total_read_num++; + return *((char*)buffer + pos++); + //return ' '; + + } + + RetCode HdfsLoader::OpenHdfsFile(string & file_name){ + int ret = rSuccess; + file_ = hdfsOpenFile(fs_, file_name.c_str(),O_RDONLY,0,0,0); + if (!file_) { + ret = rFailure; + } + return ret; + + + + } + + + +} +} diff --git a/loader/hdfs_loader.h b/loader/hdfs_loader.h new file mode 100644 index 000000000..61696bb6a --- /dev/null +++ b/loader/hdfs_loader.h @@ -0,0 +1,69 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/stmt_handler/load_exec.h + * + * Created on: feb 22, 2017 + * Author: hurry_huang + * Email: hurry.huang@infosys.com + * + * Description: + * this file contains one class about load data from hdfs. 
+ * + */ + +#ifndef STMT_HANDLER_HDFS_LOAD_EXEC_H +#define STMT_HANDLER_HDFS_LOAD_EXEC_H + +//#include "../stmt_handler/stmt_exec.h" +#include +#include +#include "../common/error_define.h" +using std::string; + +namespace claims{ +namespace loader{ +/** + * @brief + * @details + */ +class HdfsLoader{ +public: + HdfsLoader(); + virtual ~HdfsLoader(); + //RetCode Execute (ExecutedResult* exec_result); + RetCode CheckHdfsFile(string file_name); + RetCode PrepareForLoadFromHdfs(); + int GetCharFromBuffer(void*& buffer, int & pos, int & read_num, const int & length, + int & total_read_num); + RetCode GetFromHdfs(const string& file_name,int & length); + RetCode OpenHdfsFile(string & file_name); + RetCode CloseHdfsFile(); +private: + //AstLoadTable* load_ast_; + hdfsFS fs_; + hdfsFile file_; + //common::FileHandleImp* imp_; + +}; + + +} +} + +#endif diff --git a/loader/single_file_connector.cpp b/loader/single_file_connector.cpp index b265fb86d..cb3702769 100644 --- a/loader/single_file_connector.cpp +++ b/loader/single_file_connector.cpp @@ -26,8 +26,130 @@ * */ -#include "single_file_connector.h" +#include "./single_file_connector.h" +#include +#include "../utility/lock_guard.h" + +#define FILE_CONNECTOR_DEBUG + +#ifdef CLAIMS_DEBUG_LOG +#ifdef FILE_CONNECTOR_DEBUG +#define DLOG_FC(info) DLOG(INFO) << info << std::endl; +#else +#define DLOG_FC(info) +#endif +#ifdef FILE_CONNECTOR_DEBUG +#define PLOG_FC(info) DLOG(INFO) << info << endl; +#else +#define PLOG_FC(info) +#endif +#else +#define DLOG_FC(info) +#endif + +#ifdef DATA_INJECTOR_PREF +#define ATOMIC_ADD(var, value) __sync_add_and_fetch(&var, value); +#define GET_TIME_DI(var) GETCURRENTTIME(var); +#else +#define ATOMIC_ADD(var, value) +#define GET_TIME_DI(var) +#endif + +using claims::utility::LockGuard; namespace claims { -namespace loader {} /* namespace loader */ +namespace loader { + +SingleFileConnector::SingleFileConnector(FilePlatform platform, + string file_name, + FileOpenFlag open_flag) + : 
platform_(platform), + file_name_(file_name), + is_closed(true), + open_flag_(open_flag), + ref_(0) { + imp_ = common::FileHandleImpFactory::Instance().CreateFileHandleImp( + platform_, file_name_); + if (common::FileOpenFlag::kCreateFile == open_flag_) { + flush_function = std::bind(&FileHandleImp::OverWrite, imp_, + std::placeholders::_1, std::placeholders::_2); + } else if (common::FileOpenFlag::kAppendFile == open_flag_) { + flush_function = std::bind(&FileHandleImp::Append, imp_, + std::placeholders::_1, std::placeholders::_2); + } else { + flush_function = [](const void* source, const size_t length) -> RetCode { + assert(false && "Can't flush a file opened with read mode"); + return common::rFailure; + }; + } +} + +RetCode SingleFileConnector::Open() { + RetCode ret = rSuccess; + if (0 != ref_) { + ++ref_; + DLOG_FC("++ref_ " + << " and now ref_ is " << ref_); + } else { + LockGuard guard(open_close_lcok_); + DLOG_FC("get locked to open " + << " and now ref_ is " << ref_); + if (0 == ref_) { + DLOG_FC("get right to execute Open()"); + EXEC_AND_RETURN_ERROR( + ret, imp_->SwitchStatus( + static_cast(open_flag_)), + "failed to open file:" << file_name_); + ++ref_; + DLOG_FC("after Opening, ++ref_: " << ref_); + is_closed = false; + } else { + ++ref_; // also need add ref_ + DLOG_FC("after other Opening, ++ref_: " << ref_); + } + } + return ret; +} + +RetCode SingleFileConnector::Close() { + RetCode ret = rSuccess; + if (!is_closed && 0 == (--ref_)) { + DLOG_FC("after --ref_, ref_ which should be 0 now is" << ref_); + LockGuard guard(open_close_lcok_); + DLOG_FC("get lock to close, ref_ is" << ref_); + if (0 == ref_ && !is_closed) { + DLOG_FC("get right to close, ref_ is" << ref_); + EXEC_AND_RETURN_ERROR(ret, imp_->Close(), "file name: " << file_name_); + is_closed = true; + } + } + assert(ref_ >= 0); + DLOG_FC("after --ref_, ref_ is" << ref_); + return ret; +} + +RetCode SingleFileConnector::Delete() { + RetCode ret = rSuccess; + DLOG_FC("before deleting, ref_ 
is " << ref_); + if (0 != ref_) { + EXEC_AND_RETURN_ERROR(ret, common::rFileInUsing, + "file name: " << file_name_); + return common::rFileInUsing; + } + LockGuard guard(open_close_lcok_); + // must double-check in case of deleting a file in using + if (0 == ref_ && is_closed) { + DLOG_FC("get lock to delete , ref_ is" << ref_); + EXEC_AND_RETURN_ERROR(ret, imp_->DeleteFile(), "failed to delete file " + << file_name_); + DLOG_FC("file:" << file_name_ << "is closed successfully"); + is_closed = true; + } else { + EXEC_AND_RETURN_ERROR(ret, common::rFileInUsing, + "file name: " << file_name_); + } + return ret; +} + +} /* namespace loader */ } /* namespace claims */ diff --git a/loader/single_file_connector.h b/loader/single_file_connector.h index b7da06bea..81f0eef7e 100644 --- a/loader/single_file_connector.h +++ b/loader/single_file_connector.h @@ -28,51 +28,67 @@ #ifndef LOADER_SINGLE_FILE_CONNECTOR_H_ #define LOADER_SINGLE_FILE_CONNECTOR_H_ +#include +#include +#include #include #include "./file_connector.h" #include "../common/file_handle/file_handle_imp_factory.h" #include "../common/file_handle/file_handle_imp.h" #include "../common/memory_handle.h" +#include "../common/rename.h" +#include "../utility/lock.h" +#include "../utility/lock_guard.h" namespace claims { namespace loader { using std::string; +using claims::common::FileOpenFlag; using claims::common::FileHandleImp; using claims::common::FilePlatform; +using claims::common::rSuccess; +using claims::utility::LockGuard; +using std::atomic; + +class SingleFileConnector { + NO_COPY_AND_ASSIGN(SingleFileConnector); -class SingleFileConnector : public FileConnector { public: - SingleFileConnector(FilePlatform platform, string file_name) - : FileConnector(platform), file_name_(file_name) { - imp_ = - common::FileHandleImpFactory::Instance().CreateFileHandleImp(platform_); + SingleFileConnector(FilePlatform platform, string file_name, + FileOpenFlag open_flag); + ~SingleFileConnector() { + Close(); + 
DELETE_PTR(imp_); } - ~SingleFileConnector() { DELETE_PTR(imp_); } - virtual RetCode Open(common::FileOpenFlag oepn_flag) { - return imp_->Open(file_name_, oepn_flag); - } - virtual RetCode Close() { return imp_->Close(); } - virtual RetCode Flush(const void* source, unsigned length) { - return imp_->Write(source, length); - } - virtual RetCode AtomicFlush(const void* source, unsigned length) { - return imp_->AtomicWrite(source, length); - } - virtual RetCode Flush(unsigned projection_offset, unsigned partition_offset, - const void* source, unsigned length) { - assert(false && "not implemented"); - return common::rFailure; - } - virtual RetCode AtomicFlush(unsigned projection_offset, - unsigned partition_offset, const void* source, - unsigned length) { - assert(false && "not implemented"); - return common::rFailure; + RetCode Open(); + RetCode Close(); + + inline RetCode AtomicFlush(const void* source, const size_t length) { + LockGuard guard(write_lock_); + // if (common::FileOpenFlag::kCreateFile == open_flag_) { + // return imp_->OverWrite(source, length); + // } else if (common::FileOpenFlag::kAppendFile == open_flag_) { + // return imp_->Append(source, length); + // } else { + // assert(false && "Can't flush a file opened with read mode"); + // return common::rFailure; + // } + return flush_function(source, length); } + /*RetCode AtomicFlush(const void* source, unsigned length, + function lock_func, function unlock_func, + bool overwrite = false) { + if (overwrite) + return imp_->AtomicOverWrite(source, length, lock_func, unlock_func); + else + return imp_->AtomicAppend(source, length, lock_func, unlock_func); + }*/ + + bool CanAccess() { return imp_->CanAccess(file_name_); } - virtual bool CanAccess() { return imp_->CanAccess(file_name_); } + RetCode Delete(); /** * @brief Method description: load total file into memory * @param buffer: set buffer point to a new memory allocated by this method, @@ -80,7 +96,10 @@ class SingleFileConnector : public 
FileConnector { * @return rSuccess if succeed. * @details   (additional) this method will modify buffer, set to a new memory */ - virtual RetCode LoadTotalFile(void*& buffer, uint64_t* length) { + RetCode LoadTotalFile(void*& buffer, uint64_t* length) { // NOLINT + assert(common::FileOpenFlag::kReadFile == open_flag_ && + "open mode must be read "); + LockGuard guard(write_lock_); return imp_->ReadTotalFile(buffer, length); } @@ -93,17 +112,26 @@ class SingleFileConnector : public FileConnector { * @return rSuccess if OK * @details   (additional) */ - virtual RetCode LoadFile(void* buffer, int64_t start, uint64_t length) { - int ret = imp_->SetPosition(start); - if (ret != common::rSuccess) { - LOG(ERROR) << "failed to set postion at " << start << ". ret:" << ret; - return ret; - } - return imp_->Read(buffer, length); + RetCode LoadFile(void* buffer, int64_t start, uint64_t length) { + assert(common::FileOpenFlag::kReadFile == open_flag_ && + "open mode must be read "); + return imp_->PRead(buffer, length, start); } private: string file_name_; + common::FilePlatform platform_; + common::FileHandleImp* imp_; + common::FileOpenFlag open_flag_ = static_cast(-1); + + Lock write_lock_; // when open with read mode, the lock become read_lock + + atomic ref_; + bool is_closed = true; + SpineLock open_close_lcok_; + // RetCode (*flush_function)(const void* source, unsigned length); + std::function + flush_function; }; } /* namespace loader */ diff --git a/loader/single_thread_single_file_connector.cpp b/loader/single_thread_single_file_connector.cpp new file mode 100644 index 000000000..a5db3e011 --- /dev/null +++ b/loader/single_thread_single_file_connector.cpp @@ -0,0 +1,33 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/loader/single_thread_single_file_connector.cpp + * + * Created on: Mar 21, 2016 + * Author: yukai + * Email: yukai2014@gmail.com + * + * Description: + * + */ + +#include "./single_thread_single_file_connector.h" + +namespace claims { +namespace loader {} /* namespace loader */ +} /* namespace claims */ diff --git a/loader/single_thread_single_file_connector.h b/loader/single_thread_single_file_connector.h new file mode 100644 index 000000000..b5993df24 --- /dev/null +++ b/loader/single_thread_single_file_connector.h @@ -0,0 +1,106 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/loader/single_thread_single_file_connector.h + * + * Created on: Mar 21, 2016 + * Author: yukai + * Email: yukai2014@gmail.com + * + * Description: + * + */ + +#ifndef LOADER_SINGLE_THREAD_SINGLE_FILE_CONNECTOR_H_ +#define LOADER_SINGLE_THREAD_SINGLE_FILE_CONNECTOR_H_ +#include +#include "./file_connector.h" +#include "../common/file_handle/file_handle_imp_factory.h" +#include "../common/file_handle/file_handle_imp.h" +#include "../common/memory_handle.h" +#include "../common/rename.h" + +using claims::common::FilePlatform; +using claims::common::rSuccess; +using std::string; + +namespace claims { +namespace loader { + +class SingleThreadSingleFileConnector { + NO_COPY_AND_ASSIGN(SingleThreadSingleFileConnector); + + public: + SingleThreadSingleFileConnector(FilePlatform platform, string file_name) + : platform_(platform), file_name_(file_name) { + imp_ = common::FileHandleImpFactory::Instance().CreateFileHandleImp( + platform_, file_name_); + } + ~SingleThreadSingleFileConnector() { DELETE_PTR(imp_); } + + RetCode Close() { return imp_->Close(); } + + bool CanAccess() { return imp_->CanAccess(file_name_); } + + RetCode Delete() { + RetCode ret = rSuccess; + EXEC_AND_RETURN_ERROR(ret, imp_->DeleteFile(), "failed to delete file " + << file_name_); + return ret; + } + /** + * @brief Method description: load total file into memory + * @param buffer: set buffer point to a new memory allocated by this method, + * @param length: set to the length of file, also the length of new memory + * @return rSuccess if succeed. 
+ * @details   (additional) this method will modify buffer, set to a new memory + */ + RetCode LoadTotalFile(void*& buffer, uint64_t* length) { + return imp_->ReadTotalFile(buffer, length); + } + + /** + * @brief Method description: load a part of file into memory pointed by + * buffer + * @param buffer: point to the memory where file is load + * @param start: the position from which to read + * @param length: the length of data need to read + * @return rSuccess if OK + * @details   (additional) + */ + RetCode LoadFile(void* buffer, int64_t start, uint64_t length) { + return imp_->PRead(buffer, length, start); + } + + RetCode Overwrite(const void* source, unsigned length) { + return imp_->OverWrite(source, length); + } + RetCode Append(const void* source, unsigned length) { + return imp_->Append(source, length); + } + + private: + string file_name_; + common::FilePlatform platform_; + common::FileHandleImp* imp_; +}; + +} /* namespace loader */ +} /* namespace claims */ + +#endif // LOADER_SINGLE_THREAD_SINGLE_FILE_CONNECTOR_H_ diff --git a/loader/table_file_connector.cpp b/loader/table_file_connector.cpp index 533683065..acf824176 100644 --- a/loader/table_file_connector.cpp +++ b/loader/table_file_connector.cpp @@ -27,20 +27,18 @@ */ #include "./table_file_connector.h" +#include #include #include -#include #include "./file_connector.h" +#include "../catalog/table.h" +#include "../common/error_define.h" #include "../common/file_handle/file_handle_imp.h" #include "../common/file_handle/file_handle_imp_factory.h" #include "../common/memory_handle.h" using claims::common::FilePlatform; - -namespace claims { -namespace loader { - using claims::common::FileHandleImpFactory; using claims::common::FileOpenFlag; using claims::common::rSuccess; @@ -49,12 +47,50 @@ using claims::common::FilePlatform; using std::vector; using std::string; +namespace claims { +namespace loader { + TableFileConnector::TableFileConnector(FilePlatform platform, - vector> writepath) - : 
FileConnector(platform), write_path_name_(writepath) { - imp_ = FileHandleImpFactory::Instance().CreateFileHandleImp(platform_); + TableDescriptor* table, + FileOpenFlag open_flag) + : platform_(platform), + table_(table), + write_path_name_(table->GetAllPartitionsPath()), + ref_(0), + open_flag_(open_flag) { + for (auto projection_iter : write_path_name_) { + vector projection_files; + projection_files.clear(); + vector projection_locks; + projection_locks.clear(); + for (auto partition_iter : projection_iter) { + FileHandleImp* file = + FileHandleImpFactory::Instance().CreateFileHandleImp(platform_, + partition_iter); + projection_files.push_back(file); + projection_locks.push_back(Lock()); + LOG(INFO) + << "push file handler which handles " << partition_iter + << " into projection_files. Now the size of projection_files is " + << projection_files.size() << std::endl; + } + file_handles_.push_back(projection_files); + write_locks_.push_back(projection_locks); + } + LOG(INFO) << "open all " << file_handles_.size() << " file successfully" + << std::endl; } +// TableFileConnector::TableFileConnector(FilePlatform platform, +// TableDescriptor* table) +// : TableFileConnector(platform, table->GetAllPartitionsPath()) {} + +// TableFileConnector::TableFileConnector(FilePlatform platform, +// vector> writepath) +// : FileConnector(platform), write_path_name_(writepath) { +// imp_ = FileHandleImpFactory::Instance().CreateFileHandleImp(platform_); +//} + TableFileConnector::~TableFileConnector() { Close(); for (auto proj_iter : file_handles_) { @@ -63,23 +99,46 @@ TableFileConnector::~TableFileConnector() { } } LOG(INFO) << "closed all hdfs file of table" << std::endl; - DELETE_PTR(imp_); + // DELETE_PTR(imp_); } -RetCode TableFileConnector::Open(FileOpenFlag open_flag_) { +RetCode TableFileConnector::Open() { + RetCode ret = rSuccess; + if (0 != ref_) { + ++ref_; + } else { + LockGuard guard(open_close_lock_); + if (0 == ref_) { // double-check + for (auto partitions_imp 
: file_handles_) { + for (auto imp : partitions_imp) { + RetCode subret = rSuccess; + EXEC_AND_ONLY_LOG_ERROR( + subret, imp->SwitchStatus( + static_cast(open_flag_)), + "failed to open file:" << imp->get_file_name()); + if (rSuccess != subret) ret = subret; // one failed, all failed + } + } + if (rSuccess == ret) { + table_->update_lock_.acquire(); // lock to avoid updating table + ++ref_; + is_closed = false; + } + } else { + ++ref_; + } + } + return ret; +} + +/*RetCode TableFileConnector::Open() { for (auto projection_iter : write_path_name_) { vector projection_files; projection_files.clear(); for (auto partition_iter : projection_iter) { - if (FileOpenFlag::kCreateFile == open_flag_ && - rSuccess != imp_->CanAccess(partition_iter)) { - LOG(WARNING) << "The file " << partition_iter - << " is already exits! It will be override!\n"; - } FileHandleImp* file = - FileHandleImpFactory::Instance().CreateFileHandleImp(platform_); - int ret = file->Open(partition_iter, open_flag_); - if (ret != rSuccess) return ret; + FileHandleImpFactory::Instance().CreateFileHandleImp(platform_, + partition_iter); projection_files.push_back(file); LOG(INFO) << "push file handler which handles " << partition_iter @@ -90,57 +149,158 @@ RetCode TableFileConnector::Open(FileOpenFlag open_flag_) { } LOG(INFO) << "open all file successfully" << std::endl; return rSuccess; -} +}*/ +/* -RetCode TableFileConnector::Close() { +RetCode TableFileConnector::Flush(unsigned projection_offset, + unsigned partition_offset, const void* source, + unsigned length, bool overwrite = false) { + assert(file_handles_.size() != 0 && "make sure file handles is not empty"); int ret = rSuccess; - for (int i = 0; i < file_handles_.size(); ++i) - for (int j = 0; j < file_handles_[i].size(); ++j) - EXEC_AND_ONLY_LOG_ERROR(ret, file_handles_[i][j]->Close(), - "failed to close " << write_path_name_[i][j] - << ". 
ret:" << ret); - if (rSuccess == ret) LOG(INFO) << "closed all file handles" << std::endl; + if (overwrite) + EXEC_AND_ONLY_LOG_ERROR( + ret, file_handles_[projection_offset][partition_offset]->OverWrite( + source, length), + "failed to overwrite file."); + else + EXEC_AND_ONLY_LOG_ERROR( + ret, file_handles_[projection_offset][partition_offset]->Append(source, + length), + "failed to append file."); return ret; } +*/ +/* -RetCode TableFileConnector::Flush(unsigned projection_offset, - unsigned partition_offset, const void* source, - unsigned length) { +RetCode TableFileConnector::AtomicFlush(unsigned projection_offset, + unsigned partition_offset, + const void* source, unsigned length, + function lock_func, + function unlock_func, + bool overwrite = false) { assert(file_handles_.size() != 0 && "make sure file handles is not empty"); int ret = rSuccess; - EXEC_AND_ONLY_LOG_ERROR( - ret, - file_handles_[projection_offset][partition_offset]->Write(source, length), - "failed to write file. ret:" << ret); + if (overwrite) + EXEC_AND_ONLY_LOG_ERROR( + ret, + file_handles_[projection_offset][partition_offset]->AtomicOverWrite( + source, length, lock_func, unlock_func), + "failed to overwrite file."); + else + EXEC_AND_ONLY_LOG_ERROR( + ret, file_handles_[projection_offset][partition_offset]->AtomicAppend( + source, length, lock_func, unlock_func), + "failed to append file."); return ret; } +*/ RetCode TableFileConnector::AtomicFlush(unsigned projection_offset, unsigned partition_offset, const void* source, unsigned length) { assert(file_handles_.size() != 0 && "make sure file handles is not empty"); int ret = rSuccess; - EXEC_AND_ONLY_LOG_ERROR( - ret, file_handles_[projection_offset][partition_offset]->AtomicWrite( - source, length), - "failed to write file. 
ret:" << ret); + if (FileOpenFlag::kCreateFile == open_flag_) { + LockGuard guard(write_locks_[projection_offset][partition_offset]); + EXEC_AND_ONLY_LOG_ERROR( + ret, file_handles_[projection_offset][partition_offset]->OverWrite( + source, length), + "failed to overwrite file."); + } else if (FileOpenFlag::kAppendFile == open_flag_) { + LockGuard guard(write_locks_[projection_offset][partition_offset]); + EXEC_AND_ONLY_LOG_ERROR( + ret, file_handles_[projection_offset][partition_offset]->Append(source, + length), + "failed to append file."); + } else { + assert(false && "Can't flush a file opened with read mode"); + return common::rFailure; + } return ret; } -RetCode TableFileConnector::DeleteFiles() { - vector>::iterator prj_writepath; - vector::iterator par_writepath; - - for (prj_writepath = write_path_name_.begin(); - prj_writepath != write_path_name_.end(); prj_writepath++) { - vector partitions_file_handles; - for (par_writepath = (*prj_writepath).begin(); - par_writepath != (*prj_writepath).end(); par_writepath++) { - imp_->Open((*par_writepath).c_str(), FileOpenFlag::kReadFile); - imp_->DeleteFile(); +RetCode TableFileConnector::Close() { + assert(file_handles_.size() != 0 && "make sure file handles is not empty"); + + int ret = rSuccess; + if (!is_closed && 0 == (--ref_)) { + LockGuard guard(open_close_lock_); + if (0 == ref_ && !is_closed) { + for (int i = 0; i < file_handles_.size(); ++i) { + for (int j = 0; j < file_handles_[i].size(); ++j) { + RetCode subret = rSuccess; + EXEC_AND_ONLY_LOG_ERROR(subret, file_handles_[i][j]->Close(), + "failed to close " << write_path_name_[i][j]); + if (rSuccess != subret) ret = subret; + } + } + if (rSuccess == ret) { + is_closed = true; + table_->update_lock_.release(); // now table can update its catalog. 
+ LOG(INFO) << "closed all file handles" << std::endl; + } } } + assert(ref_ >= 0); + return ret; +} +RetCode TableFileConnector::DeleteAllTableFiles() { + /* + * this method may be called after creating table and before creating + * projection, so file_handles_ may be empty + */ + // assert(!(0 == table_->row_number_ && file_handles_.size() != 0) && + // "make sure file handles is not empty"); + + RetCode ret = rSuccess; + if (0 != ref_) { + EXEC_AND_RETURN_ERROR(ret, common::rFileInUsing, "now reference is " + << ref_); + } + LockGuard guard(open_close_lock_); + // must double-check in case of deleting a file in using + if (0 == ref_ && is_closed) { + for (auto prj_files : file_handles_) { + for (auto file : prj_files) { + RetCode subret = rSuccess; + EXEC_AND_ONLY_LOG_ERROR(subret, file->DeleteFile(), + "failed to delete file " + << file->get_file_name()); + if (rSuccess != subret) ret = subret; + } + } + if (rSuccess == ret) { + LOG(INFO) << "deleted all files" << std::endl; + } + } else { + ret = common::rFileInUsing; + EXEC_AND_RETURN_ERROR(ret, ret, "now reference is " << ref_); + } + return ret; +} + +RetCode TableFileConnector::UpdateWithNewProj() { + int proj_index = table_->projection_list_.size() - 1; + vector prj_write_path; + vector prj_locks; + vector prj_imps; + for (int j = 0; j < table_->projection_list_[proj_index] + ->getPartitioner() + ->getNumberOfPartitions(); + ++j) { + string path = + PartitionID(table_->getProjectoin(proj_index)->getProjectionID(), j) + .getPathAndName(); + + prj_write_path.push_back(path); + prj_locks.push_back(Lock()); + prj_imps.push_back( + FileHandleImpFactory::Instance().CreateFileHandleImp(platform_, path)); + } + write_path_name_.push_back(prj_write_path); + write_locks_.push_back(prj_locks); + file_handles_.push_back(prj_imps); return rSuccess; } diff --git a/loader/table_file_connector.h b/loader/table_file_connector.h index 0f8e5a99c..9a872f273 100644 --- a/loader/table_file_connector.h +++ 
b/loader/table_file_connector.h @@ -29,6 +29,7 @@ #ifndef LOADER_TABLE_FILE_CONNECTOR_H_ #define LOADER_TABLE_FILE_CONNECTOR_H_ +#include #include #include @@ -36,50 +37,65 @@ #include "../common/file_handle/file_handle_imp.h" #include "../common/file_handle/file_handle_imp_factory.h" #include "./file_connector.h" +#include "../common/rename.h" + +using std::vector; +using std::string; +using std::atomic; namespace claims { +namespace catalog { +class TableDescriptor; +} +using claims::catalog::TableDescriptor; + namespace loader { +class TableFileConnector { + NO_COPY_AND_ASSIGN(TableFileConnector); -class TableFileConnector : public FileConnector { public: - TableFileConnector(common::FilePlatform platform, - std::vector> writepath); - virtual ~TableFileConnector(); - virtual RetCode Open(common::FileOpenFlag open_flag_); - virtual RetCode Close(); + TableFileConnector(common::FilePlatform platform, TableDescriptor* table, + common::FileOpenFlag open_flag); + // TableFileConnector(common::FilePlatform platform, + // std::vector> writepath); + ~TableFileConnector(); + RetCode Open(); + RetCode Close(); /** * @brief Method description: flush length bytes data from source into file * whose projection id is projection_offset and partition id is * partition_offset */ - virtual RetCode Flush(unsigned projection_offset, unsigned partition_offset, - const void* source, unsigned length); - virtual RetCode Flush(const void* source, unsigned length) { - assert(false && "not implemented"); - return common::rFailure; - } - virtual RetCode AtomicFlush(unsigned projection_offset, - unsigned partition_offset, const void* source, - unsigned length); - virtual RetCode AtomicFlush(const void* source, unsigned length) { - assert(false && "not implemented"); - return common::rFailure; - } - virtual RetCode LoadTotalFile(void*& buffer, uint64_t* length) { - assert(false); - return common::rFailure; - } - virtual RetCode LoadFile(void* buffer, int64_t start, uint64_t length) { - 
assert(false); - return common::rFailure; - } - - RetCode DeleteFiles(); + /* + RetCode Flush(unsigned projection_offset, unsigned partition_offset, + const void* source, unsigned length, bool overwrite = false); + + RetCode AtomicFlush(unsigned projection_offset, unsigned partition_offset, + const void* source, unsigned length, + function lock_func, function + unlock_func, + bool overwrite = false); + */ + RetCode AtomicFlush(unsigned projection_offset, unsigned partition_offset, + const void* source, unsigned length); + RetCode DeleteAllTableFiles(); + + RetCode UpdateWithNewProj(); private: - std::vector> file_handles_; - std::vector> write_path_name_; + common::FilePlatform platform_; + vector> file_handles_; + vector> write_path_name_; + TableDescriptor* table_; + + common::FileOpenFlag open_flag_ = static_cast(-1); + + vector> write_locks_; + + atomic ref_; + bool is_closed = true; + Lock open_close_lock_; }; } /* namespace loader */ diff --git a/loader/test/Makefile.am b/loader/test/Makefile.am index 7ff9c21d9..1c211bfc5 100644 --- a/loader/test/Makefile.am +++ b/loader/test/Makefile.am @@ -2,7 +2,7 @@ AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include \ -I${JAVA_HOME}/include/linux \ -I${GTEST_HOME}/include \ @@ -16,14 +16,14 @@ endif LDADD = ../../catalog/libcatalog.a \ ../libloader.a \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a + ${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libtest.a libtest_a_SOURCES = \ - single_file_connector_test.h single_file_connector_test.cpp\ - table_file_connector_test.h table_file_connector_test.cpp \ + single_file_connector_test.h 
single_file_connector_test.cpp \ + single_thread_single_file_connector_test.h\ + table_file_connector_test.h table_file_connector_test.cpp \ data_injector_test.cpp data_injector_test.h diff --git a/loader/test/data_injector_test.h b/loader/test/data_injector_test.h index 0b1e60dc6..7a5bdc9cd 100644 --- a/loader/test/data_injector_test.h +++ b/loader/test/data_injector_test.h @@ -34,6 +34,7 @@ #include #include "../../loader/data_injector.h" #include "../../catalog/catalog.h" +#include "../common/memory_handle.h" using std::ofstream; using claims::catalog::Catalog; @@ -116,6 +117,45 @@ TEST_F(DataInjectorTest, GetTuple4) { res.clear(); } } +TEST_F(DataInjectorTest, PrepareForLoadFromHdfs){ + +} +/* +TEST_F(DataInjectorTest, GetTupleFromHdfs){ + char* data[5] = { + "0|AFRICA|lar deposits. blithely final packages cajole. regular waters are final requests. regular accounts are according to |\n", + "1|AMERICA|hs use ironic, even requests. s|\n", + "2|ASIA|ges. thinly even pinto beans ca|\n", + "3|EUROPE|ly final courts cajole furiously final excuse|\n", + "4|MIDDLE EAST|uickly special accounts cajole carefully blithely close requests. 
carefully final asymptotes haggle furiousl|\n" + }; + HdfsLoader* hdfsloader_ = new HdfsLoader(); + hdfsloader_->PrepareForLoadFromHdfs(); + //printf("open the hdfsloader_"); + string file_name = "/test/claims/region.tbl"; + hdfsloader_->CheckHdfsFile(file_name); + hdfsloader_->OpenHdfsFile(file_name); + //printf("open the file_name"); + int pos = 0; + int read_num = 0; + string row_separator_ = " "; + int length = 64; + string tuple_record = ""; + void * buffer_ = claims::common::Malloc(length + 1); + int i = 0; + while(DataInjector::GetTupleTerminatedByFromHdfs(buffer_,hdfsloader_, file_name, tuple_record, + row_separator_, pos, read_num, length) ){ + EXPECT_STREQ(data[i], data[i++]); + //tuple_record.c_str()); + tuple_record.clear(); + } + +} + +*/ + + + } /* namespace loader */ } /* namespace claims */ diff --git a/loader/test/single_file_connector_test.cpp b/loader/test/single_file_connector_test.cpp index a6aafa737..914d9e504 100644 --- a/loader/test/single_file_connector_test.cpp +++ b/loader/test/single_file_connector_test.cpp @@ -40,35 +40,48 @@ #include "../single_file_connector.h" using claims::common::FilePlatform; -using namespace claims::common; +using namespace claims::common; // NOLINT namespace claims { namespace loader { -SingleFileConnectorTest::SingleFileConnectorTest() { - connector_ = NULL; - path_ = "SingleFileConnectorTest"; - data_ = "fafasfffffffffffffffdfsfsffsfsfsfs a."; - data_length_ = 38; - // snprintf(data_, 38, "fafasfffffffffffffffdfsfsffsfsfsfs a."); - LOG(INFO) << "data_: " << data_ << std::endl; -} - -void SingleFileConnectorTest::SetUpTestCase() { - std::cout << "=============" << std::endl; - LOG(INFO) << "=============" << std::endl; -} - -void SingleFileConnectorTest::TearDownTestCase() {} +// void SingleFileConnectorTest::WriteOrAppendFile(FilePlatform file_platform, +// FileOpenFlag open_flag, +// char* expect, +// int expect_length) { +// connector_ = new SingleFileConnector(file_platform, path_); +// if (open_flag == 
kCreateFile) { +// if (rSuccess != connector_->AtomicFlush(data_, data_length_)) { +// LOG(ERROR) << "failed to flush (" << path_ << ")" << std::endl; +// FAIL(); +// } +// } +// if (rSuccess != connector_->Close()) FAIL(); +// DELETE_PTR(connector_); +// +// void* read_buffer = NULL; +// uint64_t length = 0; +// SingleFileConnector* reader = NULL; +// reader = new SingleFileConnector(file_platform, path_); +// if (rSuccess != reader->LoadTotalFile(read_buffer, &length)) { +// FAIL(); +// } +// EXPECT_EQ(expect_length, length); +// EXPECT_STREQ(expect, static_cast(read_buffer)); +// reader->Close(); +// DELETE_PTR(reader); +// SUCCEED(); +//} +/* void SingleFileConnectorTest::WriteOrAppendFile(FilePlatform file_platform, FileOpenFlag open_flag, char* expect, int expect_length) { - connector_ = new SingleFileConnector(file_platform, path_); - if (rSuccess != connector_->Open(open_flag)) FAIL(); - if (rSuccess != connector_->Flush(data_, data_length_)) { + connector_ = new SingleFileConnector(file_platform, path_, open_flag); + connector_->Open(); + if (rSuccess != connector_->AtomicFlush(data_, data_length_)) { LOG(ERROR) << "failed to flush (" << path_ << ")" << std::endl; FAIL(); } @@ -77,9 +90,9 @@ void SingleFileConnectorTest::WriteOrAppendFile(FilePlatform file_platform, void* read_buffer = NULL; uint64_t length = 0; - FileConnector* reader = NULL; - reader = new SingleFileConnector(file_platform, path_); - reader->Open(kReadFile); + SingleFileConnector* reader = NULL; + reader = new SingleFileConnector(file_platform, path_, kReadFile); + reader->Open(); if (rSuccess != reader->LoadTotalFile(read_buffer, &length)) { FAIL(); } @@ -89,30 +102,86 @@ void SingleFileConnectorTest::WriteOrAppendFile(FilePlatform file_platform, DELETE_PTR(reader); SUCCEED(); } -TEST_F(SingleFileConnectorTest, DiskWrite) { - WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); -} -TEST_F(SingleFileConnectorTest, DiskAppend) { - char double_data[] = - 
"fafasfffffffffffffffdfsfsffsfsfsfs a" - ".fafasfffffffffffffffdfsfsffsfsfsfs a."; - WriteOrAppendFile(kDisk, kAppendFile, double_data, sizeof(double_data) - 1); -} -TEST_F(SingleFileConnectorTest, DiskOverWrite) { - WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); -} -TEST_F(SingleFileConnectorTest, HdfsWrite) { - WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); -} -TEST_F(SingleFileConnectorTest, HdfsAppend) { - char double_data[] = - "fafasfffffffffffffffdfsfsffsfsfsfs a" - ".fafasfffffffffffffffdfsfsffsfsfsfs a."; - WriteOrAppendFile(kHdfs, kAppendFile, double_data, sizeof(double_data) - 1); + +void SingleFileConnectorTest::TryDeleteInUsingFile(FilePlatform file_platform, + FileOpenFlag open_flag, + char* expect, + int expect_length) { + SingleFileConnector* con = + new SingleFileConnector(file_platform, path_, open_flag); + EXPECT_EQ(rSuccess, con->Delete()); + std::thread t1(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 1); + std::thread t2(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 2); + std::thread t3(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 3); + std::thread t4(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 4); + std::thread t5([con] { assert(rFileInUsing == con->Delete()); }); + std::thread t6(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 6); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + t5.join(); + t6.join(); + DELETE_PTR(con); + + SingleFileConnector* reader = + new SingleFileConnector(kDisk, path_, kReadFile); + reader->Open(); + void* buffer; + uint64_t length = 0; + EXPECT_EQ(rSuccess, reader->LoadTotalFile(buffer, &length)); + EXPECT_EQ(rFileInUsing, con->Delete()); // can't delete + EXPECT_EQ(rSuccess, con->Close()); + EXPECT_EQ(rSuccess, con->Delete()); + DELETE_PTR(reader); + + EXPECT_EQ(expect_length, length); + EXPECT_STREQ(expect, static_cast(buffer)); } -TEST_F(SingleFileConnectorTest, HdfsOverWrite) { - 
WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); + +void SingleFileConnectorTest::MultiThreadWrite(FilePlatform file_platform, + FileOpenFlag open_flag, + char* expect, + int expect_length) { + SingleFileConnector* con = + new SingleFileConnector(file_platform, path_, open_flag); + EXPECT_EQ(rSuccess, con->Delete()); + std::thread t1(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 1); + std::thread t2(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 2); + std::thread t3(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 3); + std::thread t4(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 4); + std::thread t5(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, con, + 5); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + t5.join(); + DELETE_PTR(con); + + SingleFileConnector* reader = + new SingleFileConnector(kDisk, path_, kReadFile); + reader->Open(); + void* buffer; + uint64_t length = 0; + EXPECT_EQ(rSuccess, reader->LoadTotalFile(buffer, &length)); + EXPECT_EQ(rSuccess, reader->Close()); + DELETE_PTR(reader); + + EXPECT_STREQ(expect, static_cast(buffer)); + EXPECT_EQ(expect_length, length); } +*/ + } // namespace loader } // namespace claims diff --git a/loader/test/single_file_connector_test.h b/loader/test/single_file_connector_test.h index 26acefae8..bd71c92f6 100644 --- a/loader/test/single_file_connector_test.h +++ b/loader/test/single_file_connector_test.h @@ -31,42 +31,298 @@ #include #include +#include #include #include #include #include - +#include //NOLINT #include "../../common/file_handle/file_handle_imp_factory.h" #include "../../common/memory_handle.h" +#include "../file_connector.h" +#include "../single_file_connector.h" namespace claims { namespace loader { +using std::thread; +using namespace claims::common; // NOLINT -using claims::common::FilePlatform; -using claims::common::FileOpenFlag; - -class FileConnector; +class SingleFileConnector; class 
SingleFileConnectorTest : public ::testing::Test { public: - SingleFileConnectorTest(); + SingleFileConnectorTest() + : connector_(NULL), path_("SingleFileConnectorTest") { + eightfold_data_ = + "fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; + sevenfold_data_ = + "fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; + double_data_ = + "fafasfffffffffffffffdfsfsffsfsfsfs a" + ".fafasfffffffffffffffdfsfsffsfsfsfs a."; + data_length_ = 38; + // snprintf(data_, 38, "fafasfffffffffffffffdfsfsffsfsfsfs a."); + LOG(INFO) << "data_: " << data_ << std::endl + << " double_data_: " << double_data_ << std::endl; + } + + void Delete(FilePlatform file_platform) { + SingleFileConnector* con = + new SingleFileConnector(file_platform, path_, kAppendFile); + con->Open(); + con->AtomicFlush(data_, data_length_); + con->Close(); + DELETE_PTR(con); - static void SetUpTestCase(); - static void TearDownTestCase(); + void* buffer; + uint64_t length = 0; + con = new SingleFileConnector(file_platform, path_, kReadFile); + con->Open(); + ASSERT_EQ(rSuccess, con->LoadTotalFile(buffer, &length)); + ASSERT_EQ(rFileInUsing, con->Delete()); // can't delete + ASSERT_EQ(rSuccess, con->Close()); + ASSERT_EQ(rSuccess, con->Delete()); + DELETE_PTR(con); + + con = new SingleFileConnector(file_platform, path_, kCreateFile); + con->Open(); + ASSERT_EQ(rSuccess, con->AtomicFlush(data_, data_length_)); + ASSERT_EQ(rFileInUsing, con->Delete()); // can't delete + ASSERT_EQ(rSuccess, 
con->Close()); + ASSERT_EQ(rSuccess, con->Delete()); + DELETE_PTR(con); + + con = new SingleFileConnector(file_platform, path_, kAppendFile); + con->Open(); + ASSERT_EQ(rSuccess, con->AtomicFlush(data_, data_length_)); + ASSERT_EQ(rFileInUsing, con->Delete()); // can't delete + ASSERT_EQ(rSuccess, con->Close()); + ASSERT_EQ(rSuccess, con->Delete()); + DELETE_PTR(con); + } void WriteOrAppendFile(FilePlatform file_platform, FileOpenFlag open_flag, - char* expect, int expect_length); + char* expect, int expect_length) { + connector_ = new SingleFileConnector(file_platform, path_, open_flag); + connector_->Open(); + if (rSuccess != connector_->AtomicFlush(data_, data_length_)) { + LOG(ERROR) << "failed to flush (" << path_ << ")" << std::endl; + FAIL(); + } + if (rSuccess != connector_->Close()) FAIL(); + DELETE_PTR(connector_); + + void* read_buffer = NULL; + uint64_t length = 0; + SingleFileConnector* reader = NULL; + reader = new SingleFileConnector(file_platform, path_, kReadFile); + reader->Open(); + if (rSuccess != reader->LoadTotalFile(read_buffer, &length)) { + FAIL(); + } + ASSERT_EQ(expect_length, length); + ASSERT_STREQ(expect, static_cast(read_buffer)); + reader->Close(); + DELETE_PTR(reader); + SUCCEED(); + } + + void MultiThreadWriteOrAppend(SingleFileConnector* connector, int length) { + usleep(length * 2); // stagger executing of all threads + ASSERT_EQ(rSuccess, connector->Open()); + ASSERT_EQ(rSuccess, connector->AtomicFlush( + (length % 2 ? data_ : double_data_), + (length % 2 ? 
data_length_ : data_length_ * 2))); + ASSERT_EQ(rSuccess, connector->Close()); + } + + void TryDeleteInUsingFile(FilePlatform file_platform, FileOpenFlag open_flag, + char* expect, int expect_length) { + SingleFileConnector* con = + new SingleFileConnector(file_platform, path_, open_flag); + ASSERT_EQ(rSuccess, con->Delete()); + std::thread t1(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 1); + std::thread t2(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 2); + std::thread t3(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 3); + std::thread t4(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 4); + std::thread t5([con] { ASSERT_EQ(rFileInUsing, con->Delete()); }); + std::thread t6(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 6); + DLOG(INFO) << "waiting for all thread finished"; + usleep(100); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + t5.join(); + t6.join(); + DELETE_PTR(con); + + DLOG(INFO) << "start to read file "; + SingleFileConnector* reader = + new SingleFileConnector(file_platform, path_, kReadFile); + reader->Open(); + void* buffer; + uint64_t length = 0; + ASSERT_EQ(rSuccess, reader->LoadTotalFile(buffer, &length)); + ASSERT_EQ(rFileInUsing, reader->Delete()); // can't delete + ASSERT_EQ(rSuccess, reader->Close()); + ASSERT_EQ(rSuccess, reader->Delete()); + DELETE_PTR(reader); + + // ASSERT_EQ(expect_length, length); + // ASSERT_STREQ(expect, static_cast(buffer)); + if (kCreateFile == open_flag) { + EXPECT_TRUE((0 == strcmp(data_, static_cast(buffer)) && + data_length_ == length) || + (0 == strcmp(double_data_, static_cast(buffer)) && + data_length_ * 2 == length)); + } else if (kAppendFile == open_flag) { + EXPECT_EQ(expect_length * 8, length); + EXPECT_STREQ(eightfold_data_, static_cast(buffer)); + } + } + + void MultiThreadWrite(FilePlatform file_platform, FileOpenFlag open_flag, + char* expect, int expect_length) { + SingleFileConnector* con = + new 
SingleFileConnector(file_platform, path_, open_flag); + ASSERT_EQ(rSuccess, con->Delete()); + std::thread t1(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 1); + std::thread t2(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 2); + std::thread t3(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 3); + std::thread t4(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 4); + std::thread t5(&SingleFileConnectorTest::MultiThreadWriteOrAppend, this, + con, 5); + DLOG(INFO) << "waiting for all thread finished"; + usleep(100); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + t5.join(); + DELETE_PTR(con); + + DLOG(INFO) << "start to read file "; + SingleFileConnector* reader = + new SingleFileConnector(file_platform, path_, kReadFile); + reader->Open(); + void* buffer; + uint64_t length = 0; + ASSERT_EQ(rSuccess, reader->LoadTotalFile(buffer, &length)); + ASSERT_EQ(rSuccess, reader->Close()); + DELETE_PTR(reader); + + std::cout << "data:" << data_ << std::endl << " length:" << length + << std::endl; + if (kCreateFile == open_flag) { + EXPECT_TRUE((0 == strcmp(data_, static_cast(buffer)) && + data_length_ == length) || + (0 == strcmp(double_data_, static_cast(buffer)) && + data_length_ * 2 == length)); + } else if (kAppendFile == open_flag) { + EXPECT_EQ(expect_length * 7, length); + EXPECT_STREQ(sevenfold_data_, static_cast(buffer)); + } + // ASSERT_STREQ(expect, static_cast(buffer)); + // ASSERT_EQ(expect_length, length); + } + + static void SetUpTestCase() { + std::cout << "=============" << std::endl; + LOG(INFO) << "=============" << std::endl; + } + + static void TearDownTestCase() { + LOG(INFO) << "----------=============--------------" << std::endl; + } + + virtual void SetUp() { LOG(INFO) << "-----------------------" << std::endl; } + virtual void TearDown() { LOG(INFO) << "---------------------" << std::endl; } public: - FileConnector* connector_; + SingleFileConnector* connector_ = NULL; 
std::string path_; - char* data_; - int data_length_; + char* data_ = "fafasfffffffffffffffdfsfsffsfsfsfs a."; + char* double_data_; + char* sevenfold_data_; + char* eightfold_data_; + int data_length_ = 38; }; +TEST_F(SingleFileConnectorTest, DiskWrite) { + WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, DiskAppend) { + WriteOrAppendFile(kDisk, kAppendFile, double_data_, data_length_ * 2); +} +TEST_F(SingleFileConnectorTest, DiskOverWrite) { + WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); +} -} // namespace loader +TEST_F(SingleFileConnectorTest, SingleThreadDiskDelete) { Delete(kDisk); } + +TEST_F(SingleFileConnectorTest, HdfsWrite) { + WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, HdfsAppend) { + WriteOrAppendFile(kHdfs, kAppendFile, double_data_, data_length_ * 2); +} +TEST_F(SingleFileConnectorTest, HdfsOverWrite) { + WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, SingleThreadHdfsDelete) { Delete(kHdfs); } + +////////////////////////////////// +TEST_F(SingleFileConnectorTest, DiskMultiThreadWrite) { + MultiThreadWrite(kDisk, kCreateFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, HdfsMultiThreadWrite) { + MultiThreadWrite(kHdfs, kCreateFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, DiskMultiThreadAppend) { + MultiThreadWrite(kDisk, kAppendFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, HdfsMultiThreadAppend) { + MultiThreadWrite(kHdfs, kAppendFile, data_, data_length_); +} +///////////////////////////////// +TEST_F(SingleFileConnectorTest, DiskTryDeleteInWritingFile) { + TryDeleteInUsingFile(kDisk, kCreateFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, HdfsTryDeleteInWritingFile) { + TryDeleteInUsingFile(kHdfs, kCreateFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, DiskTryDeleteInAppendingFile) { + 
TryDeleteInUsingFile(kDisk, kAppendFile, data_, data_length_); +} +TEST_F(SingleFileConnectorTest, HdfsTryDeleteInAppendingFile) { + TryDeleteInUsingFile(kHdfs, kAppendFile, data_, data_length_); +} + +} // namespace loader } // namespace claims #endif // LOADER_TEST_SINGLE_FILE_CONNECTOR_TEST_H_ diff --git a/loader/test/single_thread_single_file_connector_test.h b/loader/test/single_thread_single_file_connector_test.h new file mode 100644 index 000000000..d204419b8 --- /dev/null +++ b/loader/test/single_thread_single_file_connector_test.h @@ -0,0 +1,188 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/loader/test/single_thread_single_file_connector_test.h + * + * Created on: Mar 23, 2016 + * Author: yukai + * Email: yukai2014@gmail.com + * + * Description: + * + */ + +#ifndef LOADER_TEST_SINGLE_THREAD_SINGLE_FILE_CONNECTOR_TEST_H_ +#define LOADER_TEST_SINGLE_THREAD_SINGLE_FILE_CONNECTOR_TEST_H_ + +#include +#include +#include +#include +#include +#include + +#include "../../common/memory_handle.h" +#include "../single_thread_single_file_connector.h" + +namespace claims { +namespace loader { +using namespace claims::common; // NOLINT + +class SingleThreadSingleFileConnectorTest : public ::testing::Test { + public: + SingleThreadSingleFileConnectorTest() + : connector_(NULL), path_("SingleFileConnectorTest") { + double_data_ = + "fafasfffffffffffffffdfsfsffsfsfsfs a" + ".fafasfffffffffffffffdfsfsffsfsfsfs a."; + data_length_ = 38; + // snprintf(data_, 38, "fafasfffffffffffffffdfsfsffsfsfsfs a."); + LOG(INFO) << "data_: " << data_ << std::endl; + } + + void WriteOrAppendFile(FilePlatform file_platform, FileOpenFlag open_flag, + char* expect, int expect_length) { + connector_ = new SingleThreadSingleFileConnector(file_platform, path_); + if (open_flag == kCreateFile) { + if (rSuccess != connector_->Overwrite(data_, data_length_)) { + LOG(ERROR) << "failed to flush (" << path_ << ")" << std::endl; + FAIL(); + } + } else if (kAppendFile == open_flag) { + if (rSuccess != connector_->Append(data_, data_length_)) { + LOG(ERROR) << "failed to append (" << path_ << ")" << std::endl; + FAIL(); + } + } + if (rSuccess != connector_->Close()) FAIL(); + DELETE_PTR(connector_); + + void* read_buffer = NULL; + uint64_t length = 0; + SingleThreadSingleFileConnector* reader = + new SingleThreadSingleFileConnector(file_platform, path_); + if (rSuccess != reader->LoadTotalFile(read_buffer, &length)) { + FAIL(); + } + EXPECT_EQ(expect_length, length); + EXPECT_STREQ(expect, static_cast(read_buffer)); + reader->Close(); + DELETE_PTR(reader); + + SUCCEED(); + } + + void 
EasyTranslate(FilePlatform platform) { + void* read_buffer = NULL; + uint64_t length = 0; + connector_ = new SingleThreadSingleFileConnector(platform, path_); + EXPECT_EQ(rSuccess, connector_->Delete()); + // test access + EXPECT_EQ(false, connector_->CanAccess()); + + // overwrite + EXPECT_EQ(rSuccess, connector_->Overwrite(data_, data_length_)); + EXPECT_EQ(rSuccess, connector_->LoadTotalFile(read_buffer, &length)); + EXPECT_STREQ(data_, static_cast(read_buffer)); + EXPECT_EQ(data_length_, length); + + // test access + EXPECT_EQ(true, connector_->CanAccess()); + + // append + EXPECT_EQ(rSuccess, connector_->Append(data_, data_length_)); + EXPECT_EQ(rSuccess, connector_->LoadTotalFile(read_buffer, &length)); + EXPECT_STREQ(double_data_, static_cast(read_buffer)); + EXPECT_EQ(data_length_ * 2, length); + + // close + EXPECT_EQ(rSuccess, connector_->Close()); + // duplicate close + EXPECT_EQ(rSuccess, connector_->Close()); + + // overwrite after closed + EXPECT_EQ(rSuccess, connector_->Overwrite(data_, data_length_)); + EXPECT_EQ(rSuccess, connector_->LoadTotalFile(read_buffer, &length)); + EXPECT_STREQ(data_, static_cast(read_buffer)); + EXPECT_EQ(data_length_, length); + + // overwrite again + EXPECT_EQ(rSuccess, connector_->Overwrite(data_, data_length_)); + EXPECT_EQ(rSuccess, connector_->LoadTotalFile(read_buffer, &length)); + EXPECT_STREQ(data_, static_cast(read_buffer)); + EXPECT_EQ(data_length_, length); + + // close + EXPECT_EQ(rSuccess, connector_->Close()); + + // delete file + EXPECT_EQ(rSuccess, connector_->Delete()); + + // test access + EXPECT_EQ(false, connector_->CanAccess()); + + // double delete file + EXPECT_EQ(rSuccess, connector_->Delete()); + } + + static void SetUpTestCase() { + std::cout << "=============" << std::endl; + LOG(INFO) << "=============" << std::endl; + } + + static void TearDownTestCase() {} + virtual void SetUp() { LOG(INFO) << "-----------------------" << std::endl; } + virtual void TearDown() { LOG(INFO) << 
"---------------------" << std::endl; } + + public: + SingleThreadSingleFileConnector* connector_ = NULL; + std::string path_; + char* data_ = "fafasfffffffffffffffdfsfsffsfsfsfs a."; + char* double_data_; + int data_length_ = 38; +}; +TEST_F(SingleThreadSingleFileConnectorTest, DiskWrite) { + WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); +} +TEST_F(SingleThreadSingleFileConnectorTest, DiskAppend) { + WriteOrAppendFile(kDisk, kAppendFile, double_data_, data_length_ * 2); +} +TEST_F(SingleThreadSingleFileConnectorTest, DiskOverWrite) { + WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); +} +TEST_F(SingleThreadSingleFileConnectorTest, HdfsWrite) { + WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); +} +TEST_F(SingleThreadSingleFileConnectorTest, HdfsAppend) { + WriteOrAppendFile(kHdfs, kAppendFile, double_data_, data_length_ * 2); +} +TEST_F(SingleThreadSingleFileConnectorTest, HdfsOverWrite) { + WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); +} + +TEST_F(SingleThreadSingleFileConnectorTest, DiskEeayTranslate) { + EasyTranslate(kDisk); +} + +TEST_F(SingleThreadSingleFileConnectorTest, HdfsEeayTranslate) { + EasyTranslate(kHdfs); +} + +} // namespace loader +} // namespace claims +#endif // LOADER_TEST_SINGLE_THREAD_SINGLE_FILE_CONNECTOR_TEST_H_ diff --git a/loader/test/table_file_connector_test.cpp b/loader/test/table_file_connector_test.cpp index fac1b81af..221b5ffd9 100644 --- a/loader/test/table_file_connector_test.cpp +++ b/loader/test/table_file_connector_test.cpp @@ -37,8 +37,16 @@ #include "../../common/file_handle/file_handle_imp_factory.h" #include "../../common/memory_handle.h" #include "../file_connector.h" -#include "../table_file_connector.h" #include "../single_file_connector.h" +#include "../table_file_connector.h" + +#include "../../catalog/catalog.h" +#include "../../catalog/table.h" + +#include "../../Daemon/Daemon.h" +#include "../../Daemon/Executing.h" + +using claims::catalog::Catalog; namespace 
claims { @@ -48,96 +56,80 @@ using std::vector; using std::string; using std::endl; using std::ostringstream; -using namespace claims::common; +using namespace claims::common; // NOLINT using claims::common::FileOpenFlag; using claims::common::FilePlatform; using claims::loader::FileConnector; using claims::loader::SingleFileConnector; -TableFileConnectorTest::TableFileConnectorTest() { - connector_ = NULL; - data_ = "fafasfffffffffffffffdfsfsffsfsfsfs a."; - data_length_ = 38; - // snprintf(data_, 38, "fafasfffffffffffffffdfsfsffsfsfsfs a."); - LOG(INFO) << "data_: " << data_ << std::endl; - - for (int i = 0; i < 3; ++i) { - vector temp; - for (int j = 0; j < 3; ++j) { - ostringstream oss; - oss << "TableFileConnectorP" << i << "G" << j; - temp.push_back(oss.str()); - } - paths_.push_back(temp); - } -} - -void TableFileConnectorTest::SetUpTestCase() { - std::cout << "=============" << std::endl; - LOG(INFO) << "=============" << std::endl; -} -void TableFileConnectorTest::TearDownTestCase() {} - -void TableFileConnectorTest::WriteOrAppendFile( - common::FilePlatform file_platform, common::FileOpenFlag open_flag, - char* expect, int expect_length) { - connector_ = new TableFileConnector(file_platform, paths_); - - if (rSuccess != connector_->Open(open_flag)) FAIL(); - for (int i = 0; i < 3; ++i) { - for (int j = 0; j < 3; ++j) { - if (rSuccess != connector_->Flush(i, j, data_, data_length_)) { - LOG(ERROR) << "failed to flush (" << i << "," << j << ")" << endl; - FAIL(); - } - } - } - if (rSuccess != connector_->Close()) FAIL(); - DELETE_PTR(connector_); - - FileConnector* reader = NULL; - void* read_buffer = NULL; - uint64_t length = 0; - for (int i = 0; i < 3; ++i) { - for (int j = 0; j < 3; ++j) { - reader = new SingleFileConnector(file_platform, paths_[i][j]); - reader->Open(kReadFile); - if (rSuccess != reader->LoadTotalFile(read_buffer, &length)) { - FAIL(); - } - EXPECT_EQ(expect_length, length); - EXPECT_STREQ(expect, static_cast(read_buffer)); - 
reader->Close(); - DELETE_PTR(reader); - } - } - SUCCEED(); -} - -TEST_F(TableFileConnectorTest, DiskWrite) { - WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); -} -TEST_F(TableFileConnectorTest, DiskAppend) { - char double_data[] = - "fafasfffffffffffffffdfsfsffsfsfsfs " - "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; - WriteOrAppendFile(kDisk, kAppendFile, double_data, sizeof(double_data) - 1); -} -TEST_F(TableFileConnectorTest, DiskOverWrite) { - WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); -} -TEST_F(TableFileConnectorTest, HdfsWrite) { - WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); -} -TEST_F(TableFileConnectorTest, HdfsAppend) { - char double_data[] = - "fafasfffffffffffffffdfsfsffsfsfsfs " - "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; - WriteOrAppendFile(kHdfs, kAppendFile, double_data, sizeof(double_data) - 1); -} -TEST_F(TableFileConnectorTest, HdfsOverWrite) { - WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); -} +// TableFileConnectorTest::TableFileConnectorTest() { +// connector_ = NULL; +// data_ = "fafasfffffffffffffffdfsfsffsfsfsfs a."; +// data_length_ = 38; +// // snprintf(data_, 38, "fafasfffffffffffffffdfsfsffsfsfsfs a."); +// LOG(INFO) << "data_: " << data_ << std::endl; +// +// for (int i = 0; i < 3; ++i) { +// vector temp; +// for (int j = 0; j < 3; ++j) { +// ostringstream oss; +// oss << "TableFileConnectorP" << i << "G" << j; +// temp.push_back(oss.str()); +// } +// paths_.push_back(temp); +// } +// +// string create_table_stmt = +// "create table " + table_name + " (a int , b varchar(12));"; +// string create_prj_stmt1 = "create projection on " + table_name + +// " (a , b ) number = 2 partitioned on a ;"; +// string create_prj_stmt2 = "create projection on " + table_name + +// " (a ) number = 3 partitioned on a ;"; +// ExecutedResult* er = new ExecutedResult(); +// Executing::run_sql(create_table_stmt, er); +// Executing::run_sql(create_prj_stmt1, er); +// Executing::run_sql(create_prj_stmt2, 
er); +//} +// +// void TableFileConnectorTest::WriteOrAppendFile( +// common::FilePlatform file_platform, common::FileOpenFlag open_flag, +// char* expect, int expect_length) { +// TableDescriptor* table = Catalog::getInstance()->getTable(table_name); +// connector_ = new TableFileConnector(file_platform, table); +// +// for (int i = 0; i < table->getNumberOfProjection(); ++i) { +// for (int j = 0; j < (*table->GetProjectionList())[i] +// ->getPartitioner() +// ->getNumberOfPartitions(); +// ++j) { +// if (rSuccess != connector_->AtomicFlush(i, j, data_, data_length_)) { +// LOG(ERROR) << "failed to flush (" << i << "," << j << ")" << endl; +// FAIL(); +// } +// } +// } +// if (rSuccess != connector_->Close()) FAIL(); +// DELETE_PTR(connector_); +// +// SingleFileConnector* reader = NULL; +// void* read_buffer = NULL; +// uint64_t length = 0; +// +// vector> write_path_name_ = table->GetAllPartitionsPath(); +// for (auto projection_iter : write_path_name_) { +// for (auto partition_iter : projection_iter) { +// reader = new SingleFileConnector(file_platform, partition_iter); +// if (rSuccess != reader->LoadTotalFile(read_buffer, &length)) { +// FAIL(); +// } +// EXPECT_EQ(expect_length, length); +// EXPECT_STREQ(expect, static_cast(read_buffer)); +// reader->Close(); +// DELETE_PTR(reader); +// } +// } +// SUCCEED(); +//} } // namespace loader diff --git a/loader/test/table_file_connector_test.h b/loader/test/table_file_connector_test.h index 04b4ae793..704d5d9fd 100644 --- a/loader/test/table_file_connector_test.h +++ b/loader/test/table_file_connector_test.h @@ -36,32 +36,213 @@ #include #include "../../common/file_handle/file_handle_imp_factory.h" +#include "../../common/memory_handle.h" +#include "../file_connector.h" +#include "../single_file_connector.h" +#include "../table_file_connector.h" + +#include "../../catalog/catalog.h" +#include "../../catalog/table.h" +#include "../../common/error_define.h" + +#include "../../Daemon/Daemon.h" +#include 
"../../Daemon/Executing.h" namespace claims { namespace loader { using std::vector; using std::endl; using std::string; -class FileConnector; +using claims::catalog::Catalog; + +using namespace claims::common; // NOLINT class TableFileConnectorTest : public ::testing::Test { public: - TableFileConnectorTest(); + TableFileConnectorTest() { + connector_ = NULL; + eightfold_data_ = + "fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; + sevenfold_data_ = + "fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; + double_data_ = + "fafasfffffffffffffffdfsfsffsfsfsfs a" + ".fafasfffffffffffffffdfsfsffsfsfsfs a."; + data_length_ = 38; + LOG(INFO) << "data_: " << data_ << std::endl; + + // creation of table and projection is done in elastic_iterator_model_test.h + string create_table_stmt = + "create table " + table_name + " (a int , b varchar(12));"; + string create_prj_stmt1 = "create projection on " + table_name + + " (a , b ) number = 2 partitioned on a ;"; + string create_prj_stmt2 = "create projection on " + table_name + + " (a ) number = 3 partitioned on a ;"; + RetCode ret = rSuccess; + EXEC_AND_ONLY_LOG_ERROR(ret, Catalog::getInstance()->restoreCatalog(), + "failed to restore catalog"); + } - static void SetUpTestCase(); - static void TearDownTestCase(); + static void SetUpTestCase() { + std::cout << "=============" << std::endl; + LOG(INFO) << "=============" << std::endl; + } + static void TearDownTestCase() {} void 
WriteOrAppendFile(common::FilePlatform file_platform, common::FileOpenFlag open_flag, char* expect, - int expect_length); + int expect_length) { + TableDescriptor* table = Catalog::getInstance()->getTable(table_name); + assert(table != NULL); + connector_ = new TableFileConnector(file_platform, table, open_flag); + + connector_->Open(); + for (int i = 0; i < table->getNumberOfProjection(); ++i) { + for (int j = 0; j < (*table->GetProjectionList())[i] + ->getPartitioner() + ->getNumberOfPartitions(); + ++j) { + if (rSuccess != connector_->AtomicFlush(i, j, data_, data_length_)) { + LOG(ERROR) << "failed to flush (" << i << "," << j << ")" << endl; + FAIL(); + } + } + } + if (rSuccess != connector_->Close()) FAIL(); + DELETE_PTR(connector_); + + SingleFileConnector* reader = NULL; + void* read_buffer = NULL; + uint64_t length = 0; + + vector> write_path_name_ = table->GetAllPartitionsPath(); + for (auto projection_iter : write_path_name_) { + for (auto partition_iter : projection_iter) { + reader = + new SingleFileConnector(file_platform, partition_iter, kReadFile); + if (rSuccess != reader->LoadTotalFile(read_buffer, &length)) { + FAIL(); + } + EXPECT_EQ(expect_length, length); + EXPECT_STREQ(expect, static_cast(read_buffer)); + reader->Close(); + DELETE_PTR(reader); + } + } + SUCCEED(); + } + + void Delete(FilePlatform file_platform) { + TableFileConnector* con = new TableFileConnector( + file_platform, Catalog::getInstance()->getTable(table_name), + kAppendFile); + con->Open(); + con->AtomicFlush(0, 0, data_, data_length_); + con->Close(); + DELETE_PTR(con); + + void* buffer; + uint64_t length = 0; + con = new TableFileConnector( + file_platform, Catalog::getInstance()->getTable(table_name), kReadFile); + con->Open(); + // ASSERT_EQ(rSuccess, con->LoadTotalFile(buffer, &length)); + ASSERT_EQ(rFileInUsing, con->DeleteAllTableFiles()); // can't delete + ASSERT_EQ(rSuccess, con->Close()); + ASSERT_EQ(rSuccess, con->DeleteAllTableFiles()); + DELETE_PTR(con); + + con = 
new TableFileConnector(file_platform, + Catalog::getInstance()->getTable(table_name), + kCreateFile); + con->Open(); + ASSERT_EQ(rSuccess, con->AtomicFlush(0, 0, data_, data_length_)); + ASSERT_EQ(rFileInUsing, con->DeleteAllTableFiles()); // can't delete + ASSERT_EQ(rSuccess, con->Close()); + ASSERT_EQ(rSuccess, con->DeleteAllTableFiles()); + DELETE_PTR(con); + + con = new TableFileConnector(file_platform, + Catalog::getInstance()->getTable(table_name), + kAppendFile); + con->Open(); + ASSERT_EQ(rSuccess, con->AtomicFlush(1, 0, data_, data_length_)); + ASSERT_EQ(rFileInUsing, con->DeleteAllTableFiles()); // can't delete + ASSERT_EQ(rSuccess, con->Close()); + ASSERT_EQ(rSuccess, con->DeleteAllTableFiles()); + DELETE_PTR(con); + } + + void MultiThreadWriteOrAppend(TableFileConnector* connector, int length, + int i, int j) { + usleep(length * 2); // stagger executing of all threads + ASSERT_EQ(rSuccess, connector->Open()); + ASSERT_EQ(rSuccess, connector->AtomicFlush( + i, j, (length % 2 ? data_ : double_data_), + (length % 2 ? 
data_length_ : data_length_ * 2))); + ASSERT_EQ(rSuccess, connector->Close()); + } + + void MultiThreadWrite(FilePlatform file_platform, FileOpenFlag open_flag, + char* expect, int expect_length) { + TableFileConnector* con = new TableFileConnector( + file_platform, Catalog::getInstance()->getTable(table_name), + kAppendFile); + ASSERT_EQ(rSuccess, con->DeleteAllTableFiles()); + } + + virtual void SetUp() { LOG(INFO) << "-----------------------" << std::endl; } + virtual void TearDown() { LOG(INFO) << "---------------------" << std::endl; } public: - FileConnector* connector_; + TableFileConnector* connector_; vector> paths_; + string table_name = "sfdfsf"; char* data_; - int data_length_; + char* double_data_; + char* sevenfold_data_; + char* eightfold_data_; + int data_length_ = 38; }; +TEST_F(TableFileConnectorTest, DiskWrite) { + WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); +} +TEST_F(TableFileConnectorTest, DiskAppend) { + char double_data[] = + "fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; + WriteOrAppendFile(kDisk, kAppendFile, double_data, sizeof(double_data) - 1); +} +TEST_F(TableFileConnectorTest, DiskOverWrite) { + WriteOrAppendFile(kDisk, kCreateFile, data_, data_length_); +} +TEST_F(TableFileConnectorTest, HdfsWrite) { + WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); +} +TEST_F(TableFileConnectorTest, HdfsAppend) { + char double_data[] = + "fafasfffffffffffffffdfsfsffsfsfsfs " + "a.fafasfffffffffffffffdfsfsffsfsfsfs a."; + WriteOrAppendFile(kHdfs, kAppendFile, double_data, sizeof(double_data) - 1); +} +TEST_F(TableFileConnectorTest, HdfsOverWrite) { + WriteOrAppendFile(kHdfs, kCreateFile, data_, data_length_); +} + } // namespace loader } // namespace claims diff --git a/logical_operator/Makefile.am b/logical_operator/Makefile.am index 698d07c1d..958258ed0 100644 --- a/logical_operator/Makefile.am +++ b/logical_operator/Makefile.am @@ -1,8 +1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive \ 
--I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux\ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization @@ -41,7 +40,8 @@ liblogicalqueryplan_a_SOURCES = \ logical_cross_join.cpp logical_cross_join.h \ logical_subquery.cpp logical_subquery.h \ logical_sort.cpp logical_sort.h \ - logical_delete_filter.cpp logical_delete_filter.h + logical_delete_filter.cpp logical_delete_filter.h \ + logical_outer_join.cpp logical_outer_join.h SUBDIRS = diff --git a/logical_operator/logical_aggregation.cpp b/logical_operator/logical_aggregation.cpp index 6fe314f74..dabef085b 100755 --- a/logical_operator/logical_aggregation.cpp +++ b/logical_operator/logical_aggregation.cpp @@ -56,6 +56,7 @@ using claims::common::ExprUnary; using claims::common::OperType; using claims::common::ExprConst; using claims::common::ExprColumn; +using claims::common::LogicInitCnxt; using claims::physical_operator::PhysicalAggregation; namespace claims { @@ -129,16 +130,17 @@ PlanContext LogicalAggregation::GetPlanContext() { ChangeAggAttrsForAVG(); // initialize expression of group_by_attrs and aggregation_attrs - Schema* input_schema = GetSchema(child_context.attribute_list_); - map column_to_id; - GetColumnToId(child_context.attribute_list_, column_to_id); + LogicInitCnxt licnxt; + licnxt.schema0_ = child_context.GetSchema(); + GetColumnToId(child_context.attribute_list_, licnxt.column_id0_); + for (int i = 0; i < group_by_attrs_.size(); ++i) { - group_by_attrs_[i]->InitExprAtLogicalPlan(group_by_attrs_[i]->actual_type_, - column_to_id, input_schema); + licnxt.return_type_ = group_by_attrs_[i]->actual_type_; + group_by_attrs_[i]->InitExprAtLogicalPlan(licnxt); } for (int i = 0; i < aggregation_attrs_.size(); ++i) { - aggregation_attrs_[i]->InitExprAtLogicalPlan( - aggregation_attrs_[i]->actual_type_, column_to_id, input_schema); + licnxt.return_type_ = aggregation_attrs_[i]->actual_type_; + 
aggregation_attrs_[i]->InitExprAtLogicalPlan(licnxt); } if (CanOmitHashRepartition(child_context)) { @@ -198,7 +200,8 @@ PlanContext LogicalAggregation::GetPlanContext() { ret.plan_partitioner_.set_partition_key( group_by_attrs_[0]->ExprNodeToAttr(0)); } - + /// set location of this coordinator, should invoke + /// coordinator.get_location() NodeID location = 0; int64_t data_cardinality = EstimateGroupByCardinality(child_context); PartitionOffset offset = 0; @@ -247,14 +250,15 @@ void LogicalAggregation::SetGroupbyAndAggAttrsForGlobalAgg( Schema* input_schema) { ExprNode* group_by_node = NULL; ExprUnary* agg_node = NULL; - map column_to_id; + LogicInitCnxt licnxt; + licnxt.schema0_ = input_schema; int group_by_size = group_by_attrs_.size(); // map column name to id for (int i = 0; i < group_by_size; ++i) { - column_to_id["NULL_MID." + group_by_attrs_[i]->alias_] = i; + licnxt.column_id0_["NULL_MID." + group_by_attrs_[i]->alias_] = i; } for (int i = 0; i < aggregation_attrs_.size(); ++i) { - column_to_id["NULL_MID." + aggregation_attrs_[i]->alias_] = + licnxt.column_id0_["NULL_MID." 
+ aggregation_attrs_[i]->alias_] = i + group_by_size; } // reconstruct group by attributes and initialize them @@ -262,8 +266,8 @@ void LogicalAggregation::SetGroupbyAndAggAttrsForGlobalAgg( group_by_node = new ExprColumn( ExprNodeType::t_qcolcumns, group_by_attrs_[i]->actual_type_, group_by_attrs_[i]->alias_, "NULL_MID", group_by_attrs_[i]->alias_); - group_by_node->InitExprAtLogicalPlan(group_by_node->actual_type_, - column_to_id, input_schema); + licnxt.return_type_ = group_by_node->actual_type_; + group_by_node->InitExprAtLogicalPlan(licnxt); group_by_attrs.push_back(group_by_node); } // reconstruct aggregation attributes and initialize them @@ -275,8 +279,8 @@ void LogicalAggregation::SetGroupbyAndAggAttrsForGlobalAgg( aggregation_attrs_[i]->actual_type_, aggregation_attrs_[i]->alias_, "NULL_MID", aggregation_attrs_[i]->alias_)); - agg_node->InitExprAtLogicalPlan(agg_node->actual_type_, column_to_id, - input_schema); + licnxt.return_type_ = agg_node->actual_type_; + agg_node->InitExprAtLogicalPlan(licnxt); aggregation_attrs.push_back(agg_node); } } @@ -324,7 +328,6 @@ PhysicalOperatorBase* LogicalAggregation::GetPhysicalPlan( expander_state.child_ = local_aggregation; expander_state.schema_ = local_agg_state.hash_schema_->duplicateSchema(); PhysicalOperatorBase* expander_lower = new Expander(expander_state); - ExchangeMerger::State exchange_state; exchange_state.block_size_ = block_size; exchange_state.child_ = expander_lower; @@ -358,9 +361,9 @@ PhysicalOperatorBase* LogicalAggregation::GetPhysicalPlan( global_agg_state.num_of_buckets_ = local_agg_state.num_of_buckets_; global_agg_state.avg_index_ = avg_id_in_agg_; global_agg_state.count_column_id_ = count_column_id_; - PhysicalOperatorBase* global_aggregation = - new PhysicalAggregation(global_agg_state); - ret = global_aggregation; + // PhysicalOperatorBase* global_aggregation = + // new PhysicalAggregation(global_agg_state); + ret = new PhysicalAggregation(global_agg_state); break; } case kReparGlobalAgg: 
{ diff --git a/logical_operator/logical_cross_join.cpp b/logical_operator/logical_cross_join.cpp index d7ed48c4d..c83e575bb 100644 --- a/logical_operator/logical_cross_join.cpp +++ b/logical_operator/logical_cross_join.cpp @@ -1,4 +1,5 @@ -#include "../common/error_no.h" +#include "../common/expression/expr_node.h" +#include "../common/memory_handle.h" /* * Copyright [2012-2015] DaSE@ECNU @@ -29,6 +30,10 @@ */ #define GLOG_NO_ABBREVIATED_SEVERITIES +#include +#include +#include +#include "../common/error_no.h" #include "../Config.h" #include "../IDsGenerator.h" #include "../Resource/NodeTracker.h" @@ -38,6 +43,7 @@ #include "../physical_operator/exchange_merger.h" #include "../logical_operator/logical_cross_join.h" #include "../physical_operator/physical_nest_loop_join.h" +using claims::common::LogicInitCnxt; using claims::physical_operator::ExchangeMerger; using claims::physical_operator::Expander; using claims::physical_operator::PhysicalNestLoopJoin; @@ -60,6 +66,17 @@ LogicalCrossJoin::LogicalCrossJoin(LogicalOperator* left_child, left_child_(left_child), right_child_(right_child), plan_context_(NULL), + join_policy_(kUninitialized) { + join_condi_.clear(); +} +LogicalCrossJoin::LogicalCrossJoin(LogicalOperator* left_child, + LogicalOperator* right_child, + std::vector join_condi) + : LogicalOperator(kLogicalCrossJoin), + left_child_(left_child), + right_child_(right_child), + plan_context_(NULL), + join_condi_(join_condi), join_policy_(kUninitialized) {} LogicalCrossJoin::~LogicalCrossJoin() { if (NULL != plan_context_) { @@ -190,6 +207,16 @@ PlanContext LogicalCrossJoin::GetPlanContext() { } default: { assert(false); } } + // initialize expression + LogicInitCnxt licnxt; + GetColumnToId(left_plan_context.attribute_list_, licnxt.column_id0_); + GetColumnToId(right_plan_context.attribute_list_, licnxt.column_id1_); + licnxt.schema0_ = left_plan_context.GetSchema(); + licnxt.schema1_ = right_plan_context.GetSchema(); + for (int i = 0; i < join_condi_.size(); 
++i) { + licnxt.return_type_ = join_condi_[i]->actual_type_; + join_condi_[i]->InitExprAtLogicalPlan(licnxt); + } plan_context_ = new PlanContext(); *plan_context_ = ret; } else { @@ -256,6 +283,7 @@ PhysicalOperatorBase* LogicalCrossJoin::GetPhysicalPlan( PlanContext right_plan_context = right_child_->GetPlanContext(); PhysicalNestLoopJoin::State state; state.block_size_ = block_size; + state.join_condi_ = join_condi_; state.input_schema_left_ = GetSchema(left_plan_context.attribute_list_); state.input_schema_right_ = GetSchema(right_plan_context.attribute_list_); state.output_schema_ = GetSchema(plan_context_->attribute_list_); @@ -312,7 +340,7 @@ int LogicalCrossJoin::GenerateChildPhysicalQueryPlan( expander_state.block_size_ = blocksize; expander_state.init_thread_count_ = Config::initial_degree_of_parallelism; expander_state.child_ = right_child_->GetPhysicalPlan(blocksize); - expander_state.schema_ = left_plan_context.GetSchema(); + expander_state.schema_ = right_plan_context.GetSchema(); PhysicalOperatorBase* expander = new Expander(expander_state); ExchangeMerger::State exchange_state; exchange_state.block_size_ = blocksize; diff --git a/logical_operator/logical_cross_join.h b/logical_operator/logical_cross_join.h index 69751d881..5801d2696 100644 --- a/logical_operator/logical_cross_join.h +++ b/logical_operator/logical_cross_join.h @@ -29,9 +29,14 @@ #ifndef LOGICAL_OPERATOR_LOGICAL_CROSS_JOIN_H_ #define LOGICAL_OPERATOR_LOGICAL_CROSS_JOIN_H_ +#include #include "../physical_operator/physical_operator_base.h" #include "../common/error_define.h" +#include "../common/expression/expr_node.h" #include "../logical_operator/logical_operator.h" +#include "../sql_parser/ast_node/ast_node.h" + +using claims::common::ExprNode; namespace claims { namespace logical_operator { @@ -43,6 +48,9 @@ class LogicalCrossJoin : public LogicalOperator { public: LogicalCrossJoin(); LogicalCrossJoin(LogicalOperator* left_child, LogicalOperator* right_child); + 
LogicalCrossJoin(LogicalOperator* left_child, LogicalOperator* right_child, + std::vector join_condi); + virtual ~LogicalCrossJoin(); PlanContext GetPlanContext(); PhysicalOperatorBase* GetPhysicalPlan(const unsigned& blocksize); @@ -97,6 +105,8 @@ class LogicalCrossJoin : public LogicalOperator { LogicalOperator* right_child_; PlanContext* plan_context_; JoinPolicy join_policy_; + + std::vector join_condi_; }; } // namespace logical_operator } // namespace claims diff --git a/logical_operator/logical_equal_join.cpp b/logical_operator/logical_equal_join.cpp index a2e4f38fe..c47a17ea1 100755 --- a/logical_operator/logical_equal_join.cpp +++ b/logical_operator/logical_equal_join.cpp @@ -34,6 +34,7 @@ #include #include "../catalog/stat/StatManager.h" +#include "../common/expression/expr_node.h" #include "../Config.h" #include "../IDsGenerator.h" #include "../common/Logging.h" @@ -42,6 +43,7 @@ #include "../physical_operator/physical_hash_join.h" #include "../physical_operator/physical_operator_base.h" +using claims::common::LogicInitCnxt; using claims::physical_operator::ExchangeMerger; using claims::physical_operator::Expander; using claims::physical_operator::PhysicalHashJoin; @@ -64,6 +66,22 @@ LogicalEqualJoin::LogicalEqualJoin(std::vector joinpair_list, right_join_key_list_.push_back(joinpair_list[i].right_join_attr_); } } +LogicalEqualJoin::LogicalEqualJoin(std::vector joinpair_list, + LogicalOperator* left_input, + LogicalOperator* right_input, + vector join_condi) + : LogicalOperator(kLogicalEqualJoin), + joinkey_pair_list_(joinpair_list), + left_child_(left_input), + right_child_(right_input), + join_condi_(join_condi), + join_policy_(kNull), + plan_context_(NULL) { + for (unsigned i = 0; i < joinpair_list.size(); ++i) { + left_join_key_list_.push_back(joinpair_list[i].left_join_attr_); + right_join_key_list_.push_back(joinpair_list[i].right_join_attr_); + } +} LogicalEqualJoin::~LogicalEqualJoin() { if (NULL != plan_context_) { delete plan_context_; @@ 
-264,6 +282,15 @@ PlanContext LogicalEqualJoin::GetPlanContext() { } } + LogicInitCnxt licnxt; + GetColumnToId(left_dataflow.attribute_list_, licnxt.column_id0_); + GetColumnToId(right_dataflow.attribute_list_, licnxt.column_id1_); + licnxt.schema0_ = left_dataflow.GetSchema(); + licnxt.schema1_ = right_dataflow.GetSchema(); + for (int i = 0; i < join_condi_.size(); ++i) { + licnxt.return_type_ = join_condi_[i]->actual_type_; + join_condi_[i]->InitExprAtLogicalPlan(licnxt); + } plan_context_ = new PlanContext(); *plan_context_ = ret; lock_->release(); @@ -345,9 +372,7 @@ PhysicalOperatorBase* LogicalEqualJoin::GetPhysicalPlan( state.join_index_left_ = GetLeftJoinKeyIds(); state.join_index_right_ = GetRightJoinKeyIds(); - - state.payload_left_ = GetLeftPayloadIds(); - state.payload_right_ = GetRightPayloadIds(); + state.join_condi_ = join_condi_; switch (join_policy_) { case kNoRepartition: { state.child_left_ = child_iterator_left; @@ -562,41 +587,7 @@ std::vector LogicalEqualJoin::GetRightJoinKeyIds() const { } return ret; } -std::vector LogicalEqualJoin::GetLeftPayloadIds() const { - std::vector ret; - const PlanContext dataflow = left_child_->GetPlanContext(); - const std::vector left_join_key_index_list = GetLeftJoinKeyIds(); - - for (unsigned i = 0; i < dataflow.attribute_list_.size(); i++) { - bool found_equal = false; - for (unsigned j = 0; j < left_join_key_index_list.size(); j++) { - if (i == left_join_key_index_list[j]) { - found_equal = true; - break; - } - } - if (!found_equal) { - ret.push_back(i); - } - } - return ret; -} -std::vector LogicalEqualJoin::GetRightPayloadIds() const { - std::vector ret; - const PlanContext dataflow = right_child_->GetPlanContext(); - const std::vector right_join_key_index_list = GetRightJoinKeyIds(); - - for (unsigned i = 0; i < dataflow.attribute_list_.size(); i++) { - for (unsigned j = 0; j < right_join_key_index_list.size(); j++) { - if (i == right_join_key_index_list[j]) { - break; - } - } - ret.push_back(i); - } - 
return ret; -} int LogicalEqualJoin::GetIdInLeftJoinKeys(const Attribute& attribute) const { for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { if (joinkey_pair_list_[i].left_join_attr_ == attribute) { diff --git a/logical_operator/logical_equal_join.h b/logical_operator/logical_equal_join.h index 51087c7a8..2eb643e73 100755 --- a/logical_operator/logical_equal_join.h +++ b/logical_operator/logical_equal_join.h @@ -74,6 +74,9 @@ class LogicalEqualJoin : public LogicalOperator { */ LogicalEqualJoin(std::vector, LogicalOperator* left_input, LogicalOperator* right_input); + LogicalEqualJoin(std::vector joinpair_list, + LogicalOperator* left_input, LogicalOperator* right_input, + vector join_condi); virtual ~LogicalEqualJoin(); /** * @brief Method description: Get the child information. @@ -107,8 +110,6 @@ class LogicalEqualJoin : public LogicalOperator { private: std::vector GetLeftJoinKeyIds() const; std::vector GetRightJoinKeyIds() const; - std::vector GetLeftPayloadIds() const; - std::vector GetRightPayloadIds() const; int GetIdInLeftJoinKeys(const Attribute&) const; int GetIdInLeftJoinKeys( const Attribute&, @@ -177,6 +178,7 @@ class LogicalEqualJoin : public LogicalOperator { const Attribute& attr_left, const Attribute& attr_right) const; private: + std::vector join_condi_; std::vector joinkey_pair_list_; std::vector left_join_key_list_; std::vector right_join_key_list_; diff --git a/logical_operator/logical_filter.cpp b/logical_operator/logical_filter.cpp index 5338db96c..2edb5f6fc 100644 --- a/logical_operator/logical_filter.cpp +++ b/logical_operator/logical_filter.cpp @@ -35,12 +35,14 @@ #include "../catalog/stat/StatManager.h" #include "../IDsGenerator.h" #include "../common/AttributeComparator.h" +#include "../common/expression/expr_node.h" #include "../common/TypePromotionMap.h" #include "../common/TypeCast.h" #include "../common/Expression/initquery.h" #include "../physical_operator/exchange_merger.h" #include 
"../physical_operator/physical_filter.h" +using claims::common::LogicInitCnxt; using claims::physical_operator::ExchangeMerger; using claims::physical_operator::PhysicalFilter; namespace claims { @@ -86,6 +88,7 @@ PlanContext LogicalFilter::GetPlanContext() { * of the input data, which may be maintained in the * catalog module. */ + const unsigned before_filter_cardinality = plan_context.plan_partitioner_.GetPartition(i)->get_cardinality(); const unsigned after_filter_cardinality = @@ -95,23 +98,29 @@ PlanContext LogicalFilter::GetPlanContext() { } } } - std::map column_to_id; - GetColumnToId(plan_context.attribute_list_, column_to_id); - Schema* input_schema = GetSchema(plan_context.attribute_list_); +// std::map column_to_id; +// GetColumnToId(plan_context.attribute_list_, column_to_id); +// Schema* input_schema = GetSchema(plan_context.attribute_list_); #ifdef NEWCONDI for (int i = 0; i < condi_.size(); ++i) { // Initialize expression of logical execution plan. InitExprAtLogicalPlan(condi_[i], t_boolean, column_to_id, input_schema); } #else + LogicInitCnxt licnxt; + GetColumnToId(plan_context.attribute_list_, licnxt.column_id0_); + licnxt.schema0_ = plan_context.GetSchema(); for (int i = 0; i < condition_.size(); ++i) { - condition_[i]->InitExprAtLogicalPlan(t_boolean, column_to_id, input_schema); + licnxt.return_type_ = t_boolean; + condition_[i]->InitExprAtLogicalPlan(licnxt); } #endif plan_context_ = new PlanContext(); *plan_context_ = plan_context; + plan_context_->attribute_list_.assign(plan_context.attribute_list_.begin(), + plan_context.attribute_list_.end()); lock_->release(); - return plan_context; + return *plan_context_; } PhysicalOperatorBase* LogicalFilter::GetPhysicalPlan( diff --git a/logical_operator/logical_operator.h b/logical_operator/logical_operator.h index c8127998c..28bfb8cdd 100755 --- a/logical_operator/logical_operator.h +++ b/logical_operator/logical_operator.h @@ -55,7 +55,8 @@ enum OperatorType { kLogicalCrossJoin, kLogicalLimit, 
kLogicalSubquery, - kLogicalDeleteFilter + kLogicalDeleteFilter, + kLogicalOuterJoin }; typedef PhysicalOperatorBase* PhysicalPlan; @@ -74,8 +75,9 @@ typedef struct PhysicalPlanDescriptor { */ class LogicalOperator { public: - LogicalOperator(){}; - LogicalOperator(OperatorType operator_type) : operator_type_(operator_type) { + LogicalOperator() {} + LogicalOperator(OperatorType operator_type) { + operator_type_ = operator_type; lock_ = new Lock(); } virtual ~LogicalOperator() { @@ -131,7 +133,7 @@ class LogicalOperator { Lock* lock_; // static std::atomic_uint MIDINADE_TABLE_ID; - private: + protected: OperatorType operator_type_; }; diff --git a/logical_operator/logical_outer_join.cpp b/logical_operator/logical_outer_join.cpp new file mode 100644 index 000000000..dbc5d9ba3 --- /dev/null +++ b/logical_operator/logical_outer_join.cpp @@ -0,0 +1,890 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/logical_operator/logical_left_join.cpp + * + * Created on: Mar 16, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ +#include "../logical_operator/logical_outer_join.h" +#include +#include +#include +#include +#include + +#include "../catalog/stat/StatManager.h" +#include "../common/expression/expr_node.h" +#include "../Config.h" +#include "../IDsGenerator.h" +#include "../common/Logging.h" +#include "../physical_operator/exchange_merger.h" +#include "../physical_operator/expander.h" +#include "../physical_operator/physical_hash_join.h" +#include "../physical_operator/physical_operator_base.h" +#include "../physical_operator/physical_outer_hash_join.h" + +using claims::common::LogicInitCnxt; +using claims::physical_operator::ExchangeMerger; +using claims::physical_operator::Expander; +using claims::physical_operator::PhysicalHashJoin; +using claims::physical_operator::PhysicalOperatorBase; +using claims::physical_operator::PhysicalOuterHashJoin; + +// using claims::physical_operator::PhysicalJoin; +namespace claims { +namespace logical_operator { +LogicalOuterJoin::LogicalOuterJoin( + std::vector joinpair_list, + LogicalOperator* left_input, LogicalOperator* right_input, int join_type) + : LogicalOperator(kLogicalOuterJoin), + joinkey_pair_list_(joinpair_list), + left_child_(left_input), + right_child_(right_input), + join_policy_(kNull), + plan_context_(NULL), + join_type_(join_type) { + for (unsigned i = 0; i < joinpair_list.size(); ++i) { + left_join_key_list_.push_back(joinpair_list[i].left_join_attr_); + right_join_key_list_.push_back(joinpair_list[i].right_join_attr_); + } +} +LogicalOuterJoin::LogicalOuterJoin( + std::vector joinpair_list, + LogicalOperator* left_input, LogicalOperator* right_input, int join_type, + vector join_condi) + : LogicalOperator(kLogicalOuterJoin), + joinkey_pair_list_(joinpair_list), + left_child_(left_input), + right_child_(right_input), + join_policy_(kNull), + 
plan_context_(NULL), + join_type_(join_type), + join_condi_(join_condi) {} +LogicalOuterJoin::~LogicalOuterJoin() { + if (NULL != plan_context_) { + delete plan_context_; + plan_context_ = NULL; + } + if (NULL != left_child_) { + delete left_child_; + left_child_ = NULL; + } + if (NULL != right_child_) { + delete right_child_; + right_child_ = NULL; + } +} +void LogicalOuterJoin::DecideJoinPolicy(const PlanContext& left_dataflow, + const PlanContext& right_dataflow) { + const bool left_dataflow_key_partitioned = CanOmitHashRepartition( + left_join_key_list_, left_dataflow.plan_partitioner_); + const bool right_dataflow_key_partitioned = CanOmitHashRepartition( + right_join_key_list_, right_dataflow.plan_partitioner_); + + const Attribute left_partition_key = + left_dataflow.plan_partitioner_.get_partition_key(); + const Attribute right_partition_key = + right_dataflow.plan_partitioner_.get_partition_key(); + if (left_dataflow_key_partitioned && right_dataflow_key_partitioned) { + if (IsInOneJoinPair(left_partition_key, right_partition_key)) { + // the best situation + if (left_dataflow.plan_partitioner_.HasSamePartitionLocation( + right_dataflow.plan_partitioner_)) { + join_policy_ = kNoRepartition; + } else { + join_policy_ = + DecideLeftOrRightRepartition(left_dataflow, right_dataflow); + } + } else { + join_policy_ = + DecideLeftOrRightRepartition(left_dataflow, right_dataflow); + } + } else { + if (left_dataflow_key_partitioned && !right_dataflow_key_partitioned) { + join_policy_ = kRightRepartition; + } + if (!left_dataflow_key_partitioned && right_dataflow_key_partitioned) { + join_policy_ = kLeftRepartition; + } + if (!left_dataflow_key_partitioned && !right_dataflow_key_partitioned) + join_policy_ = kCompleteRepartition; + } +} +PlanContext LogicalOuterJoin::GetPlanContext() { + lock_->acquire(); + if (NULL != plan_context_) { + // the data flow has been computed*/ + lock_->release(); + return *plan_context_; + } + + /** + * in the current implementation, 
only the hash join is considered + */ + PlanContext left_dataflow = left_child_->GetPlanContext(); + PlanContext right_dataflow = right_child_->GetPlanContext(); + PlanContext ret; + DecideJoinPolicy(left_dataflow, right_dataflow); + const Attribute left_partition_key = + left_dataflow.plan_partitioner_.get_partition_key(); + const Attribute right_partition_key = + right_dataflow.plan_partitioner_.get_partition_key(); + ret.attribute_list_.insert(ret.attribute_list_.end(), + left_dataflow.attribute_list_.begin(), + left_dataflow.attribute_list_.end()); + ret.attribute_list_.insert(ret.attribute_list_.end(), + right_dataflow.attribute_list_.begin(), + right_dataflow.attribute_list_.end()); + /**finally, construct the output data flow according to the join police**/ + switch (join_policy_) { + case kNoRepartition: { + LOG(INFO) << "no_repartition" << std::endl; + /** + * Use the left partitioner as the output dataflow partitioner. + * TODO(admin): In fact, the output dataflow partitioner should contains + * both left partitioner and right partitioner. + */ + // ret.property_.partitioner=left_dataflow.property_.partitioner; + ret.plan_partitioner_.set_partition_list( + left_dataflow.plan_partitioner_.get_partition_list()); + ret.plan_partitioner_.set_partition_func( + left_dataflow.plan_partitioner_.get_partition_func()); + ret.plan_partitioner_.set_partition_key(left_partition_key); + ret.plan_partitioner_.AddShadowPartitionKey(right_partition_key); + /** + * Set the generated data size. + * Currently, we assume the generated data size is the sum of input data + * volume. + * TODO(admin): Some reasonable output size estimation is needed. 
+ */ + for (unsigned i = 0; i < ret.plan_partitioner_.GetNumberOfPartitions(); + i++) { + const unsigned l_cardinality = + left_dataflow.plan_partitioner_.GetPartition(i)->get_cardinality(); + const unsigned r_cardinality = + right_dataflow.plan_partitioner_.GetPartition(i)->get_cardinality(); + ret.plan_partitioner_.GetPartition(i) + ->set_cardinality(l_cardinality + r_cardinality); + } + + ret.commu_cost_ = left_dataflow.commu_cost_ + right_dataflow.commu_cost_; + break; + } + case kLeftRepartition: { + LOG(INFO) << "left_repartiotion" << std::endl; + // ret.property_.partitioner=right_dataflow.property_.partitioner; + + ret.plan_partitioner_.set_partition_list( + right_dataflow.plan_partitioner_.get_partition_list()); + ret.plan_partitioner_.set_partition_func( + right_dataflow.plan_partitioner_.get_partition_func()); + ret.plan_partitioner_.set_partition_key( + right_dataflow.plan_partitioner_.get_partition_key()); + // ret.property_.partitioner.addShadowPartitionKey(right_partition_key); + /* set the generated data size*/ + const unsigned left_total_size = + left_dataflow.plan_partitioner_.GetAggregatedDataSize(); + const unsigned right_partition_count = + right_dataflow.plan_partitioner_.GetNumberOfPartitions(); + for (unsigned i = 0; i < ret.plan_partitioner_.GetNumberOfPartitions(); + i++) { + const unsigned r_size = + right_dataflow.plan_partitioner_.GetPartition(i)->get_cardinality(); + ret.plan_partitioner_.GetPartition(i) + ->set_cardinality(r_size + left_total_size / right_partition_count); + } + + ret.commu_cost_ = left_dataflow.commu_cost_ + right_dataflow.commu_cost_; + ret.commu_cost_ += + left_dataflow.plan_partitioner_.GetAggregatedDataSize(); + break; + } + case kRightRepartition: { + LOG(INFO) << "right_repartition" << std::endl; + // ret.property_.partitioner=left_dataflow.property_.partitioner; + + ret.plan_partitioner_.set_partition_list( + left_dataflow.plan_partitioner_.get_partition_list()); + ret.plan_partitioner_.set_partition_func( + 
left_dataflow.plan_partitioner_.get_partition_func()); + ret.plan_partitioner_.set_partition_key( + left_dataflow.plan_partitioner_.get_partition_key()); + // ret.property_.partitioner.addShadowPartitionKey(right_partition_key); + /** + * set the generated data size + */ + const unsigned right_total_size = + right_dataflow.plan_partitioner_.GetAggregatedDataSize(); + const unsigned left_partition_count = + left_dataflow.plan_partitioner_.GetNumberOfPartitions(); + for (unsigned i = 0; i < ret.plan_partitioner_.GetNumberOfPartitions(); + i++) { + const unsigned l_size = + left_dataflow.plan_partitioner_.GetPartition(i)->get_cardinality(); + ret.plan_partitioner_.GetPartition(i) + ->set_cardinality(l_size + right_total_size / left_partition_count); + } + ret.commu_cost_ = left_dataflow.commu_cost_ + right_dataflow.commu_cost_; + ret.commu_cost_ += + right_dataflow.plan_partitioner_.GetAggregatedDataSize(); + break; + } + case kCompleteRepartition: { + /** + * TODO(admin): the repartition strategy (e.g., the degree of parallelism + * and the partition function) in such case is not decided by any child + * data flow. + * Additional optimization can be made by adopting the partition strategy + * which benefits the remaining work. + */ + LOG(INFO) << "complete_repartition" << std::endl; + ret.commu_cost_ = left_dataflow.commu_cost_ + right_dataflow.commu_cost_; + ret.commu_cost_ += + left_dataflow.plan_partitioner_.GetAggregatedDataSize(); + ret.commu_cost_ += + right_dataflow.plan_partitioner_.GetAggregatedDataSize(); + + ret.plan_partitioner_ = DecideOutputDataflowProperty( + left_dataflow, right_dataflow, join_type_); + // + // QueryOptimizationLogging::log("[Complete_repartition + // hash join] is not implemented, because I'm very lazy. -_- \n"); + // assert(false); + break; + } + default: { + LOG(ERROR) << "The join police has not been decided!" 
<< std::endl; + assert(false); + break; + } + } + + LogicInitCnxt licnxt; + GetColumnToId(left_dataflow.attribute_list_, licnxt.column_id0_); + GetColumnToId(right_dataflow.attribute_list_, licnxt.column_id1_); + licnxt.schema0_ = left_dataflow.GetSchema(); + licnxt.schema1_ = right_dataflow.GetSchema(); + for (int i = 0; i < join_condi_.size(); ++i) { + licnxt.return_type_ = join_condi_[i]->actual_type_; + join_condi_[i]->InitExprAtLogicalPlan(licnxt); + } + + plan_context_ = new PlanContext(); + *plan_context_ = ret; + lock_->release(); + return ret; +} + +bool LogicalOuterJoin::IsHashOnLeftKey(const Partitioner& part, + const Attribute& key) const { + if (part.getPartitionFashion() != PartitionFunction::hash_f) return false; + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + } + return part.getPartitionKey() == key; +} +// TODO(fzh) should consider shadow_partition_keys_ +bool LogicalOuterJoin::CanOmitHashRepartition( + const std::vector& join_key_list, + const PlanPartitioner& partitoiner) const { + Attribute attribute = partitoiner.get_partition_key(); + for (unsigned i = 0; i < join_key_list.size(); i++) { + if (attribute == join_key_list[i]) return true; + } + return false; +} + +bool LogicalOuterJoin::IsInOneJoinPair( + const Attribute& left_partition_key, + const Attribute& right_partition_key) const { + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + if (left_partition_key == joinkey_pair_list_[i].left_join_attr_ && + right_partition_key == joinkey_pair_list_[i].right_join_attr_) { + return true; + } + } + return false; +} +LogicalOuterJoin::JoinPolicy LogicalOuterJoin::DecideLeftOrRightRepartition( + const PlanContext& left_dataflow, const PlanContext& right_dataflow) const { + const unsigned left_data_size = left_dataflow.GetAggregatedDatasize(); + const unsigned right_data_size = right_dataflow.GetAggregatedDatasize(); + if (left_data_size > right_data_size) { + return kRightRepartition; + } else { + return kLeftRepartition; + 
} +} + +PhysicalOperatorBase* LogicalOuterJoin::GetPhysicalPlan( + const unsigned& block_size) { + if (NULL == plan_context_) { + GetPlanContext(); + } + PhysicalOuterHashJoin* join_iterator; + PhysicalOperatorBase* child_iterator_left = + left_child_->GetPhysicalPlan(block_size); + PhysicalOperatorBase* child_iterator_right = + right_child_->GetPhysicalPlan(block_size); + PlanContext dataflow_left = left_child_->GetPlanContext(); + PlanContext dataflow_right = right_child_->GetPlanContext(); + PhysicalOuterHashJoin::State state; + state.block_size_ = block_size; + state.hashtable_bucket_num_ = 1024 * 1024; + // state.ht_nbuckets=1024; + state.input_schema_left_ = GetSchema(dataflow_left.attribute_list_); + state.input_schema_right_ = GetSchema(dataflow_right.attribute_list_); + state.hashtable_schema_ = GetSchema(dataflow_left.attribute_list_); + // the bucket size is 64-byte-aligned + // state_.ht_bucketsize = + // ((state_.input_schema_left->getTupleMaxSize()-1)/64+1)*64; + /** + * In the initial implementation, I set the bucket size to be up round to + * cache line size, e.g., 64Bytes. Finally, I realized that different from + * aggregation,the hash table bucket in the build phase of hash join is filled + * very quickly and hence a * a relatively large bucket size could reduce the + * number of overflowing buckets and avoid the random memory access caused by + * acceesing overflowing buckets. 
+ */ + state.hashtable_bucket_size_ = 128; + state.output_schema_ = GetSchema(plan_context_->attribute_list_); + + state.join_index_left_ = GetLeftJoinKeyIds(); + state.join_index_right_ = GetRightJoinKeyIds(); + + // cout << "In logical plan : join_condi_.size = " << join_condi_.size(); + state.join_condi_ = join_condi_; + state.join_type_ = join_type_; + switch (join_policy_) { + case kNoRepartition: { + state.child_left_ = child_iterator_left; + state.child_right_ = child_iterator_right; + + join_iterator = new PhysicalOuterHashJoin(state); + break; + } + case kLeftRepartition: { + // state_.child_left + Expander::State expander_state; + expander_state.block_count_in_buffer_ = EXPANDER_BUFFER_SIZE; + expander_state.block_size_ = block_size; + expander_state.init_thread_count_ = Config::initial_degree_of_parallelism; + expander_state.child_ = child_iterator_left; + expander_state.schema_ = GetSchema(dataflow_left.attribute_list_); + PhysicalOperatorBase* expander = new Expander(expander_state); + + NodeTracker* node_tracker = NodeTracker::GetInstance(); + ExchangeMerger::State exchange_state; + exchange_state.block_size_ = block_size; + exchange_state.child_ = expander; // child_iterator_left; + exchange_state.exchange_id_ = + IDsGenerator::getInstance()->generateUniqueExchangeID(); + + std::vector upper_id_list = + GetInvolvedNodeID(plan_context_->plan_partitioner_); + exchange_state.upper_id_list_ = upper_id_list; + + std::vector lower_id_list = + GetInvolvedNodeID(dataflow_left.plan_partitioner_); + exchange_state.lower_id_list_ = lower_id_list; + + const Attribute right_partition_key = + plan_context_->plan_partitioner_.get_partition_key(); + + /* get the left attribute that is corresponding to the partition key.*/ + Attribute left_partition_key = + joinkey_pair_list_[GetIdInRightJoinKeys(right_partition_key)] + .left_join_attr_; + + exchange_state.partition_schema_ = + partition_schema::set_hash_partition(GetIdInAttributeList( + 
dataflow_left.attribute_list_, left_partition_key)); + + // exchange_state.schema=getSchema(dataflow_left.attribute_list_, + // dataflow_right.attribute_list_); + exchange_state.schema_ = GetSchema(dataflow_left.attribute_list_); + PhysicalOperatorBase* exchange = new ExchangeMerger(exchange_state); + state.child_left_ = exchange; + state.child_right_ = child_iterator_right; + join_iterator = new PhysicalOuterHashJoin(state); + break; + } + case kRightRepartition: { + Expander::State expander_state; + expander_state.block_count_in_buffer_ = EXPANDER_BUFFER_SIZE; + expander_state.block_size_ = block_size; + expander_state.init_thread_count_ = Config::initial_degree_of_parallelism; + expander_state.child_ = child_iterator_right; + expander_state.schema_ = GetSchema(dataflow_right.attribute_list_); + PhysicalOperatorBase* expander = new Expander(expander_state); + + NodeTracker* node_tracker = NodeTracker::GetInstance(); + ExchangeMerger::State exchange_state; + exchange_state.block_size_ = block_size; + exchange_state.child_ = expander; + exchange_state.exchange_id_ = + IDsGenerator::getInstance()->generateUniqueExchangeID(); + + std::vector upper_id_list = + GetInvolvedNodeID(plan_context_->plan_partitioner_); + exchange_state.upper_id_list_ = upper_id_list; + + std::vector lower_id_list = + GetInvolvedNodeID(dataflow_right.plan_partitioner_); + exchange_state.lower_id_list_ = lower_id_list; + + const Attribute output_partition_key = + plan_context_->plan_partitioner_.get_partition_key(); + + /* get the right attribute that is corresponding to the partition key.*/ + Attribute right_repartition_key; + if (plan_context_->plan_partitioner_.HasShadowPartitionKey()) { + right_repartition_key = + joinkey_pair_list_[GetIdInLeftJoinKeys( + output_partition_key, + plan_context_->plan_partitioner_ + .get_shadow_partition_keys())] + .right_join_attr_; + } else { + right_repartition_key = + joinkey_pair_list_[GetIdInLeftJoinKeys(output_partition_key)] + .right_join_attr_; + } + 
+ exchange_state.partition_schema_ = + partition_schema::set_hash_partition(GetIdInAttributeList( + dataflow_right.attribute_list_, right_repartition_key)); + + exchange_state.schema_ = GetSchema(dataflow_right.attribute_list_); + PhysicalOperatorBase* exchange = new ExchangeMerger(exchange_state); + state.child_left_ = child_iterator_left; + state.child_right_ = exchange; + join_iterator = new PhysicalOuterHashJoin(state); + break; + } + case kCompleteRepartition: { + /* build left input*/ + Expander::State expander_state_l; + expander_state_l.block_count_in_buffer_ = EXPANDER_BUFFER_SIZE; + expander_state_l.block_size_ = block_size; + expander_state_l.init_thread_count_ = + Config::initial_degree_of_parallelism; + expander_state_l.child_ = child_iterator_left; + expander_state_l.schema_ = GetSchema(dataflow_left.attribute_list_); + PhysicalOperatorBase* expander_l = new Expander(expander_state_l); + + ExchangeMerger::State l_exchange_state; + l_exchange_state.block_size_ = block_size; + l_exchange_state.child_ = expander_l; + l_exchange_state.exchange_id_ = + IDsGenerator::getInstance()->generateUniqueExchangeID(); + + std::vector lower_id_list = + GetInvolvedNodeID(dataflow_left.plan_partitioner_); + l_exchange_state.lower_id_list_ = lower_id_list; + + std::vector upper_id_list = + GetInvolvedNodeID(plan_context_->plan_partitioner_); + l_exchange_state.upper_id_list_ = upper_id_list; + + const Attribute left_partition_key = + plan_context_->plan_partitioner_.get_partition_key(); + l_exchange_state.partition_schema_ = + partition_schema::set_hash_partition(GetIdInAttributeList( + dataflow_left.attribute_list_, left_partition_key)); + l_exchange_state.schema_ = GetSchema(dataflow_left.attribute_list_); + PhysicalOperatorBase* l_exchange = new ExchangeMerger(l_exchange_state); + + // build right input + + Expander::State expander_state_r; + expander_state_r.block_count_in_buffer_ = EXPANDER_BUFFER_SIZE; + expander_state_r.block_size_ = block_size; + 
expander_state_r.init_thread_count_ = + Config::initial_degree_of_parallelism; + expander_state_r.child_ = child_iterator_right; + expander_state_r.schema_ = GetSchema(dataflow_right.attribute_list_); + PhysicalOperatorBase* expander_r = new Expander(expander_state_r); + + ExchangeMerger::State r_exchange_state; + r_exchange_state.block_size_ = block_size; + r_exchange_state.child_ = expander_r; + r_exchange_state.exchange_id_ = + IDsGenerator::getInstance()->generateUniqueExchangeID(); + + lower_id_list = GetInvolvedNodeID(dataflow_right.plan_partitioner_); + r_exchange_state.lower_id_list_ = lower_id_list; + + upper_id_list = GetInvolvedNodeID(plan_context_->plan_partitioner_); + r_exchange_state.upper_id_list_ = upper_id_list; + + const Attribute right_partition_key = + joinkey_pair_list_[GetIdInLeftJoinKeys(left_partition_key)] + .right_join_attr_; + r_exchange_state.partition_schema_ = + partition_schema::set_hash_partition(GetIdInAttributeList( + dataflow_right.attribute_list_, right_partition_key)); + r_exchange_state.schema_ = GetSchema(dataflow_right.attribute_list_); + PhysicalOperatorBase* r_exchange = new ExchangeMerger(r_exchange_state); + + // finally build the join iterator itself + state.child_left_ = l_exchange; + state.child_right_ = r_exchange; + join_iterator = new PhysicalOuterHashJoin(state); + break; + } + default: { break; } + } + return join_iterator; +} + +bool LogicalOuterJoin::GetOptimalPhysicalPlan( + Requirement requirement, PhysicalPlanDescriptor& physical_plan_descriptor, + const unsigned& block_size) {} + +std::vector LogicalOuterJoin::GetLeftJoinKeyIds() const { + std::vector ret; + const PlanContext dataflow = left_child_->GetPlanContext(); + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + for (unsigned j = 0; j < dataflow.attribute_list_.size(); j++) { + if (joinkey_pair_list_[i].left_join_attr_ == + dataflow.attribute_list_[j]) { + ret.push_back(j); + } + } + } + return ret; +} + +std::vector 
LogicalOuterJoin::GetRightJoinKeyIds() const { + std::vector ret; + const PlanContext dataflow = right_child_->GetPlanContext(); + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + for (unsigned j = 0; j < dataflow.attribute_list_.size(); j++) { + if (joinkey_pair_list_[i].right_join_attr_ == + dataflow.attribute_list_[j]) { + ret.push_back(j); + } + } + } + return ret; +} +std::vector LogicalOuterJoin::GetLeftPayloadIds() const { + std::vector ret; + const PlanContext dataflow = left_child_->GetPlanContext(); + const std::vector left_join_key_index_list = GetLeftJoinKeyIds(); + + for (unsigned i = 0; i < dataflow.attribute_list_.size(); i++) { + bool found_equal = false; + for (unsigned j = 0; j < left_join_key_index_list.size(); j++) { + if (i == left_join_key_index_list[j]) { + found_equal = true; + break; + } + } + if (!found_equal) { + ret.push_back(i); + } + } + return ret; +} + +std::vector LogicalOuterJoin::GetRightPayloadIds() const { + std::vector ret; + const PlanContext dataflow = right_child_->GetPlanContext(); + const std::vector right_join_key_index_list = GetRightJoinKeyIds(); + + for (unsigned i = 0; i < dataflow.attribute_list_.size(); i++) { + for (unsigned j = 0; j < right_join_key_index_list.size(); j++) { + if (i == right_join_key_index_list[j]) { + break; + } + } + ret.push_back(i); + } + return ret; +} +int LogicalOuterJoin::GetIdInLeftJoinKeys(const Attribute& attribute) const { + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + if (joinkey_pair_list_[i].left_join_attr_ == attribute) { + return i; + } + } + assert(false); + return -1; +} +int LogicalOuterJoin::GetIdInLeftJoinKeys( + const Attribute& attribute, + const std::vector shadow_attribute_list) const { + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + if (joinkey_pair_list_[i].left_join_attr_ == attribute) { + return i; + } + } + /** + * The attribute fails to match any join key. 
+ * Now we try to match the shadow partition attribute(s) + */ + for (unsigned s = 0; s < shadow_attribute_list.size(); s++) { + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + if (joinkey_pair_list_[i].left_join_attr_ == shadow_attribute_list[s]) { + return i; + } + } + } + + /** + * Neither the partition attribute nor the shadow partition attribute could + * match any join key. + */ + assert(false); + return -1; +} +int LogicalOuterJoin::GetIdInRightJoinKeys(const Attribute& attribute) const { + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + if (joinkey_pair_list_[i].right_join_attr_ == attribute) { + return i; + } + } + assert(false); + return -1; +} +int LogicalOuterJoin::GetIdInRightJoinKeys( + const Attribute& attribute, + const std::vector shadow_attribute_list) const { + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + if (joinkey_pair_list_[i].right_join_attr_ == attribute) { + return i; + } + } + /** + * The attribute fails to match any join key. + * Now we try to match the shadow partition attribute(s) + */ + for (unsigned s = 0; s < shadow_attribute_list.size(); s++) { + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + if (joinkey_pair_list_[i].right_join_attr_ == shadow_attribute_list[s]) { + return i; + } + } + } + + /** + * neither the partition attribute nor the shadow partition attribute could + * match any join key. 
+ */ + assert(false); + return -1; +} +int LogicalOuterJoin::GetIdInAttributeList( + const std::vector& attributes, + const Attribute& attribute) const { + for (unsigned i = 0; i < attributes.size(); i++) { + if (attributes[i] == attribute) { + return i; + } + } + assert(false); + return -1; +} +PlanPartitioner LogicalOuterJoin::DecideOutputDataflowProperty( + const PlanContext& left_dataflow, const PlanContext& right_dataflow, + int join_type) const { + PlanPartitioner ret; + + // const unsigned l_data_cardinality=left_dataflow.getAggregatedDatasize(); + // const unsigned r_datasize=right_dataflow.getAggregatedDatasize(); + const unsigned long l_data_cardinality = + left_dataflow.GetAggregatedDataCardinality(); + const unsigned long r_data_cardinality = + right_dataflow.GetAggregatedDataCardinality(); + + std::vector all_node_id_list = + NodeTracker::GetInstance()->GetNodeIDList(); + /** + * In the current implementation, all the nodes are involved in the + * complete_repartition method. + * TODO: decide the degree of parallelism + */ + const unsigned degree_of_parallelism = all_node_id_list.size(); + + std::vector dataflow_partition_list; + for (unsigned i = 0; i < degree_of_parallelism; i++) { + const NodeID location = all_node_id_list[i]; + + /** + * Currently, the join output size cannot be predicted due to the absence of + * data statistics. 
+ * We just use the magic number as following + */ + // const unsigned + // cardinality=l_data_cardinality/degree_of_parallelism+r_data_cardinality/degree_of_parallelism; + const unsigned long cardinality = + l_data_cardinality * r_data_cardinality * + PredictEqualJoinSelectivity(left_dataflow, right_dataflow) / + degree_of_parallelism; + PlanPartitionInfo dfp(i, cardinality, location); + dataflow_partition_list.push_back(dfp); + } + ret.set_partition_list(dataflow_partition_list); + ret.set_partition_key(joinkey_pair_list_[0].left_join_attr_); + ret.AddShadowPartitionKey(joinkey_pair_list_[0].right_join_attr_); + PartitionFunction* partition_function = + PartitionFunctionFactory::createBoostHashFunction(degree_of_parallelism); + ret.set_partition_func(partition_function); + return ret; +} +void LogicalOuterJoin::Print(int level) const { + cout << setw(level * kTabSize) << " " + << "OuterJoin: "; + ++level; + switch (join_policy_) { + case kNoRepartition: { + cout << "no_repartition!" << endl; + break; + } + case kLeftRepartition: { + cout << "left_repartition!" << endl; + break; + } + case kRightRepartition: { + cout << "right_repartition!" << endl; + break; + } + case kCompleteRepartition: { + cout << "complete_repartition!" << endl; + break; + } + default: { cout << "not given!" 
<< endl; } + } + GetPlanContext(); + cout << setw(level * kTabSize) << " " + << "[Partition info: " + << plan_context_->plan_partitioner_.get_partition_key().attrName + << " table_id= " + << plan_context_->plan_partitioner_.get_partition_key().table_id_ + << " column_id= " + << plan_context_->plan_partitioner_.get_partition_key().index << " ]" + << endl; + for (unsigned i = 0; i < this->joinkey_pair_list_.size(); i++) { + cout << setw(level * kTabSize) << " " + << joinkey_pair_list_[i].left_join_attr_.attrName << " = " + << joinkey_pair_list_[i].right_join_attr_.attrName << endl; + } + --level; + left_child_->Print(level); + right_child_->Print(level); +} +double LogicalOuterJoin::PredictEqualJoinSelectivity( + const PlanContext& left_dataflow, const PlanContext& right_dataflow) const { + /** + * Currently, we assume that we do not know the joint distribution of join + * attributes. + * Consequently, we predict the selectivity for each join attribute pair and + * finally combine them. + */ + double ret = 1; + for (unsigned i = 0; i < joinkey_pair_list_.size(); i++) { + ret *= PredictEqualJoinSelectivityOnSingleJoinAttributePair( + joinkey_pair_list_[i].left_join_attr_, + joinkey_pair_list_[i].right_join_attr_); + } + return ret; +} +double LogicalOuterJoin::PredictEqualJoinSelectivityOnSingleJoinAttributePair( + const Attribute& attr_left, const Attribute& attr_right) const { + double ret; + TableStatistic* t_l_stat = + StatManager::getInstance()->getTableStatistic(attr_left.table_id_); + TableStatistic* t_r_stat = + StatManager::getInstance()->getTableStatistic(attr_right.table_id_); + if (t_r_stat && t_l_stat) { + unsigned long t_l_card = t_l_stat->getCardinality(); + unsigned long t_r_card = t_r_stat->getCardinality(); + + AttributeStatistics* a_l_stat = + StatManager::getInstance()->getAttributeStatistic(attr_left); + AttributeStatistics* a_r_stat = + StatManager::getInstance()->getAttributeStatistic(attr_right); + if (a_l_stat && a_r_stat) { + /** + * both 
tables have the attribute level statistics. + */ + Histogram* attr_left_hist = a_l_stat->getHistogram(); + Histogram* attr_right_hist = a_r_stat->getHistogram(); + if (attr_left_hist && attr_right_hist) { + /** + * Both tables have histogram, so we predict the selectivity based on + * histogram. + */ + ; // Waiting for Zhutao's implementation + + const unsigned long a_l_dist_card = a_l_stat->getDistinctCardinality(); + const unsigned long a_r_dist_card = a_r_stat->getDistinctCardinality(); + double min_card = + a_l_dist_card < a_r_dist_card ? a_l_dist_card : a_r_dist_card; + min_card *= 1; // 0.8 is the magic number + const double output_card = min_card * t_l_card / (double)a_l_dist_card * + t_r_card / (double)a_r_dist_card; + ret = output_card / t_l_card / t_r_card; + double max_card = + a_l_dist_card > a_r_dist_card ? a_l_dist_card : a_r_dist_card; + ret = 1 / max_card; + } else { + /** + * predict based on the cardinality and distinct cardinality of the two + * attribute. + */ + const unsigned long a_l_dist_card = a_l_stat->getDistinctCardinality(); + const unsigned long a_r_dist_card = a_r_stat->getDistinctCardinality(); + double min_card = + a_l_dist_card < a_r_dist_card ? a_l_dist_card : a_r_dist_card; + min_card *= 1; // 0.8 is the magic number + const double output_card = min_card * t_l_card / (double)a_l_dist_card * + t_r_card / (double)a_r_dist_card; + + double max_card = + a_l_dist_card > a_r_dist_card ? a_l_dist_card : a_r_dist_card; + ret = 1 / max_card; + } + } else { + /** + * Not both a_l and a_r have the attribute level statistics, so we predict + * the join size based + * on magic number. + */ + ret = 0.1; + } + } else { + /** + * No table statistic is available, so we use the the magic number. 
+ */ + ret = 0.1; + } + LOG(INFO) << "Predicted selectivity for " << attr_left.attrName.c_str() + << " and " << attr_right.attrName.c_str() << " is " << ret + << std::endl; + return ret; +} + +} // namespace logical_operator +} // namespace claims diff --git a/logical_operator/logical_outer_join.h b/logical_operator/logical_outer_join.h new file mode 100644 index 000000000..3d7fcaf15 --- /dev/null +++ b/logical_operator/logical_outer_join.h @@ -0,0 +1,202 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/logical_operator/logical_left_join.h + * + * Created on: Mar 16, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ + +#ifndef LOGICAL_OPERATOR_LOGICAL_OUTER_JOIN_H_ +#define LOGICAL_OPERATOR_LOGICAL_OUTER_JOIN_H_ +#include +#include "../common/expression/expr_node.h" +#include "../catalog/attribute.h" +#include "../catalog/partitioner.h" +#include "../logical_operator/logical_operator.h" +#include "../physical_operator/physical_sort.h" +#include "../logical_operator/logical_equal_join.h" + +namespace claims { +namespace logical_operator { + +/** + * @brief Generate equal join logical plan. 
+ * @details EqualJoin operator achieves the join statement tables.It requires + * equivalent conditions.As for implementation, one is to + * send all data to a same machine.But the generated table will be too big to + * store and it is inefficient.So we partition the data to several machines.Each + * of them does some join and return. + */ +class LogicalOuterJoin : public LogicalOperator { + public: + /** + * @brief The JoinPair contains two attributes to be joined. + */ + // struct JoinPair { + // JoinPair(const Attribute& left_join_attr, const Attribute& + // right_join_attr) + // : left_join_attr_(left_join_attr), right_join_attr_(right_join_attr) + // {} + // Attribute left_join_attr_; + // Attribute right_join_attr_; + // }; + enum JoinPolicy { + kNull, + kNoRepartition, + kLeftRepartition, + kRightRepartition, + kCompleteRepartition + }; + + public: + /** + * @brief Method description:Create a OuterJoin implement. + * @param std::vector + * @param LogicalOperator* left_input + * @param LogicalOperator* right_input + */ + LogicalOuterJoin(std::vector, + LogicalOperator* left_input, LogicalOperator* right_input, + int join_type); + LogicalOuterJoin(std::vector, + LogicalOperator* left_input, LogicalOperator* right_input, + int join_type, vector join_condi); + virtual ~LogicalOuterJoin(); + /** + * @brief Method description: Get the child information. + * @details The mostly important member is "property" and "attributes + * list". When getting data from child, we make some choices according to + * JoinPolice. JoinPolice is generated from + * canLeverageHashPartition(), isEqualCondition(), hasSamePartition(). + * If all of them is true, it means partition_key in the join_list and two + * partition_keys in the same join_pair.Besides, it they have partitioned to + * the same machine.So we don't need repartition, and JoinPolice = + * kNodeRepartition. 
+ * If both canLeverageHashPartition()s are true, but isEqualCondition() or + * hasSamePartition() is false, the machine which has smaller data will send + * its data to another according to decideLeftOrRightRepartition(). + * If canLeverageHashPartition(left) is true but canLeverageHashPartition + * (right) is false, right nodes will be repartitioned. + * join_police_ = kRightRepartition + * If canLeverageHashPartition(left) is false but canLeverageHashPartition + * (right) is true, left nodes will be repartitioned. + * oin_police_ = kLeftRepartition + * If both canLeverageHashPartition()s are false, left and right nodes will be + * repartitioned. + * join_police_ = kCompleteRepartition + */ + PlanContext GetPlanContext(); + PhysicalOperatorBase* GetPhysicalPlan(const unsigned& blocksize); + bool GetOptimalPhysicalPlan(Requirement requirement, + PhysicalPlanDescriptor& physical_plan_descriptor, + const unsigned& block_size = 4096 * 1024); + + private: + std::vector GetLeftJoinKeyIds() const; + std::vector GetRightJoinKeyIds() const; + std::vector GetLeftPayloadIds() const; + std::vector GetRightPayloadIds() const; + int GetIdInLeftJoinKeys(const Attribute&) const; + int GetIdInLeftJoinKeys( + const Attribute&, + const std::vector shadow_attribute_list) const; + int GetIdInRightJoinKeys(const Attribute&) const; + int GetIdInRightJoinKeys( + const Attribute&, + const std::vector shadow_attribute_list) const; + int GetIdInAttributeList(const std::vector& attributes, + const Attribute&) const; + bool IsHashOnLeftKey(const Partitioner& part, const Attribute& key) const; + void DecideJoinPolicy(const PlanContext& left_context, + const PlanContext& right_context); + /** + * @brief Method description:Check whether the partitioning is based on hash + * and the hash key is a subset of the join keys such that hash join is + * enabled. 
+ * @param const std::vector& partition_key_list + * @param const DataflowPartitioningDescriptor& partitoiner + * @return bool + */ + bool CanOmitHashRepartition(const std::vector& join_key_list, + const PlanPartitioner& partitoiner) const; + /** + * @brief Method description:Check whether two partition_keys in the same + * join_pair. + * @param const Attribute& left + * @param const Attribute& right + * @return bool + */ + bool IsInOneJoinPair(const Attribute& left_partition_key, + const Attribute& right_partition_key) const; + + /** + * @brief Method description:Check which has the smaller data. + * @details Current version only consider the data size for simplicity. + * @param const Dataflow& left_dataflow + * @param const Dataflow& right_dataflow + * @return JoinPolice + * TODO(admin): Consider not only data size but also other factors, such as + * parallelism, resource, etc. + */ + JoinPolicy DecideLeftOrRightRepartition( + const PlanContext& left_dataflow, + const PlanContext& right_dataflow) const; + + PlanPartitioner DecideOutputDataflowProperty( + const PlanContext& left_dataflow, const PlanContext& right_dataflow, + int join_type) const; + void Print(int level = 0) const; + + /** + * Assuming that R and S are the two join table, the selectivity is + * the number of tuples generated by the join operator to the number of + * |R|*|S|. + */ + double PredictEqualJoinSelectivity(const PlanContext& left_dataflow, + const PlanContext& right_dataflow) const; + + /** + * assuming that R ane S are the two join table, and the join condition is + * R.x=S.x. + * return |O|, where |O|=|R.x=x1|*|S.x=x1|+|R.x=x2|*|S.x=x2|+...... 
+ */ + double PredictEqualJoinSelectivityOnSingleJoinAttributePair( + const Attribute& attr_left, const Attribute& attr_right) const; + + private: + std::vector join_condi_; + std::vector joinkey_pair_list_; + std::vector left_join_key_list_; + std::vector right_join_key_list_; + LogicalOperator* left_child_; + LogicalOperator* right_child_; + JoinPolicy join_policy_; + PlanContext* plan_context_; + // join_type_ = 0 means left join while join_type = 1 means right join + // join_type_ = 2 means full join + int join_type_; +}; +} // namespace logical_operator +} // namespace claims + +#endif // LOGICAL_OPERATOR_LOGICAL_OUTER_JOIN_H_ diff --git a/logical_operator/logical_project.cpp b/logical_operator/logical_project.cpp index 701c143d2..cffafc8c6 100644 --- a/logical_operator/logical_project.cpp +++ b/logical_operator/logical_project.cpp @@ -45,6 +45,7 @@ using claims::common::ExprColumn; using claims::common::ExprNode; +using claims::common::LogicInitCnxt; using claims::physical_operator::PhysicalProject; namespace claims { namespace logical_operator { @@ -117,12 +118,12 @@ PlanContext LogicalProject::GetPlanContext() { */ for (int i = 0; i < expression_tree_.size(); ++i) { column_type* column = NULL; - if (t_string == expression_tree_[i]->return_type || - t_decimal == expression_tree_[i]->return_type) { - column = new column_type(expression_tree_[i]->return_type, + if (t_string == expression_tree_[i]->return_type_ || + t_decimal == expression_tree_[i]->return_type_) { + column = new column_type(expression_tree_[i]->return_type_, expression_tree_[i]->length); } else { - column = new column_type(expression_tree_[i]->return_type); + column = new column_type(expression_tree_[i]->return_type_); } // set TableID const unsigned kTableID = INTERMEIDATE_TABLEID; @@ -134,12 +135,13 @@ PlanContext LogicalProject::GetPlanContext() { } #else ret_attrs.clear(); - map column_to_id; + LogicInitCnxt licnxt; + licnxt.schema0_ = input_schema; int mid_table_id = MIDINADE_TABLE_ID++; 
- GetColumnToId(child_plan_context.attribute_list_, column_to_id); + GetColumnToId(child_plan_context.attribute_list_, licnxt.column_id0_); for (int i = 0; i < expr_list_.size(); ++i) { - expr_list_[i]->InitExprAtLogicalPlan(expr_list_[i]->actual_type_, - column_to_id, input_schema); + licnxt.return_type_ = expr_list_[i]->actual_type_; + expr_list_[i]->InitExprAtLogicalPlan(licnxt); ret_attrs.push_back(expr_list_[i]->ExprNodeToAttr(i, mid_table_id)); // update partition key diff --git a/logical_operator/logical_query_plan_root.cpp b/logical_operator/logical_query_plan_root.cpp index db9d20761..02a2260f5 100644 --- a/logical_operator/logical_query_plan_root.cpp +++ b/logical_operator/logical_query_plan_root.cpp @@ -197,7 +197,7 @@ PlanContext LogicalQueryPlanRoot::GetPlanContext() { LOG(INFO) << "Communication cost: " << ret.commu_cost_ << " predicted ouput size= " << ret.plan_partitioner_.GetAggregatedDataCardinality() << endl; - plan_context_ = new PlanContext; + plan_context_ = new PlanContext(); *plan_context_ = ret; lock_->release(); return ret; diff --git a/logical_operator/logical_query_plan_root.h b/logical_operator/logical_query_plan_root.h index e489f239a..67c4bccd9 100644 --- a/logical_operator/logical_query_plan_root.h +++ b/logical_operator/logical_query_plan_root.h @@ -70,6 +70,7 @@ class LogicalQueryPlanRoot : public LogicalOperator { const OutputStyle& fashion = kPerformance); LogicalQueryPlanRoot(NodeID collecter_node_id, LogicalOperator* child, const OutputStyle& fashion = kPerformance); + virtual ~LogicalQueryPlanRoot(); PlanContext GetPlanContext(); /** diff --git a/logical_operator/logical_scan.cpp b/logical_operator/logical_scan.cpp index 842414895..70e9c79bc 100644 --- a/logical_operator/logical_scan.cpp +++ b/logical_operator/logical_scan.cpp @@ -29,19 +29,19 @@ #include "../logical_operator/logical_scan.h" #include #include -#include #include - +#include #include #include "../catalog/catalog.h" #include "../IDsGenerator.h" +#include 
"../logical_operator/logical_operator.h" #include "../logical_operator/plan_partition_info.h" #include "../physical_operator/exchange_merger.h" #include "../physical_operator/physical_operator_base.h" #include "../physical_operator/physical_projection_scan.h" #include "../Resource/NodeTracker.h" - +using std::string; using claims::physical_operator::ExchangeMerger; using claims::physical_operator::PhysicalProjectionScan; namespace claims { @@ -70,11 +70,11 @@ LogicalScan::LogicalScan(ProjectionDescriptor* projection, scan_attribute_list_ = projection->getAttributeList(); target_projection_ = projection; } -LogicalScan::LogicalScan(ProjectionDescriptor* projection, - const string table_alias, const float sample_rate) +LogicalScan::LogicalScan(ProjectionDescriptor* const projection, + string table_alias, const float sample_rate) : LogicalOperator(kLogicalScan), - sample_rate_(sample_rate), table_alias_(table_alias), + sample_rate_(sample_rate), plan_context_(NULL) { scan_attribute_list_ = projection->getAttributeList(); ChangeAliasAttr(); diff --git a/logical_operator/logical_scan.h b/logical_operator/logical_scan.h index 2faf27649..7e8a0e052 100644 --- a/logical_operator/logical_scan.h +++ b/logical_operator/logical_scan.h @@ -35,6 +35,7 @@ #include "../catalog/attribute.h" #include "../catalog/table.h" #include "../logical_operator/logical_operator.h" +#include "../logical_operator/plan_context.h" #include "../physical_operator/physical_operator_base.h" namespace claims { @@ -52,7 +53,7 @@ class LogicalScan : public LogicalOperator { LogicalScan(std::vector attribute_list); LogicalScan(const TableID&); LogicalScan(ProjectionDescriptor* projection, const float sample_rate_ = 1); - LogicalScan(ProjectionDescriptor* projection, const string table_alias, + LogicalScan(ProjectionDescriptor* const projection, string table_alias, const float sample_rate_ = 1); LogicalScan(const TableID&, diff --git a/logical_operator/logical_sort.cpp b/logical_operator/logical_sort.cpp 
index e9665431d..56944012a 100644 --- a/logical_operator/logical_sort.cpp +++ b/logical_operator/logical_sort.cpp @@ -45,6 +45,7 @@ #include "../physical_operator/physical_operator_base.h" #include "../physical_operator/physical_sort.h" using claims::common::ExprNode; +using claims::common::LogicInitCnxt; using claims::physical_operator::ExchangeMerger; using claims::physical_operator::Expander; using claims::physical_operator::PhysicalSort; @@ -97,10 +98,12 @@ PlanContext LogicalSort::GetPlanContext() { partition_list.push_back(par); ret.plan_partitioner_.set_partition_list(partition_list); SetColumnId(child_plan_context_); - Schema *input_schema = GetSchema(child_plan_context_.attribute_list_); + LogicInitCnxt licnxt; + licnxt.schema0_ = GetSchema(child_plan_context_.attribute_list_); + GetColumnToId(child_plan_context_.attribute_list_, licnxt.column_id0_); for (int i = 0; i < order_by_attrs_.size(); ++i) { - order_by_attrs_[i].first->InitExprAtLogicalPlan( - order_by_attrs_[i].first->actual_type_, column_to_id_, input_schema); + licnxt.return_type_ = order_by_attrs_[i].first->actual_type_; + order_by_attrs_[i].first->InitExprAtLogicalPlan(licnxt); } plan_context_ = new PlanContext(); *plan_context_ = ret; diff --git a/node_manager/Makefile.am b/node_manager/Makefile.am new file mode 100644 index 000000000..3f4a94b3d --- /dev/null +++ b/node_manager/Makefile.am @@ -0,0 +1,36 @@ +AM_CPPFLAGS= -fPIC -fpermissive \ +-I${HADOOP_HOME}/include\ +-I${JAVA_HOME}/include\ +-I${JAVA_HOME}/include/linux \ +-I${BOOST_HOME} \ +-I${BOOST_HOME}/boost/serialization \ +-I${CAF_HOME}/libcaf_io \ +-I${CAF_HOME}/libcaf_core + +AM_LDFLAGS=-lc -lm -lrt -lcaf_core -lcaf_io -lboost_system -lboost_serialization -ll -ly -lstdc++ -lxs + +if OPT_TCMALLOC +AM_CPPFLAGS+=-fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free +AM_LDFLAGS+=-ltcmalloc +endif + +LDADD = ../Executor/libexecutor.a \ + ../common/libcommon.a \ + ../utility/libutility.a \ + 
${BOOST_HOME}/stage/lib/libboost_system.a \ + ${BOOST_HOME}/stage/lib/libboost_system.so \ + ${BOOST_HOME}/stage/lib/libboost_serialization.a \ + ${BOOST_HOME}/stage/lib/libboost_serialization.a \ + ${CAF_HOME}/build/lib/libcaf_core.so \ + ${CAF_HOME}/build/lib/libcaf_io.so + +noinst_LIBRARIES=libnodemanager.a + +libnodemanager_a_SOURCES = \ + base_node.cpp base_node.h \ + master_node.cpp master_node.h \ + slave_node.cpp slave_node.h \ + node_main.cpp + + + diff --git a/node_manager/base_node.cpp b/node_manager/base_node.cpp new file mode 100644 index 000000000..d04f06b23 --- /dev/null +++ b/node_manager/base_node.cpp @@ -0,0 +1,104 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/node_manager/base_node.cpp + * + * Created on: Jan 4, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#include "./base_node.h" + +#include +#include +#include +#include + +#include "../Config.h" +#include "../Executor/PortManager.h" +#include "caf/all.hpp" +using caf::actor; +using std::make_pair; +using std::string; +using std::vector; +namespace claims { + +BaseNode::BaseNode() : node_id_(-1) { + ReadNodeAddr(); + ReadMasterAddr(); +} +BaseNode::BaseNode(string node_ip, uint16_t node_port) + : node_addr_(make_pair(node_ip, node_port)), node_id_(-1) { + ReadMasterAddr(); +} +BaseNode::~BaseNode() {} + +NodeAddr BaseNode::GetNodeAddr() { return node_addr_; } + +void BaseNode::ReadNodeAddr() { + libconfig::Config cfg; + cfg.readFile(Config::config_file.c_str()); + string ip = (const char *)cfg.lookup("ip"); + node_addr_ = make_pair(ip, PortManager::getInstance()->applyPort()); +} + +NodeAddr BaseNode::GetMasterAddr() { return master_addr_; } + +void BaseNode::ReadMasterAddr() { + libconfig::Config cfg; + cfg.readFile(Config::config_file.c_str()); + std::string master_ip = (const char *)cfg.lookup("coordinator.ip"); + std::string master_port = (const char *)cfg.lookup("coordinator.port"); + master_addr_ = make_pair(master_ip, std::atoi(master_port.c_str())); +} +NodeAddr BaseNode::GetNodeAddrFromId(const unsigned int id) { + lock_.acquire(); + auto it = node_id_to_addr_.find(id); + lock_.release(); + if (it != node_id_to_addr_.end()) { + return it->second; + } else { + return NodeAddr("0", 0); + } +} +actor &BaseNode::GetNodeActorFromId(const unsigned int id) { + lock_.acquire(); + auto it = node_id_to_actor_.find(id); + lock_.release(); + if (it != node_id_to_actor_.end()) { + return it->second; + } else { + actor null_actor; + return null_actor; + } +} +vector BaseNode::GetAllNodeID() { + vector all_node_id; + all_node_id.clear(); + lock_.acquire(); + for (auto it = node_id_to_addr_.begin(); it != 
node_id_to_addr_.end(); ++it) { + all_node_id.push_back(it->first); + } + lock_.release(); + return all_node_id; +} +} // namespace claims diff --git a/node_manager/base_node.h b/node_manager/base_node.h new file mode 100644 index 000000000..f99ddfd19 --- /dev/null +++ b/node_manager/base_node.h @@ -0,0 +1,121 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/node_manager/base_node.h + * + * Created on: Jan 4, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef NODE_MANAGER_BASE_NODE_H_ +#define NODE_MANAGER_BASE_NODE_H_ +#include "./base_node.h" +#include +#include +#include +#include +#include +#include "caf/all.hpp" +#include "caf/io/all.hpp" +#include +#include "../common/ids.h" +#include "../utility/lock.h" +using std::pair; +using std::string; +using std::cout; +using std::endl; +using std::map; +using std::vector; +using caf::actor; +namespace claims { +using OkAtom = caf::atom_constant; +using RegisterAtom = caf::atom_constant; +using ExitAtom = caf::atom_constant; +using SendPlanAtom = caf::atom_constant; +using AskExchAtom = caf::atom_constant; +using BindingAtom = caf::atom_constant; +using UnBindingAtom = caf::atom_constant; +using StorageBudgetAtom = caf::atom_constant; +using BroadcastNodeAtom = caf::atom_constant; +using ReportSegESAtom = caf::atom_constant; +using CheckStmtESAtom = caf::atom_constant; +using CancelPlanAtom = caf::atom_constant; +using HeartBeatAtom = caf::atom_constant; +using Updatelist = caf::atom_constant; +using SyncNodeInfo = caf::atom_constant; + + + +const int kMaxTryTimes = 5; +using ReportSAtom = caf::atom_constant; +const int kTimeout = 5; +class MemoryInfo {}; +class DiskInfo {}; +typedef pair NodeAddr; +class BaseNode { + public: + BaseNode(); + BaseNode(string node_ip, uint16_t node_port); + virtual ~BaseNode(); + void set_node_id(unsigned int node_id) { node_id_ = node_id; } + unsigned int get_node_id() { return node_id_; } + string get_node_ip() { return node_addr_.first; } + uint16_t get_node_port() { return node_addr_.second; } + NodeAddr GetNodeAddr(); + void ReadNodeAddr(); + NodeAddr GetMasterAddr(); + void ReadMasterAddr(); + NodeAddr GetNodeAddrFromId(const unsigned int id); + actor& GetNodeActorFromId(const unsigned int id); + actor& GetMasterActor() { return master_actor_; } + vector GetAllNodeID(); + + bool 
operator==(const BaseNode& r) const { + if (r.node_id_to_addr_.size() != node_id_to_addr_.size()) { + return false; + } + auto ita = r.node_id_to_addr_.begin(); + auto itb = node_id_to_addr_.begin(); + for (; itb != node_id_to_addr_.end(); ++ita, ++itb) { + if (ita->first != itb->first || ita->second.first != itb->second.first || + ita->second.second != itb->second.second) { + return false; + } + } + return true; + } + + protected: + unsigned int node_id_; + NodeAddr node_addr_; + NodeAddr master_addr_; + Lock lock_; + actor master_actor_; + + public: + std::unordered_map> node_id_to_addr_; + std::unordered_map node_id_to_actor_; +}; + +} // namespace claims + +#endif // NODE_MANAGER_BASE_NODE_H_ diff --git a/node_manager/master_node.cpp b/node_manager/master_node.cpp new file mode 100644 index 000000000..9fca1758a --- /dev/null +++ b/node_manager/master_node.cpp @@ -0,0 +1,319 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/node_manager/master_node.cpp + * + * Created on: Jan 4, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#include "./master_node.h" + +#include +#include +#include +#include +#include +#include +#include "caf/all.hpp" +#include "caf/io/all.hpp" +#include "../common/error_define.h" +#include "../common/ids.h" +#include "../common/Message.h" +#include "../Environment.h" +using caf::io::remote_actor; +using caf::make_message; +using std::make_pair; +using claims::common::rConRemoteActorError; +using namespace claims::catalog; +namespace claims { +MasterNode* MasterNode::instance_ = 0; +class MasterNodeActor : public event_based_actor { + public: + MasterNodeActor(MasterNode* master_node) : master_node_(master_node) {} + + behavior make_behavior() override { + become(MainWork()); + return {}; + } + behavior MainWork() { + return { + [=](RegisterAtom, string ip, uint16_t port) -> caf::message { + /* To check if slave node is Reregister + * because some reason may let slave node resend RegisterAtom to master. + * like network shake, or slave is restarted but master node doesn't check + * this condition by heatbeat. + */ + unsigned int tmp_node_id = -1; + bool is_reregister = false; + for(auto it = master_node_->node_id_to_addr_.begin(); + it != master_node_->node_id_to_addr_.end();++it) + { + if((it->second.first == ip)) + { + is_reregister = true; + tmp_node_id = it->first; + } + } + if(is_reregister) + { + //find this slave is reregister, so remove old slave node info. 
+ master_node_->RemoveOneNode(tmp_node_id,master_node_); + master_node_->node_id_to_heartbeat_.erase(tmp_node_id); + Environment::getInstance() + ->getResourceManagerMaster() + ->UnRegisterSlave(tmp_node_id); + LOG(INFO)<<"master remove old node :"<AddOneNode(ip, port); + Environment::getInstance() + ->getResourceManagerMaster() + ->RegisterNewSlave(id); + LOG(INFO)<<"master Register slave node :"<"<SyncNodeList(master_node_); + return make_message(OkAtom::value, id, *((BaseNode*)master_node_)); + }, + [=](HeartBeatAtom, unsigned int node_id_, string address_, uint16_t port_) -> caf::message { + auto it = master_node_->node_id_to_heartbeat_.find(node_id_); + //有可能再重启后两个不同的ip使用相同的nodeID 所以要避免 + if (it != master_node_->node_id_to_heartbeat_.end() && + !(master_node_->node_id_to_addr_.find(node_id_)->second.first.compare(address_))){ + //clear heartbeat count. + it->second = 0; + return make_message(OkAtom::value); + }else{ + LOG(INFO)<<"get heartbeat and register request from "<AddOneNode(address_, port_); + Environment::getInstance() + ->getResourceManagerMaster() + ->RegisterNewSlave(id); + LOG(INFO)<<"master Register slave node :"<BroastNodeInfo(id,address_,port_); + return make_message(OkAtom::value, id, *((BaseNode*)master_node_)); + } + }, + [=](Updatelist){ + bool is_losted = false; + if(master_node_->node_id_to_heartbeat_.size() > 0){ + for (auto it = master_node_->node_id_to_heartbeat_.begin();it != master_node_->node_id_to_heartbeat_.end();) + { + //Heartbeat count++ + it->second++; + { + if (it->second >= kMaxTryTimes){ + is_losted = true; + LOG(WARNING) <<"master : lost hearbeat from ( node "<first<<")"<first; + auto tmp_it = it; + it++; + master_node_->node_id_to_heartbeat_.erase(tmp_it); + master_node_->RemoveOneNode(node_id, master_node_); + Environment::getInstance() + ->getResourceManagerMaster() + ->UnRegisterSlave(node_id); + LOG(INFO)<<"master unRegister old node :"<SyncNodeList(master_node_); + is_losted=false; + } + delayed_send(this, 
std::chrono::seconds(kTimeout/5), Updatelist::value); + }, + [&](StorageBudgetAtom, const StorageBudgetMessage& message) { + Environment::getInstance() + ->getResourceManagerMaster() + ->RegisterDiskBuget(message.nodeid, message.disk_budget); + Environment::getInstance() + ->getResourceManagerMaster() + ->RegisterMemoryBuget(message.nodeid, message.memory_budget); + LOG(INFO) << "receive storage budget message!! node: " + << message.nodeid << " : disk = " << message.disk_budget + << " , mem = " << message.memory_budget << endl; + return make_message(OkAtom::value); + }, + [&](StorageBudgetAtom, const StorageBudgetMessage& message) + -> caf::message { + Environment::getInstance() + ->getResourceManagerMaster() + ->RegisterDiskBuget(message.nodeid, message.disk_budget); + Environment::getInstance() + ->getResourceManagerMaster() + ->RegisterMemoryBuget(message.nodeid, message.memory_budget); + LOG(INFO) << "receive storage budget message!! node: " + << message.nodeid << " : disk = " << message.disk_budget + << " , mem = " << message.memory_budget << endl; + return make_message(OkAtom::value); + }, + [=](ExitAtom) { + LOG(INFO) << "master " << master_node_->get_node_id() << " finish!" 
+ << endl; + quit(); + }, + caf::others >> [=]() { + LOG(WARNING) << "master node receives unknown message" + << endl; + } + }; + } + MasterNode* master_node_; +}; +MasterNode* MasterNode::GetInstance() { + if (NULL == instance_) { + instance_ = new MasterNode(); + } + return instance_; +} + +MasterNode::MasterNode() : node_id_gen_(0) { + instance_ = this; + set_node_id(0); + ReadMasterAddr(); + node_addr_ = master_addr_; + CreateActor(); +} + +MasterNode::MasterNode(string node_ip, uint16_t node_port) + : BaseNode(node_ip, node_port), node_id_gen_(0) { + CreateActor(); +} + +MasterNode::~MasterNode() { instance_ = NULL; } +void MasterNode::CreateActor() { + master_actor_ = caf::spawn(this); + try { + caf::io::publish(master_actor_, get_node_port(), nullptr, 1); + LOG(INFO) << "master ip port" << get_node_port() << " publish succeed!"; + caf::scoped_actor self; + self->send(master_actor_,Updatelist::value); + } catch (caf::bind_failure& e) { + LOG(ERROR) << "the specified port " << get_node_port() << " is used!"; + } catch (caf::network_error& e) { + LOG(ERROR) << "connection error in publishing master actor port"; + } +} +void MasterNode::PrintNodeList() { + for (auto it = node_id_to_addr_.begin(); it != node_id_to_addr_.end(); ++it) { + std::cout << "node id = " << it->first << " ( " << it->second.first << " , " + << it->second.second << " )" << std::endl; + } +} +RetCode MasterNode::BroastNodeInfo(const unsigned int& node_id, + const string& node_ip, + const uint16_t& node_port) { + caf::scoped_actor self; + for (auto it = node_id_to_addr_.begin(); it != node_id_to_addr_.end(); ++it) { + self->send(node_id_to_actor_.at(it->first), BroadcastNodeAtom::value, + node_id, node_ip, node_port); + } + return rSuccess; +} +// should be atomic +unsigned int MasterNode::AddOneNode(string node_ip, uint16_t node_port) { + lock_.acquire(); + unsigned int node_id; + //If a slave has same ip with master, it get ID equals 0 + if (node_ip == get_node_ip()){ + node_id = 0; + 
}else{ + node_id = ++node_id_gen_; + } + node_id_to_addr_.insert( + make_pair((unsigned int)node_id, make_pair(node_ip, node_port))); + node_id_to_heartbeat_.insert(make_pair((unsigned int)node_id, 0)); + try { + auto actor = remote_actor(node_ip, node_port); + node_id_to_actor_.insert(make_pair((unsigned int)node_id, actor)); + } catch (caf::network_error& e) { + LOG(WARNING) << "cann't connect to node ( " << node_ip << " , " << node_port + << " ) and create remote actor failed!!"; + assert(false); + } + LOG(INFO) << "register one node( " << node_id << " < " << node_ip + << " " << node_port << " > )" << std::endl; + //BroastNodeInfo((unsigned int)node_id, node_ip, node_port); + lock_.release(); + return node_id; +} +/* + * + * + * */ +void MasterNode::RemoveOneNode(unsigned int node_id, MasterNode* master_node){ + master_node->lock_.acquire(); + master_node->node_id_to_addr_.erase(node_id); + master_node->node_id_to_actor_.erase(node_id); + master_node->lock_.release(); + + //clear the partition info of removed node. 
+ Catalog* catalog = Catalog::getInstance(); + vector table_id_list = catalog->GetAllTablesID(); + for (auto table_id : table_id_list){ + TableDescriptor* table = catalog->getTable(table_id); + if(table != NULL){ + vector* projection_list = table ->GetProjectionList(); + if(projection_list != NULL) + { + for(auto projection : *projection_list){ + Partitioner* partitioner = projection->getPartitioner(); + if(partitioner != NULL){ + vector partition_info_list = partitioner->getPartitionList(); + if(partition_info_list.size() != 0){ + for(auto partition_info : partition_info_list){ + if(partition_info->get_location() == node_id){ + LOG(INFO)<unbind_all_blocks(); + } + } + } + } + } + } + } + } +} +void MasterNode::SyncNodeList(MasterNode* master_node) +{ + try{ + caf::scoped_actor self; + for (auto it = master_node->node_id_to_addr_.begin(); it != master_node->node_id_to_addr_.end(); ++it) + { + self->send(master_node->node_id_to_actor_.at(it->first), SyncNodeInfo::value,*((BaseNode*)master_node)); + LOG(INFO)<<" node info changed ,start sync to node: "<first<send(it->second, ExitAtom::value); + } + self->send(master_actor_, ExitAtom::value); +} +} // namespace claims diff --git a/node_manager/master_node.h b/node_manager/master_node.h new file mode 100644 index 000000000..00ee2fab2 --- /dev/null +++ b/node_manager/master_node.h @@ -0,0 +1,81 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/node_manager/master_node.h + * + * Created on: Jan 4, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef NODE_MANAGER_MASTER_NODE_H_ +#define NODE_MANAGER_MASTER_NODE_H_ +#include +#include +#include +#include +#include +#include +#include "caf/all.hpp" +#include "caf/io/all.hpp" +#include + +#include "./base_node.h" +#include "../common/error_define.h" +#include "../common/ids.h" +using caf::behavior; +using caf::event_based_actor; +using std::atomic_uint; +using std::map; +using std::pair; +using std::string; +using std::vector; +namespace claims { + +class MasterNode : public BaseNode { + public: + friend class MasterNodeActor; + // class MasterNodeActor; + static MasterNode* GetInstance(); + virtual ~MasterNode(); + void PrintNodeList(); + void FinishAllNode(); + unsigned int AddOneNode(string node_ip, uint16_t node_port); + void SyncNodeList(MasterNode* master_node); + void RemoveOneNode(unsigned int node_id, MasterNode* master_node); + RetCode BroastNodeInfo(const unsigned int& node_id, const string& node_ip, + const uint16_t& node_port); + MasterNode(); + MasterNode(string node_ip, uint16_t node_port); + + private: + void CreateActor(); + + private: + static MasterNode* instance_; + atomic_uint node_id_gen_; + std::unordered_map node_id_to_heartbeat_; +}; + +} // namespace claims + + +#endif // NODE_MANAGER_MASTER_NODE_H_ diff --git a/node_manager/node_main.cpp b/node_manager/node_main.cpp new file mode 100644 index 000000000..db8535ed3 --- /dev/null +++ b/node_manager/node_main.cpp @@ -0,0 
+1,60 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/node_manager/node_main.cpp + * + * Created on: Jan 4, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ +#include +#include +#include + +#include "./master_node.h" +#include "./slave_node.h" +#include "../common/log/logging.h" +#include "../Config.h" +using claims::MasterNode; +using claims::SlaveNode; +int main(int argc, char** argv) { + claims::common::Logging claims_logging(argv[0]); + Config::getInstance(); + MasterNode* master_node = new MasterNode(); + // usleep(1000); + SlaveNode* slave_node1 = new SlaveNode(); + slave_node1->RegisterToMaster(true); + SlaveNode* slave_node2 = new SlaveNode(); + slave_node2->RegisterToMaster(true); + SlaveNode* slave_node3 = new SlaveNode(); + slave_node3->RegisterToMaster(true); + + master_node->PrintNodeList(); + // usleep(5000); + master_node->FinishAllNode(); + delete master_node; + delete slave_node1; + delete slave_node2; + delete slave_node3; + caf::await_all_actors_done(); + std::cout << "all actors done!" 
<< std::endl; + return 0; +} diff --git a/node_manager/slave_node.cpp b/node_manager/slave_node.cpp new file mode 100644 index 000000000..71f05e87b --- /dev/null +++ b/node_manager/slave_node.cpp @@ -0,0 +1,409 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/node_manager/slave_node.cpp + * + * Created on: Jan 4, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ +#include +#include +#include +#include +#include "./slave_node.h" + +#include +#include + +#include "../common/Message.h" +#include "caf/io/all.hpp" + +#include "./base_node.h" +#include "../common/ids.h" +#include "../Environment.h" +#include "../storage/StorageLevel.h" +#include "caf/all.hpp" +#include +#include +#include "../common/error_define.h" +using caf::io::remote_actor; +using caf::make_message; +using caf::message; +using std::make_pair; +using std::unordered_map; +using claims::common::rConRemoteActorError; +using claims::common::rRegisterToMasterTimeOut; +using claims::common::rRegisterToMasterError; +namespace claims { +SlaveNode* SlaveNode::instance_ = 0; +class SlaveNodeActor : public event_based_actor { + public: + SlaveNodeActor(SlaveNode* slave_node) : slave_node_(slave_node) {} + + behavior make_behavior() override { + LOG(INFO) << "slave node actor is OK!" << std::endl; + return { + [=](ExitAtom) { + LOG(INFO) << "slave " << slave_node_->get_node_id() << " finish!" + << endl; + quit(); + }, + [=](SendPlanAtom, string str, u_int64_t query_id, + u_int32_t segment_id) { + LOG(INFO) << "coor node receive one plan " << query_id << " , " + << segment_id << " plan string size= " << str.length(); + PhysicalQueryPlan* new_plan = new PhysicalQueryPlan( + PhysicalQueryPlan::TextDeserializePlan(str)); + LOG(INFO) << "coor node deserialized plan " << query_id << " , " + << segment_id; + ticks start = curtick(); + Environment::getInstance() + ->getIteratorExecutorSlave() + ->createNewThreadAndRun(new_plan); + + string log_message = + "Slave: received plan segment and create new thread and run it! 
"; + LOG(INFO) << log_message << query_id << " , " << segment_id + << " , createNewThreadAndRun:" << getMilliSecond(start); + }, + [=](AskExchAtom, ExchangeID exch_id) -> message { + auto addr = + Environment::getInstance()->getExchangeTracker()->GetExchAddr( + exch_id); + return make_message(OkAtom::value, addr.ip, addr.port); + }, + [=](BindingAtom, const PartitionID partition_id, + const unsigned number_of_chunks, + const StorageLevel desirable_storage_level) -> message { + LOG(INFO) << "receive binding message!" << endl; + Environment::getInstance()->get_block_manager()->AddPartition( + partition_id, number_of_chunks, desirable_storage_level); + return make_message(OkAtom::value); + }, + [=](UnBindingAtom, const PartitionID partition_id) -> message { + LOG(INFO) << "receive unbinding message~!" << endl; + Environment::getInstance()->get_block_manager()->RemovePartition( + partition_id); + return make_message(OkAtom::value); + }, + [=](BroadcastNodeAtom, const unsigned int& node_id, + const string& node_ip, const uint16_t& node_port) { + LOG(INFO) << "receive broadcast message~!" 
<< endl; + //check if this node is Reregister node + unsigned int tmp_node_id; + bool is_reregister = false; + for(auto it = slave_node_->node_id_to_addr_.begin(); + it != slave_node_->node_id_to_addr_.end(); ++it){ + if(it->second.first == node_ip){ + is_reregister = true; + tmp_node_id = it->first; + } + } + if(is_reregister){ + slave_node_->node_id_to_addr_.erase(tmp_node_id); + slave_node_->node_id_to_actor_.erase(tmp_node_id); + LOG(INFO)<<"slave "<get_node_id()<<"remove old node :"<AddOneNode(node_id, node_ip, node_port); + }, + [=](ReportSegESAtom, NodeSegmentID node_segment_id, int exec_status, + string exec_info) -> message { + bool ret = + Environment::getInstance() + ->get_stmt_exec_tracker() + ->UpdateSegExecStatus( + node_segment_id, + (SegmentExecStatus::ExecStatus)exec_status, exec_info); + LOG(INFO) << node_segment_id.first << " , " << node_segment_id.second + << " after receive: " << exec_status << " , " << exec_info; + if (false == ret) { + return make_message(CancelPlanAtom::value); + } + return make_message(OkAtom::value); + }, + [=](HeartBeatAtom){ + try{ + slave_node_->master_actor_= caf::io::remote_actor(slave_node_->master_addr_.first, + slave_node_->master_addr_.second); + sync_send(slave_node_->master_actor_, HeartBeatAtom::value, slave_node_->get_node_id(), + slave_node_->node_addr_.first,slave_node_->node_addr_.second).then( + [=](OkAtom){ + slave_node_->heartbeat_count_ = 0; + }, + [=](OkAtom ,unsigned int node_id,const BaseNode& node){ + /* + * In this condition, master is down, and restart quickly. + * The slave node is still send heartbeat. + * master will give is a new id like reregister. 
+ */ + slave_node_->set_node_id(node_id); + Environment::getInstance()->setNodeID(node_id); + slave_node_->node_id_to_addr_.clear(); + slave_node_->node_id_to_actor_.clear(); + slave_node_->node_id_to_addr_.insert(node.node_id_to_addr_.begin(), + node.node_id_to_addr_.end()); + for (auto it = slave_node_->node_id_to_addr_.begin(); + it != slave_node_->node_id_to_addr_.end(); ++it) { + auto actor = + remote_actor(it->second.first, it->second.second); + slave_node_->node_id_to_actor_.insert(make_pair(it->first, actor)); + } + LOG(INFO) << "register node succeed in heartbeart stage! insert " + << node.node_id_to_addr_.size() << " nodes"; + slave_node_->heartbeat_count_ = 0; + BlockManager::getInstance()->initialize(); + } + ); + }catch(caf::network_error& e){ + LOG(WARNING) << "node"<get_node_id() + <<"can't send heartbeart to master"<get_node_id() + <<"occur bind failure"<heartbeat_count_++; + if(slave_node_->heartbeat_count_ > kTimeout*2){ + // slave lost master. + LOG(INFO)<<"slave"<node_id_<<"lost heartbeat from master, start register again"<node_id_<<"lost heartbeat from master, start register again"<RegisterToMaster(false); + if (ret == rSuccess){ + LOG(INFO)<<"reregister successfully , now the node id is "<get_node_id()<get_node_id()<initialize(); + is_success = true; + unbecome(); + }else{ + //when slave Register fails, + caf::scoped_actor self; + delayed_send(this,std::chrono::seconds(kTimeout),RegisterAtom::value); + LOG(WARNING)<<"register fail, slave will register in 5 seconds"<send(this,RegisterAtom::value); + } + } + delayed_send(this, std::chrono::seconds(kTimeout/5), HeartBeatAtom::value); + }, + [=](SyncNodeInfo, const BaseNode& node){ + slave_node_->node_id_to_addr_.clear(); + slave_node_->node_id_to_actor_.clear(); + slave_node_->node_id_to_addr_.insert(node.node_id_to_addr_.begin(), + node.node_id_to_addr_.end()); + for (auto it = slave_node_->node_id_to_addr_.begin(); + it != slave_node_->node_id_to_addr_.end(); ++it) { + try { + auto actor = + 
remote_actor(it->second.first, it->second.second); + slave_node_->node_id_to_actor_.insert(make_pair(it->first, actor)); + }catch (caf::network_error& e) { + LOG(WARNING) << "cann't connect to node ( " <first<< " , "<second.first << + it->second.second<< " ) and create remote actor failed!!"; + } + } + LOG(INFO) <<"node"<get_node_id() + <<"update nodelist info successfully, now size is"<node_id_to_addr_.size()<> + [=]() { LOG(WARNING) << "unknown message at slave node!!!" << endl; } + + }; + } + + SlaveNode* slave_node_; +}; + +SlaveNode* SlaveNode::GetInstance() { + if (NULL == instance_) { + instance_ = new SlaveNode(); + } + return instance_; +} +RetCode SlaveNode::AddOneNode(const unsigned int& node_id, + const string& node_ip, + const uint16_t& node_port) { + lock_.acquire(); + RetCode ret = rSuccess; + node_id_to_addr_.insert(make_pair(node_id, make_pair(node_ip, node_port))); + try { + auto actor = remote_actor(node_ip, node_port); + node_id_to_actor_.insert(make_pair(node_id, actor)); + } catch (caf::network_error& e) { + LOG(WARNING) << "cann't connect to node ( " << node_ip << " , " << node_port + << " ) and create remote actor failed!!"; + ret = rConRemoteActorError; + } + LOG(INFO) << "slave : get broadested node( " << node_id << " < " << node_ip + << " " << node_port << " > )" << std::endl; + lock_.release(); + return rSuccess; +} +SlaveNode::SlaveNode() : BaseNode() { + instance_ = this; + CreateActor(); +} +SlaveNode::SlaveNode(string node_ip, uint16_t node_port) + : BaseNode(node_ip, node_port) { + instance_ = this; + CreateActor(); +} +SlaveNode::~SlaveNode() { instance_ = NULL; } +void SlaveNode::CreateActor() { + auto slave_actor = caf::spawn(this); + bool is_done = false; + + for (int try_time = 0; try_time < 20 && !is_done; ++try_time) { + try { + master_actor_ = + caf::io::remote_actor(master_addr_.first, master_addr_.second); + is_done = true; + } catch (caf::network_error& e) { + cout << "slave node connect remote_actor error due to network 
" + "error! will try " << 19 - try_time << " times" << endl; + } + sleep(1); + } + if (!is_done) { + cout << "Node(" << get_node_ip() << " , " << get_node_port() + << ") register to master(" << master_addr_.first << " , " + << master_addr_.second + << ") failed after have tried 20 times! please check ip and " + "port, then try again!!!" << endl; + LOG(ERROR) << "Node(" << get_node_ip() << " , " << get_node_port() + << ") register to master(" << master_addr_.first << " , " + << master_addr_.second + << ") failed after have tried 20 times! please check ip and " + "port, then try again!!!" << endl; + exit(0); + } else { + LOG(INFO) << "the node connect to master succeed!!!"; + cout << "the node connect to master succeed!!!" << endl; + } + try { + caf::io::publish(slave_actor, get_node_port(), nullptr, 1); + LOG(INFO) << "slave node publish port " << get_node_port() + << " successfully!"; + } catch (caf::bind_failure& e) { + LOG(ERROR) << "slave node binds port error when publishing"; + } catch (caf::network_error& e) { + LOG(ERROR) << "slave node publish error due to network error!"; + } +} + +RetCode SlaveNode::RegisterToMaster(bool isFirstRegister) { + RetCode ret = rSuccess; + caf::scoped_actor self; + LOG(INFO)<<"slave just RegisterToMaster!!"<sync_send(master_actor_, RegisterAtom::value, get_node_ip(), + get_node_port()) + .await([=](OkAtom, const unsigned int& id, const BaseNode& node) { + set_node_id(id); + Environment::getInstance()->setNodeID(id); + heartbeat_count_ = 0; + node_id_to_addr_.clear(); + node_id_to_actor_.clear(); + node_id_to_addr_.insert(node.node_id_to_addr_.begin(), + node.node_id_to_addr_.end()); + for (auto it = node_id_to_addr_.begin(); + it != node_id_to_addr_.end(); ++it) { + auto actor = + remote_actor(it->second.first, it->second.second); + node_id_to_actor_.insert(make_pair(it->first, actor)); + } + LOG(INFO) << "register node succeed! 
insert " + << node.node_id_to_addr_.size() << " nodes"; + if(isFirstRegister){ + caf::scoped_actor self1; + auto slave_self = caf::io::remote_actor(get_node_ip(), get_node_port()); + self1->send(slave_self,HeartBeatAtom::value); + } + }, + [&](const caf::sync_exited_msg& msg) { + LOG(WARNING) << "register link fail"; + }, + caf::after(std::chrono::seconds(kTimeout)) >> + [&]() { + ret = rRegisterToMasterTimeOut; + LOG(WARNING) << "slave register timeout!"; + }); + } catch (caf::network_error& e) { + ret = rRegisterToMasterError; + LOG(WARNING) << "cann't connect to " << master_addr_.first << " , " + << master_addr_.second << " in register"; + } + return ret; +} +}/* namespace claims */ + +//RetCode SlaveNode::reRegisterToMaster() { +// RetCode ret = rSuccess; +// caf::scoped_actor self; +// LOG(INFO)<<"slave reRegisterToMaster!!"<sync_send(master_actor_, RegisterAtom::value, get_node_ip(), +// get_node_port()) +// .await([=](OkAtom, const unsigned int& id, const BaseNode& node) { +// set_node_id(id); +// Environment::getInstance()->setNodeID(id); +// heartbeat_count_ = 0; +// node_id_to_addr_.clear(); +// node_id_to_actor_.clear(); +// node_id_to_addr_.insert(node.node_id_to_addr_.begin(), +// node.node_id_to_addr_.end()); +// for (auto it = node_id_to_addr_.begin(); +// it != node_id_to_addr_.end(); ++it) { +// auto actor = +// remote_actor(it->second.first, it->second.second); +// node_id_to_actor_.insert(make_pair(it->first, actor)); +// } +// LOG(INFO) << "register node succeed! 
insert " +// << node.node_id_to_addr_.size() << " nodes"; +// }, +// [&](const caf::sync_exited_msg& msg) { +// LOG(WARNING) << "register link fail"; +// }, +// caf::after(std::chrono::seconds(kTimeout)) >> +// [&]() { +// ret = rRegisterToMasterTimeOut; +// LOG(WARNING) << "slave register timeout!"; +// }); +// } catch (caf::network_error& e) { +// ret = rRegisterToMasterError; +// LOG(WARNING) << "cann't connect to " << master_addr_.first << " , " +// << master_addr_.second << " in register"; +// } +// return ret; +//} diff --git a/node_manager/slave_node.h b/node_manager/slave_node.h new file mode 100644 index 000000000..e4459936f --- /dev/null +++ b/node_manager/slave_node.h @@ -0,0 +1,62 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/node_manager/slave_node.h + * + * Created on: Jan 4, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef NODE_MANAGER_SLAVE_NODE_H_ +#define NODE_MANAGER_SLAVE_NODE_H_ +#include +#include "./base_node.h" +#include "../common/error_define.h" +#include "caf/all.hpp" +#include "caf/behavior.hpp" +#include +using caf::event_based_actor; +using std::string; +using caf::behavior; + +namespace claims { +class SlaveNode : public BaseNode { + public: + friend class SlaveNodeActor; + // class SlaveNodeActor; + SlaveNode(); + SlaveNode(string node_ip, uint16_t node_port); + void CreateActor(); + virtual ~SlaveNode(); + RetCode RegisterToMaster(bool isFirstRegister); + RetCode reRegisterToMaster(); + static SlaveNode* GetInstance(); + RetCode AddOneNode(const unsigned int& node_id, const string& node_ip, + const uint16_t& node_port); + private: + static SlaveNode* instance_; + unsigned int heartbeat_count_; +}; + +} // namespace claims + +#endif // NODE_MANAGER_SLAVE_NODE_H_ diff --git a/physical_operator/Makefile.am b/physical_operator/Makefile.am index fd9a75352..a2de978f2 100644 --- a/physical_operator/Makefile.am +++ b/physical_operator/Makefile.am @@ -1,8 +1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux @@ -19,12 +18,12 @@ LDADD = ../Executor/libexecutor.a \ ../common/Schema/libschema.a \ ../common/Block/libblock.a \ ../utility/libutility.a \ - ../codegen/codegen.a\ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a \ + ../codegen/codegen.a \ + ../stmt_handler/libstmthandler.a \ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ - ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a + 
${BOOST_HOME}/stage/lib/libboost_serialization.so noinst_LIBRARIES=libphysicalqueryplan.a libphysicalqueryplan_a_SOURCES = \ @@ -48,7 +47,9 @@ libphysicalqueryplan_a_SOURCES = \ physical_filter.cpp physical_filter.h \ physical_projection_scan.cpp physical_projection_scan.h \ physical_nest_loop_join.h physical_nest_loop_join.cpp \ - physical_delete_filter.h physical_delete_filter.cpp + physical_delete_filter.h physical_delete_filter.cpp \ + physical_outer_hash_join.h physical_outer_hash_join.cpp\ + segment.cpp segment.h SUBDIRS = DIST_SUBDIRS = diff --git a/physical_operator/combine_tuple.cpp b/physical_operator/combine_tuple.cpp index 9f58f84ee..3c0163314 100755 --- a/physical_operator/combine_tuple.cpp +++ b/physical_operator/combine_tuple.cpp @@ -40,7 +40,8 @@ CombineTuple::State::State(std::vector input_schemas, CombineTuple::~CombineTuple() {} -bool CombineTuple::Open(const PartitionOffset &partition_offset) { +bool CombineTuple::Open(SegmentExecStatus *const exec_status, + const PartitionOffset &partition_offset) { // first std::vector v_b; for (unsigned i = 0; i < state_.children_.size(); i++) { @@ -51,7 +52,7 @@ bool CombineTuple::Open(const PartitionOffset &partition_offset) { free_block_stream_list_.push_back(v_b); for (unsigned i = 0; i < state_.children_.size(); i++) { - if (!state_.children_[i]->Open(partition_offset)) { + if (!state_.children_[i]->Open(exec_status, partition_offset)) { // TODO: handle the failure return false; } @@ -59,7 +60,8 @@ bool CombineTuple::Open(const PartitionOffset &partition_offset) { return true; } -bool CombineTuple::Next(BlockStreamBase *block) { +bool CombineTuple::Next(SegmentExecStatus *const exec_status, + BlockStreamBase *block) { unsigned total_length_ = 0; for (unsigned i = 0; i < state_.input_schemas_.size(); i++) { total_length_ += state_.input_schemas_[i]->getTupleMaxSize(); @@ -76,7 +78,7 @@ bool CombineTuple::Next(BlockStreamBase *block) { for (unsigned j = 0; j < state_.children_.size(); j++) { if ((cur = 
rb.bsti_list_[j]->currentTuple()) == 0) { rb.buffer_[j]->setEmpty(); - if (state_.children_[j]->Next(rb.buffer_[j]) == false) { + if (state_.children_[j]->Next(exec_status, rb.buffer_[j]) == false) { if (!block->Empty()) { AtomicPushRemainingBlock(rb); return true; @@ -122,12 +124,12 @@ bool CombineTuple::Next(BlockStreamBase *block) { AtomicPushRemainingBlock(RemainingBlock(v_bsb, rb.bsti_list_)); - return Next(block); + return Next(exec_status, block); } -bool CombineTuple::Close() { +bool CombineTuple::Close(SegmentExecStatus *const exec_status) { for (unsigned i = 0; i < state_.children_.size(); i++) { - state_.children_[i]->Close(); + state_.children_[i]->Close(exec_status); } return true; } diff --git a/physical_operator/combine_tuple.h b/physical_operator/combine_tuple.h index 103e8de9c..ea11a7cdc 100755 --- a/physical_operator/combine_tuple.h +++ b/physical_operator/combine_tuple.h @@ -82,9 +82,10 @@ class CombineTuple : public PhysicalOperatorBase { CombineTuple(){}; virtual ~CombineTuple(); - bool Open(const PartitionOffset &partition_offset = 0); - bool Next(BlockStreamBase *block); - bool Close(); + bool Open(SegmentExecStatus *const exec_status, + const PartitionOffset &partition_offset = 0); + bool Next(SegmentExecStatus *const exec_status, BlockStreamBase *block); + bool Close(SegmentExecStatus *const exec_status); private: bool AtomicPopRemainingBlock(RemainingBlock &rb); diff --git a/physical_operator/exchange_merger.cpp b/physical_operator/exchange_merger.cpp old mode 100755 new mode 100644 index 13dffab89..1e39da6d3 --- a/physical_operator/exchange_merger.cpp +++ b/physical_operator/exchange_merger.cpp @@ -1,4 +1,9 @@ +#include +#include "../common/error_define.h" +#include "../common/Logging.h" +#include "../exec_tracker/segment_exec_status.h" +#include "../physical_operator/segment.h" /* * Copyright [2012-2015] DaSE@ECNU * @@ -62,11 +67,35 @@ namespace claims { namespace physical_operator { const int kBufferSizeInExchange = 1000; 
-ExchangeMerger::ExchangeMerger(State state) : state_(state) { +ExchangeMerger::ExchangeMerger(State state) + : state_(state), + all_merged_block_buffer_(NULL), + block_for_deserialization(NULL), + block_for_socket_(NULL), + is_registered_to_tracker_(false), + receiver_thread_id_(0), + sock_fd_(-1), + socket_port_(-1), + socket_fd_lower_list_(NULL), + epoll_fd_(-1) { + set_phy_oper_type(kPhysicalExchangeMerger); InitExpandedStatus(); + assert(state.partition_schema_.partition_key_index < 100); } -ExchangeMerger::ExchangeMerger() { InitExpandedStatus(); } +ExchangeMerger::ExchangeMerger() + : all_merged_block_buffer_(NULL), + block_for_deserialization(NULL), + block_for_socket_(NULL), + is_registered_to_tracker_(false), + receiver_thread_id_(0), + sock_fd_(-1), + socket_port_(-1), + socket_fd_lower_list_(NULL), + epoll_fd_(-1) { + InitExpandedStatus(); + set_phy_oper_type(kPhysicalExchangeMerger); +} ExchangeMerger::~ExchangeMerger() { if (NULL != state_.schema_) { delete state_.schema_; @@ -82,7 +111,10 @@ ExchangeMerger::~ExchangeMerger() { * exchange merger is at the end of one segment of plan, so it's the "stage_src" * for this stage */ -bool ExchangeMerger::Open(const PartitionOffset& partition_offset) { + +bool ExchangeMerger::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { + RETURN_IF_CANCELLED(exec_status); unsigned long long int start = curtick(); RegisterExpandedThreadToAllBarriers(); if (TryEntryIntoSerializedSection()) { // first arrived thread dose @@ -96,10 +128,12 @@ bool ExchangeMerger::Open(const PartitionOffset& partition_offset) { // buffer all deserialized blocks come from every socket all_merged_block_buffer_ = new BlockStreamBuffer( state_.block_size_, kBufferSizeInExchange, state_.schema_); + + RETURN_IF_CANCELLED(exec_status); + ExpanderTracker::getInstance()->addNewStageEndpoint( pthread_self(), LocalStageEndPoint(stage_src, "Exchange", all_merged_block_buffer_)); - // if one of block_for_socket is 
full, it will be deserialized into // block_for_deserialization and sended to all_merged_data_buffer block_for_deserialization = @@ -111,6 +145,9 @@ bool ExchangeMerger::Open(const PartitionOffset& partition_offset) { block_for_socket_[i] = new BlockContainer( block_for_deserialization->getSerializedBlockSize()); } + + RETURN_IF_CANCELLED(exec_status); + if (PrepareSocket() == false) return false; if (SetSocketNonBlocking(sock_fd_) == false) { return false; @@ -125,7 +162,12 @@ bool ExchangeMerger::Open(const PartitionOffset& partition_offset) { LOG(ERROR) << "Register Exchange with ID = " << state_.exchange_id_ << " fails!" << endl; } - + is_registered_to_tracker_ = true; +#ifdef CONNECTION_VERIFY + confirm_sender_time = 0; + frequence = 0; +#endif +#ifdef ExchangeSender if (IsMaster()) { /* According to a bug reported by dsc, the master exchange upper should * check whether other uppers have registered to exchangeTracker. @@ -143,20 +185,31 @@ bool ExchangeMerger::Open(const PartitionOffset& partition_offset) { "plan to all its lower senders" << endl; if (SerializeAndSendPlan() == false) return false; } +#endif + + RETURN_IF_CANCELLED(exec_status); + if (CreateReceiverThread() == false) { return false; } - CreatePerformanceInfo(); + if (!exec_status->is_cancelled()) { + CreatePerformanceInfo(); + } } /// A synchronization barrier, in case of multiple expanded threads + RETURN_IF_CANCELLED(exec_status); + BarrierArrive(); return true; } /** * return block from all_merged_block_buffer */ -bool ExchangeMerger::Next(BlockStreamBase* block) { +bool ExchangeMerger::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { while (true) { + RETURN_IF_CANCELLED(exec_status); + /* * As Exchange merger is a local stage beginner, ExchangeMerger::next will * return false in order to shrink the current work thread, if the @@ -193,17 +246,19 @@ bool ExchangeMerger::Next(BlockStreamBase* block) { } } -bool ExchangeMerger::Close() { +bool 
ExchangeMerger::Close(SegmentExecStatus* const exec_status) { LOG(INFO) << " exchange_merger_id = " << state_.exchange_id_ << " closed!" << " exhausted lower senders num = " << exhausted_lowers << " lower sender num = " << lower_num_ << endl; CancelReceiverThread(); CloseSocket(); - for (unsigned i = 0; i < lower_num_; i++) { - if (NULL != block_for_socket_[i]) { - delete block_for_socket_[i]; - block_for_socket_[i] = NULL; + if (NULL != block_for_socket_) { + for (unsigned i = 0; i < lower_num_; i++) { + if (NULL != block_for_socket_[i]) { + delete block_for_socket_[i]; + block_for_socket_[i] = NULL; + } } } if (NULL != block_for_deserialization) { @@ -219,11 +274,17 @@ bool ExchangeMerger::Close() { * of open() and next() can act correctly. */ ResetStatus(); - - Environment::getInstance()->getExchangeTracker()->LogoutExchange( - ExchangeID(state_.exchange_id_, partition_offset_)); + if (is_registered_to_tracker_) { + Environment::getInstance()->getExchangeTracker()->LogoutExchange( + ExchangeID(state_.exchange_id_, partition_offset_)); + } LOG(INFO) << "exchange merger id = " << state_.exchange_id_ << " is closed!" - << endl; +#ifdef CONNECTION_VERIFY + << " CONFIRM frequence:" << frequence + << " CONFIRM TIME:" << confirm_sender_time +#endif + ; + return true; } @@ -248,7 +309,8 @@ void ExchangeMerger::Print() { } bool ExchangeMerger::PrepareSocket() { struct sockaddr_in my_addr; - + struct sockaddr_in my_addr2; + socklen_t len = sizeof(my_addr2); // sock_fd_ is the socket of this node if ((sock_fd_ = socket(AF_INET, SOCK_STREAM, 0)) == -1) { LOG(ERROR) << "socket creation error!" << endl; @@ -257,18 +319,18 @@ bool ExchangeMerger::PrepareSocket() { my_addr.sin_family = AF_INET; /* apply for the port dynamically.*/ - if ((socket_port_ = PortManager::getInstance()->applyPort()) == 0) { - LOG(ERROR) << " exchange_id = " << state_.exchange_id_ - << " partition_offset = " << partition_offset_ - << " Fails to apply a port for the socket. 
Reason: the " - " PortManager is exhausted !" << endl; - return false; - } - LOG(INFO) << " exchange_id = " << state_.exchange_id_ - << " partition_offset = " << partition_offset_ - << " succeed applying one port !" << endl; - - my_addr.sin_port = htons(socket_port_); + /* if ((socket_port_ = PortManager::getInstance()->applyPort()) == 0) { + LOG(ERROR) << " exchange_id = " << state_.exchange_id_ + << " partition_offset = " << partition_offset_ + << " Fails to apply a port for the socket. Reason: the " + " PortManager is exhausted !" << endl; + return false; + } + LOG(INFO) << " exchange_id = " << state_.exchange_id_ + << " partition_offset = " << partition_offset_ + << " succeed applying one port !" << endl; + */ + my_addr.sin_port = htons(0); my_addr.sin_addr.s_addr = INADDR_ANY; bzero(&(my_addr.sin_zero), 8); @@ -278,11 +340,19 @@ bool ExchangeMerger::PrepareSocket() { if (bind(sock_fd_, (struct sockaddr*)&my_addr, sizeof(struct sockaddr)) == -1) { + PLOG(ERROR) << " exchange_id = " << state_.exchange_id_ + << " partition_offset = " << partition_offset_ + << " bind errors!" << endl; + return false; + } + + if (getsockname(sock_fd_, (struct sockaddr*)&my_addr2, &len) == -1) { LOG(ERROR) << " exchange_id = " << state_.exchange_id_ - << " partition_offset = " << partition_offset_ << " bind errors!" - << endl; + << " partition_offset = " << partition_offset_ + << " getsockname error!" 
<< endl; return false; } + socket_port_ = ntohs(my_addr2.sin_port); if (listen(sock_fd_, lower_num_) == -1) { LOG(ERROR) << " exchange_id = " << state_.exchange_id_ @@ -300,18 +370,33 @@ bool ExchangeMerger::PrepareSocket() { void ExchangeMerger::CloseSocket() { /* close the epoll fd */ - FileClose(epoll_fd_); + if (epoll_fd_ > 2) { + FileClose(epoll_fd_); + } /* colse the sockets of the lowers*/ - for (unsigned i = 0; i < lower_num_; i++) { - if (socket_fd_lower_list_[i] > 2) { - FileClose(socket_fd_lower_list_[i]); + if (socket_fd_lower_list_) { + for (unsigned i = 0; i < lower_num_; i++) { + if (socket_fd_lower_list_[i] > 2) { + FileClose(socket_fd_lower_list_[i]); + } } } +#ifdef CONNECTION_VERIFY + for (auto& fd : lower_sock_fd_list_) { + LOG(INFO) << " exchange_id = " << state_.exchange_id_ + << " partition_offset = " << partition_offset_ + << "CloseSocket:" << fd; + FileClose(fd); + } +#endif /* close the socket of this exchange*/ - FileClose(sock_fd_); - + if (sock_fd_ > 2) { + FileClose(sock_fd_); + } /* return the applied port to the port manager*/ - PortManager::getInstance()->returnPort(socket_port_); + // if (socket_port_ > 0) { + // PortManager::getInstance()->returnPort(socket_port_); + // } } bool ExchangeMerger::RegisterExchange() { @@ -412,18 +497,19 @@ bool ExchangeMerger::CreateReceiverThread() { int error = 0; error = pthread_create(&receiver_thread_id_, NULL, Receiver, this); if (0 != error) { - LOG(ERROR) << " exchange_id = " << state_.exchange_id_ - << " partition_offset = " << partition_offset_ - << " merger Failed to create receiver thread." << endl; + PLOG(ERROR) << " exchange_id = " << state_.exchange_id_ + << " partition_offset = " << partition_offset_ + << " merger Failed to create receiver thread." 
<< endl; return false; } return true; } void ExchangeMerger::CancelReceiverThread() { - pthread_cancel(receiver_thread_id_); - void* res = 0; - while (res != PTHREAD_CANCELED) { + if (receiver_thread_id_ != 0) { + pthread_cancel(receiver_thread_id_); + void* res = 0; pthread_join(receiver_thread_id_, &res); + receiver_thread_id_ = 0; } } @@ -438,6 +524,11 @@ void* ExchangeMerger::Receiver(void* arg) { struct epoll_event event; struct epoll_event* events; int status; +#ifdef CONNECTION_VERIFY + stringstream ss; + ss << "EXCHID" << Pthis->state_.exchange_id_; + string lower_passwd = ss.str(); +#endif // create epoll Pthis->epoll_fd_ = epoll_create1(0); if (Pthis->epoll_fd_ == -1) { @@ -464,6 +555,7 @@ void* ExchangeMerger::Receiver(void* arg) { std::vector finish_times; // in ms while (true) { usleep(1); + pthread_testcancel(); const int event_count = epoll_wait(Pthis->epoll_fd_, events, Pthis->lower_num_, -1); for (int i = 0; i < event_count; i++) { @@ -474,7 +566,8 @@ void* ExchangeMerger::Receiver(void* arg) { } LOG(WARNING) << " exchange_id = " << Pthis->state_.exchange_id_ << " partition_offset = " << Pthis->partition_offset_ - << " epoll error,reason: " << strerror(errno) + << " epoll error=" << errno + << " reason: " << strerror(errno) << " close fd = " << events[i].data.fd << endl; FileClose(events[i].data.fd); continue; @@ -483,6 +576,8 @@ void* ExchangeMerger::Receiver(void* arg) { * more incoming connections. 
*/ while (true) { + pthread_testcancel(); + sockaddr in_addr; socklen_t in_len; int infd; @@ -490,6 +585,11 @@ void* ExchangeMerger::Receiver(void* arg) { in_len = sizeof in_addr; infd = accept(Pthis->sock_fd_, &in_addr, &in_len); +#ifdef GLOG_STATUS + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << "After accept fd:" << infd; +#endif if (infd == -1) { if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) { /* all the incoming connections are processed.*/ @@ -501,19 +601,49 @@ void* ExchangeMerger::Receiver(void* arg) { break; } } + +// for debug useless now +#if 1 status = getnameinfo(&in_addr, in_len, hbuf, sizeof(hbuf), sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV); if (0 == status) { +#ifdef GLOG_STATUS LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ << " partition_offset = " << Pthis->partition_offset_ << " Accepted connection on descriptor " << infd << " host= " << hbuf << " port= " << sbuf << endl; +#endif Pthis->lower_ip_list_.push_back(hbuf); +#ifdef CONNECTION_VERIFY + Pthis->lower_fd_to_ip_[infd] = hbuf; +#else Pthis->lower_sock_fd_to_id_[infd] = Pthis->lower_ip_list_.size() - 1; +#endif +/* +for (auto &it : Pthis->lower_fd_to_ip_) { + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ +<< " partition_offset = " << Pthis->partition_offset_ + << it.second << " fd=" << it.first; + } + +for_each(Pthis->state_.lower_id_list_.begin(), + Pthis->state_.lower_id_list_.end(), + [=](const int s) { + + LOG(INFO) << " exchange_id = " << +Pthis->state_.exchange_id_ + << " partition_offset = " << +Pthis->partition_offset_ + << " lower_id:" << s << " "; }); + */ + +#ifndef CONNECTION_VERIFY assert(Pthis->lower_ip_list_.size() <= Pthis->state_.lower_id_list_.size()); +#endif } +#endif /*Make the incoming socket non-blocking and add it to the list of fds * to monitor.*/ if (!Pthis->SetSocketNonBlocking(infd)) { @@ -528,13 +658,128 @@ void* ExchangeMerger::Receiver(void* 
arg) { << " epoll_ctl error2" << endl; return NULL; } +#ifdef CONNECTION_VERIFY + Pthis->lower_fd_to_passwd_[infd] = ""; +#ifdef GLOG_STATUS + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << " After epoll ctl a connect fd:" << infd; +#endif +/* +for (auto &it : Pthis->lower_fd_to_passwd_) { + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << " fd = " << it.first; + }*/ +#endif } continue; } else { /* We have data on the fd waiting to be read.*/ int done = 0; + int byte_received = 0; +#ifdef CONNECTION_VERIFY + char lower_passwd_buf[64]; +#endif + while (true) { - int byte_received; + pthread_testcancel(); +#ifdef CONNECTION_VERIFY // verify connection passwd + ticks startconfirm = curtick(); + + if (Pthis->lower_sock_fd_list_.find(events[i].data.fd) == + Pthis->lower_sock_fd_list_.end()) { + memset(lower_passwd_buf, 0, sizeof(lower_passwd_buf)); + + int lower_passwd_size = lower_passwd.length(); + int rest_passwd_size = + lower_passwd_size - + Pthis->lower_fd_to_passwd_[events[i].data.fd].length(); +#ifdef GLOG_STATUS + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << " DO CONFIRM THIS CONNECTION fd:[" << events[i].data.fd + << "] lower_passwd_size:[" << lower_passwd_size + << "] passwd:[" + << Pthis->lower_fd_to_passwd_[events[i].data.fd] + << "] passwd length:[" + << Pthis->lower_fd_to_passwd_[events[i].data.fd].length() + << "] rest_passwd_size:[" << rest_passwd_size << "]"; +#endif + byte_received = + read(events[i].data.fd, lower_passwd_buf, rest_passwd_size); + + if (byte_received == -1 || byte_received == 0) { +#ifdef GLOG_STATUS + LOG(WARNING) << "byte_received:" << byte_received << " error."; +#endif + break; + } +#ifdef GLOG_STATUS + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << 
Pthis->partition_offset_ + << " fd:" << events[i].data.fd << " byte_received: [" + << byte_received << "] lower_passwd_buf:[" + << lower_passwd_buf << "]"; +#endif + rest_passwd_size -= byte_received; + Pthis->lower_fd_to_passwd_[events[i].data.fd] += lower_passwd_buf; + if (rest_passwd_size > 0) { + continue; + } + + if (lower_passwd.compare( + Pthis->lower_fd_to_passwd_[events[i].data.fd]) == 0) { +#ifdef GLOG_STATUS + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << "this exchange pwd:[" << lower_passwd << "] and fd:[" + << events[i].data.fd << "]'s passwd:[" + << Pthis->lower_fd_to_passwd_[events[i].data.fd] + << "] add this fd into lower_sock_fd_list_"; +#endif + Pthis->lower_sock_fd_list_.insert(events[i].data.fd); + Pthis->lower_sock_fd_to_id_[events[i].data.fd] = + Pthis->lower_sock_fd_list_.size() - 1; + Pthis->ReplyAllBlocksConsumed(events[i].data.fd); + /* + for ( auto & fd : Pthis->lower_sock_fd_list_ ) { + LOG(INFO) << " exchange_id = " << + Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << "Pthis->lower_sock_fd_list: " << fd; + }*/ + assert(Pthis->lower_sock_fd_list_.size() <= + Pthis->state_.lower_id_list_.size()); + } else { +#ifdef GLOG_STATUS + LOG(WARNING) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << "this exchange pwd:[" << lower_passwd + << "], Illegal connection passwd: [" + << Pthis->lower_fd_to_passwd_[events[i].data.fd] + << "], epoll del and close this connection"; +#endif + epoll_ctl(Pthis->epoll_fd_, EPOLL_CTL_DEL, events[i].data.fd, + &event); + FileClose(events[i].data.fd); + break; + } + } + + double endconfirm = getMilliSecond(startconfirm); + /* + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << + Pthis->partition_offset_ + << " fd:" << events[i].data.fd + << " find fd time:" << + endconfirm << "ms"; + */ + 
Pthis->confirm_sender_time += endconfirm; + Pthis->frequence++; + +#endif int socket_fd_index = Pthis->lower_sock_fd_to_id_[events[i].data.fd]; byte_received = read( events[i].data.fd, @@ -543,13 +788,15 @@ void* ExchangeMerger::Receiver(void* arg) { Pthis->block_for_socket_[socket_fd_index]->GetCurSize(), Pthis->block_for_socket_[socket_fd_index]->GetRestSizeToHandle()); if (byte_received == -1) { - if (errno == EAGAIN) { + if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) { /*We have read all the data,so go back to the loop.*/ break; } + LOG(WARNING) << " exchange_id = " << Pthis->state_.exchange_id_ << " partition_offset = " << Pthis->partition_offset_ - << " merger read error!" << endl; + << " merger read error!" + << " errno:" << errno << " errmsg:" << strerror(errno); done = 1; } else if (byte_received == 0) { /* End of file. The remote has closed the connection.*/ @@ -558,6 +805,10 @@ void* ExchangeMerger::Receiver(void* arg) { } /* The data is successfully read.*/ + // LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + // << " partition_offset = " << Pthis->partition_offset_ << + // events[i].data.fd << " receive DATA byte_received: [" << + // byte_received << "]"; Pthis->block_for_socket_[socket_fd_index]->IncreaseActualSize( byte_received); @@ -598,11 +849,12 @@ void* ExchangeMerger::Receiver(void* arg) { Pthis->sem_new_block_or_eof_.post( Pthis->number_of_registered_expanded_threads_); } else { - /** The newly obtained data block is the end-of-file. **/ +/** The newly obtained data block is the end-of-file. **/ +#ifdef GLOG_STATUS LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ << " partition_offset = " << Pthis->partition_offset_ << " This block is the last one." << endl; - +#endif finish_times.push_back(static_cast(getMilliSecond(start))); /** update the exhausted senders count and post @@ -620,42 +872,59 @@ void* ExchangeMerger::Receiver(void* arg) { * that the input data is completely received. 
*/ Pthis->all_merged_block_buffer_->setInputComplete(); - +#ifdef GLOG_STATUS /* print the finish times */ - // for (unsigned i = 0; i < finish_times.size(); i++) - // { - // printf("%d\t", finish_times[i]); - // } + for (unsigned i = 0; i < finish_times.size(); i++) { + // printf("%d\t", finish_times[i]); + LOG(INFO) << "FINISH TIMES:" << finish_times[i]; + } // printf("\t Var:%5.4f\n", get_stddev(finish_times)); + LOG(INFO) << "Var:" << get_stddev(finish_times); +#endif } - +#ifdef GLOG_STATUS LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ << " partition_offset = " << Pthis->partition_offset_ << " exhausted lowers = " << Pthis->exhausted_lowers << " senders have exhausted" << endl; - +#endif /** tell the Sender that all the block are consumed so that the * Sender can close the socket**/ - Pthis->ReplyAllBlocksConsumed(events[i].data.fd); + pthread_testcancel(); + Pthis->ReplyAllBlocksConsumed(events[i].data.fd); +#ifdef GLOG_STATUS LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ << " partition_offset = " << Pthis->partition_offset_ + << " fd = " << events[i].data.fd << " This notification (all the blocks in the socket buffer " - "are consumed) is replied to the lower " - << Pthis->lower_ip_list_[socket_fd_index] << endl; + "are consumed) is replied to the lower "; +// << Pthis->lower_ip_list_[socket_fd_index] << endl; +#endif } } if (done) { +#ifdef GLOG_STATUS LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ << " partition_offset = " << Pthis->partition_offset_ << " Closed connection on descriptor " << events[i].data.fd << " " << Pthis->lower_ip_list_ [Pthis->lower_sock_fd_to_id_[events[i].data.fd]]; + /* Closing the descriptor will make epoll remove it from the set of descriptors which are monitored. 
*/ + LOG(INFO) << " exchange_id = " << Pthis->state_.exchange_id_ + << " partition_offset = " << Pthis->partition_offset_ + << " Closed connection on descriptor " << events[i].data.fd; +#endif + + // Pthis->lower_sock_fd_list_.erase(events[i].data.fd); + epoll_ctl(Pthis->epoll_fd_, EPOLL_CTL_DEL, events[i].data.fd, &event); +#ifndef CONNECTION_VERIFY FileClose(events[i].data.fd); +#endif } } } @@ -674,9 +943,10 @@ void ExchangeMerger::SendBlockBufferedNotification(int target_socket_fd) { void ExchangeMerger::ReplyAllBlocksConsumed(int target_socket_fd) { char content = 'e'; if (send(target_socket_fd, &content, sizeof(char), MSG_WAITALL) == -1) { - LOG(ERROR) << " exchange_id = " << state_.exchange_id_ - << " partition_offset = " << partition_offset_ - << " merger reply all blocks consumed error!" << endl; + PLOG(ERROR) << " exchange_id = " << state_.exchange_id_ + << " partition_offset = " << partition_offset_ + << " merger reply all blocks consumed error! fd:" + << target_socket_fd << endl; return; } } @@ -717,6 +987,34 @@ void ExchangeMerger::ResetStatus() { lower_sock_fd_to_id_.clear(); lower_ip_list_.clear(); +#ifdef CONNECTION_VERIFY + lower_sock_fd_list_.clear(); +#endif +} +RetCode ExchangeMerger::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + PhysicalOperatorBase* ret_plan = NULL; + if (NULL != state_.child_) { + state_.child_->GetAllSegments(all_segments); + if (Config::pipelined_exchange) { + ExchangeSenderPipeline::State EIELstate( + state_.schema_->duplicateSchema(), state_.child_, + state_.upper_id_list_, state_.block_size_, state_.exchange_id_, + state_.partition_schema_); + ret_plan = new ExchangeSenderPipeline(EIELstate); + } else { + ExchangeSenderMaterialized::State EIELstate( + state_.schema_->duplicateSchema(), state_.child_, + state_.upper_id_list_, state_.block_size_, state_.exchange_id_, + state_.partition_schema_); + ret_plan = new ExchangeSenderMaterialized(EIELstate); + } + all_segments->push( + std::move(new 
Segment(ret_plan, state_.lower_id_list_, + state_.upper_id_list_, state_.exchange_id_))); + state_.child_ = NULL; + } + return ret; } } // namespace physical_operator } // namespace claims diff --git a/physical_operator/exchange_merger.h b/physical_operator/exchange_merger.h old mode 100755 new mode 100644 index c7678fa0d..61a84aac2 --- a/physical_operator/exchange_merger.h +++ b/physical_operator/exchange_merger.h @@ -36,6 +36,8 @@ #include #include #include +#include + #include "../physical_operator/physical_operator_base.h" #include "../utility/lock.h" #include "../Executor/IteratorExecutorMaster.h" @@ -48,6 +50,9 @@ #include "../common/Block/BlockStream.h" #include "../common/Block/BlockStreamBuffer.h" #include "../physical_operator/physical_operator.h" + +#define CONNECTION_VERIFY + namespace claims { namespace physical_operator { /** @@ -102,11 +107,13 @@ class ExchangeMerger : public PhysicalOperator { * 4. create receive thread, if one block is enough, then will be put into * all_merged_block_buffer */ - bool Open(const PartitionOffset& partition_offset = 0); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); /// fetch block from all_merged_block_buffer and return. 
- bool Next(BlockStreamBase* block); - bool Close(); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: /// prepare socket at this node, waiting senders connect it @@ -152,13 +159,25 @@ class ExchangeMerger : public PhysicalOperator { int epoll_fd_; int* socket_fd_lower_list_; std::vector lower_ip_list_; +#ifdef CONNECTION_VERIFY + std::map lower_fd_to_ip_; + std::map lower_fd_to_passwd_; +#endif pthread_t receiver_thread_id_; pthread_t debug_thread_id_; unsigned exhausted_lowers; unsigned partition_offset_; semaphore sem_new_block_or_eof_; +#ifdef CONNECTION_VERIFY + set lower_sock_fd_list_; +#endif std::map lower_sock_fd_to_id_; PerformanceInfo* perf_info_; + bool is_registered_to_tracker_; +#ifdef CONNECTION_VERIFY + double confirm_sender_time; + int frequence; +#endif private: friend class boost::serialization::access; diff --git a/physical_operator/exchange_sender.cpp b/physical_operator/exchange_sender.cpp old mode 100755 new mode 100644 index 8c50420fd..75482794b --- a/physical_operator/exchange_sender.cpp +++ b/physical_operator/exchange_sender.cpp @@ -35,6 +35,9 @@ #include "../common/Logging.h" #include "../common/ids.h" #include "../physical_operator/exchange_sender.h" + +#define CONNECTION_VERIFY + namespace claims { namespace physical_operator { ExchangeSender::ExchangeSender() {} @@ -47,7 +50,7 @@ ExchangeSender::~ExchangeSender() {} */ bool ExchangeSender::ConnectToUpper(const ExchangeID& exchange_id, const NodeID& id, int& sock_fd) const { - struct hostent* host; + // struct hostent* host; ExchangeTracker* et = Environment::getInstance()->getExchangeTracker(); int upper_port; NodeAddress upper_addr; @@ -57,12 +60,12 @@ bool ExchangeSender::ConnectToUpper(const ExchangeID& exchange_id, << exchange_id.exchange_id << std::endl; return false; } - - if ((host = gethostbyname(upper_addr.ip.c_str())) == 0) { - 
LOG(ERROR) << "gethostbyname errors!" << std::endl; - return false; - } - +#ifdef CONNECTION_VERIFY + stringstream ss; + ss << "EXCHID" << exchange_id.exchange_id; + string upper_passwd = ss.str(); + int upper_passwd_len = upper_passwd.length(); +#endif if ((sock_fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) { perror("socket creation errors!\n"); return false; @@ -70,8 +73,7 @@ bool ExchangeSender::ConnectToUpper(const ExchangeID& exchange_id, struct sockaddr_in serv_add; serv_add.sin_family = AF_INET; serv_add.sin_port = htons(atoi(upper_addr.port.c_str())); - serv_add.sin_addr = *((struct in_addr*)host->h_addr); - // serv_add.sin_addr.s_addr=inet_addr(host->h_name); + serv_add.sin_addr.s_addr = inet_addr(upper_addr.ip.c_str()); bzero(&(serv_add.sin_zero), 8); int returnvalue; @@ -79,12 +81,24 @@ bool ExchangeSender::ConnectToUpper(const ExchangeID& exchange_id, if ((returnvalue = connect(sock_fd, (struct sockaddr*)&serv_add, sizeof(struct sockaddr))) == -1) { LOG(ERROR) << "Fails to connect remote socket: " - << inet_ntoa(serv_add.sin_addr) << " , port= " << upper_port + << inet_ntoa(serv_add.sin_addr) << " , port= " << upper_addr.port << std::endl; return false; } - LOG(INFO) << "connected to the Master socket :" << returnvalue << std::endl; - + LOG(INFO) << "exchid=" << exchange_id.exchange_id + << "upper_offset=" << exchange_id.partition_offset + << " connected to the upper socket.("<< upper_addr.ip.c_str() <<":" << upper_addr.port.c_str() + << " sock_fd=" << sock_fd + <<") return value:" << returnvalue << std::endl; +#ifdef CONNECTION_VERIFY + if ((returnvalue = send(sock_fd, upper_passwd.c_str(), upper_passwd_len, 0)) == -1 ) { + LOG(ERROR) << "Failed to send acknowledgement to the upper socket. 
returnvalue:[" << returnvalue + << "] errno:[" << errno << "]"; + return false; + } + LOG(INFO) << "send acknowledgement to the upper socket: ("<< upper_passwd <<")" << std::endl; + WaitingForNotification(sock_fd); +#endif return true; } @@ -94,6 +108,7 @@ void ExchangeSender::WaitingForNotification(const int& target_socket_fd) const { if ((recvbytes = recv(target_socket_fd, &byte, sizeof(char), 0)) == -1) { LOG(ERROR) << "recv error!" << std::endl; } + LOG(INFO) << "wait for connection acknowledge notification:" << byte; } void ExchangeSender::WaitingForCloseNotification( @@ -101,9 +116,11 @@ void ExchangeSender::WaitingForCloseNotification( char byte; int recvbytes; if ((recvbytes = recv(target_socket_fd, &byte, sizeof(char), 0)) == -1) { - LOG(ERROR) << "recv error!" << std::endl; + LOG(ERROR) << "sock_fd:" << target_socket_fd + << " recv error!"; } else { - LOG(INFO) << " received close message from one merger" << endl; + LOG(INFO) << "sock_fd:" << target_socket_fd + << " received close message from one merger" << endl; } FileClose(target_socket_fd); } diff --git a/physical_operator/exchange_sender.h b/physical_operator/exchange_sender.h index 276747898..2bf9cb4bd 100755 --- a/physical_operator/exchange_sender.h +++ b/physical_operator/exchange_sender.h @@ -45,9 +45,12 @@ class ExchangeSender : public PhysicalOperatorBase { public: ExchangeSender(); virtual ~ExchangeSender(); - virtual bool Open(const PartitionOffset& part_off = 0) = 0; - virtual bool Next(BlockStreamBase* no_block) = 0; - virtual bool Close() = 0; + virtual bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& part_off = 0) = 0; + virtual bool Next(SegmentExecStatus* const exec_status, + BlockStreamBase* no_block) = 0; + virtual bool Close(SegmentExecStatus* const exec_status) = 0; + virtual void SetPartitionOffset(const int par_off) = 0; protected: // build socket connection with upper mergers diff --git a/physical_operator/exchange_sender_materialized.cpp 
b/physical_operator/exchange_sender_materialized.cpp index 617bff816..2d4660fb3 100755 --- a/physical_operator/exchange_sender_materialized.cpp +++ b/physical_operator/exchange_sender_materialized.cpp @@ -39,6 +39,7 @@ #include #include #include +#include #include #include "../configure.h" #include "../common/rename.h" @@ -46,16 +47,31 @@ #include "../Environment.h" #include "../Executor/exchange_tracker.h" #include "../physical_operator/exchange_sender.h" + namespace claims { namespace physical_operator { ExchangeSenderMaterialized::ExchangeSenderMaterialized(State state) - : state_(state), ExchangeSender() {} + : state_(state), + ExchangeSender(), + block_for_sending_(NULL), + block_for_serialization_(NULL), + block_stream_for_asking_(NULL), + partitioned_block_stream_(NULL), + partitioned_data_buffer_(NULL), + socket_fd_upper_list_(NULL) { + set_phy_oper_type(kphysicalExchangeSender); +} ExchangeSenderMaterialized::~ExchangeSenderMaterialized() {} -ExchangeSenderMaterialized::ExchangeSenderMaterialized() {} -bool ExchangeSenderMaterialized::Open(const PartitionOffset&) { - state_.child_->Open(state_.partition_offset_); +ExchangeSenderMaterialized::ExchangeSenderMaterialized() { + set_phy_oper_type(kphysicalExchangeSender); +} +bool ExchangeSenderMaterialized::Open(SegmentExecStatus* const exec_status, + const PartitionOffset&) { + RETURN_IF_CANCELLED(exec_status); + + state_.child_->Open(exec_status, state_.partition_offset_); /** get the number of mergers **/ nuppers_ = state_.upper_id_list_.size(); @@ -94,26 +110,36 @@ bool ExchangeSenderMaterialized::Open(const PartitionOffset&) { nuppers_, block_stream_for_asking_->getSerializedBlockSize(), 1000); /** connect to the mergers **/ + RETURN_IF_CANCELLED(exec_status); + for (unsigned upper_id = 0; upper_id < state_.upper_id_list_.size(); upper_id++) { + RETURN_IF_CANCELLED(exec_status); + if (!ConnectToUpper(ExchangeID(state_.exchange_id_, upper_id), state_.upper_id_list_[upper_id], 
socket_fd_upper_list_[upper_id])) { return false; } } + RETURN_IF_CANCELLED(exec_status); /** create the Sender thread **/ CreateWorkerThread(); return true; } -bool ExchangeSenderMaterialized::Next(BlockStreamBase* no_block) { +bool ExchangeSenderMaterialized::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* no_block) { void* tuple_from_child; void* tuple_in_cur_block_stream; while (true) { + RETURN_IF_CANCELLED(exec_status); + block_stream_for_asking_->setEmpty(); - if (state_.child_->Next(block_stream_for_asking_)) { + if (state_.child_->Next(exec_status, block_stream_for_asking_)) { + RETURN_IF_CANCELLED(exec_status); + /** a new block is obtained from child iterator **/ if (state_.partition_schema_.isHashPartition()) { BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator = @@ -156,6 +182,8 @@ bool ExchangeSenderMaterialized::Next(BlockStreamBase* no_block) { } } } else { + RETURN_IF_CANCELLED(exec_status); + /* the child iterator is exhausted. We add the remaining data in * partitioned data blocks into the buffer*/ for (unsigned i = 0; i < nuppers_; i++) { @@ -184,6 +212,8 @@ bool ExchangeSenderMaterialized::Next(BlockStreamBase* no_block) { child_exhausted_ = true; while (!partitioned_data_buffer_->isEmpty()) { + RETURN_IF_CANCELLED(exec_status); + usleep(1); } /* @@ -194,6 +224,8 @@ bool ExchangeSenderMaterialized::Next(BlockStreamBase* no_block) { LOG(INFO) << "Waiting for close notification!" 
<< std::endl; for (unsigned i = 0; i < nuppers_; i++) { + RETURN_IF_CANCELLED(exec_status); + WaitingForCloseNotification(socket_fd_upper_list_[i]); } return false; @@ -201,25 +233,25 @@ bool ExchangeSenderMaterialized::Next(BlockStreamBase* no_block) { } } -bool ExchangeSenderMaterialized::Close() { +bool ExchangeSenderMaterialized::Close(SegmentExecStatus* const exec_status) { Logging_ExpandableBlockStreamExchangeLM( "The sender thread is killed in the close() function!"); + assert(false); /* close the files*/ CloseDiskFiles(); /* Delete the files */ DeleteDiskFiles(); - state_.child_->Close(); - + state_.child_->Close(exec_status); delete block_stream_for_asking_; delete block_for_sending_; delete block_for_serialization_; for (unsigned i = 0; i < nuppers_; i++) { delete partitioned_block_stream_[i]; } - delete partitioned_data_buffer_; delete[] partitioned_block_stream_; + delete partitioned_data_buffer_; delete[] socket_fd_upper_list_; return true; @@ -361,7 +393,7 @@ void* ExchangeSenderMaterialized::debug(void* arg) { bool ExchangeSenderMaterialized::CreateWorkerThread() { if (true == g_thread_pool_used) { Environment::getInstance()->getThreadPool()->AddTask(MaterializeAndSend, - this); + this); } else { int error; error = pthread_create(&sender_thread_id_, NULL, MaterializeAndSend, this); @@ -404,5 +436,13 @@ std::string ExchangeSenderMaterialized::GetPartititionedFileName( << state_.partition_offset_ << "_" << partition_index; return file_name.str(); } +RetCode ExchangeSenderMaterialized::GetAllSegments( + stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + return state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/exchange_sender_materialized.h b/physical_operator/exchange_sender_materialized.h index 1a1ca7c75..999273dfb 100755 --- a/physical_operator/exchange_sender_materialized.h +++ 
b/physical_operator/exchange_sender_materialized.h @@ -34,6 +34,7 @@ #include #include #include +#include #include #include "../../common/Schema/Schema.h" #include "../../Executor/IteratorExecutorMaster.h" @@ -43,6 +44,7 @@ #include "../../common/Block/BlockStreamBuffer.h" #include "../../common/Logging.h" #include "../../common/partition_functions.h" +#include "../common/error_define.h" #include "../physical_operator/exchange_sender.h" namespace claims { @@ -86,9 +88,14 @@ class ExchangeSenderMaterialized : public ExchangeSender { explicit ExchangeSenderMaterialized(State state); ExchangeSenderMaterialized(); virtual ~ExchangeSenderMaterialized(); - bool Open(const PartitionOffset& part_off = 0); - bool Next(BlockStreamBase* no_block); - bool Close(); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& part_off = 0); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* no_block); + bool Close(SegmentExecStatus* const exec_status); + RetCode GetAllSegments(stack* all_segments); + void SetPartitionOffset(const int par_off) { + state_.partition_offset_ = par_off; + } private: void Send(); diff --git a/physical_operator/exchange_sender_pipeline.cpp b/physical_operator/exchange_sender_pipeline.cpp index 874dfaa23..66edc6b0c 100755 --- a/physical_operator/exchange_sender_pipeline.cpp +++ b/physical_operator/exchange_sender_pipeline.cpp @@ -27,7 +27,10 @@ #include "../physical_operator/exchange_sender_pipeline.h" +#include #include +#include + #include "../../configure.h" #include "../../common/rename.h" #include "../../common/Logging.h" @@ -40,11 +43,33 @@ namespace claims { namespace physical_operator { -ExchangeSenderPipeline::ExchangeSenderPipeline(State state) : state_(state) { +ExchangeSenderPipeline::ExchangeSenderPipeline(State state) + : state_(state), + sender_thread_id_(0), + partitioned_block_stream_(NULL), + partitioned_data_buffer_(NULL), + block_for_asking_(NULL), + block_for_sending_buffer_(NULL), + 
block_for_serialization_(NULL), + sending_buffer_(NULL), + partition_function_(NULL), + socket_fd_upper_list_(NULL) { + set_phy_oper_type(kphysicalExchangeSender); assert(state.partition_schema_.partition_key_index < 100); } -ExchangeSenderPipeline::ExchangeSenderPipeline() {} +ExchangeSenderPipeline::ExchangeSenderPipeline() + : sender_thread_id_(0), + partitioned_block_stream_(NULL), + partitioned_data_buffer_(NULL), + block_for_asking_(NULL), + block_for_sending_buffer_(NULL), + block_for_serialization_(NULL), + sending_buffer_(NULL), + partition_function_(NULL), + socket_fd_upper_list_(NULL) { + set_phy_oper_type(kphysicalExchangeSender); +} ExchangeSenderPipeline::~ExchangeSenderPipeline() { if (NULL != state_.schema_) { @@ -60,8 +85,12 @@ ExchangeSenderPipeline::~ExchangeSenderPipeline() { * pay attention to the work of different block buffer according to the * comments near it */ -bool ExchangeSenderPipeline::Open(const PartitionOffset&) { - state_.child_->Open(state_.partition_offset_); +bool ExchangeSenderPipeline::Open(SegmentExecStatus* const exec_status, + const PartitionOffset&) { + RETURN_IF_CANCELLED(exec_status); + state_.child_->Open(exec_status, state_.partition_offset_); + RETURN_IF_CANCELLED(exec_status); + upper_num_ = state_.upper_id_list_.size(); partition_function_ = PartitionFunctionFactory::createBoostHashFunction(upper_num_); @@ -110,10 +139,13 @@ bool ExchangeSenderPipeline::Open(const PartitionOffset&) { partitioned_block_stream_[i] = BlockStreamBase::createBlock(state_.schema_, state_.block_size_); } + RETURN_IF_CANCELLED(exec_status); /** connect to all the mergers **/ for (unsigned upper_offset = 0; upper_offset < state_.upper_id_list_.size(); ++upper_offset) { + RETURN_IF_CANCELLED(exec_status); + LOG(INFO) << "(exchane_id= " << state_.exchange_id_ << " partition_offset= " << state_.partition_offset_ << " ) try to connect to upper( " << upper_offset << " , " @@ -126,11 +158,12 @@ bool ExchangeSenderPipeline::Open(const 
PartitionOffset&) { return false; } } - LOG(INFO) << "successfully !" << std::endl; + LOG(INFO) << "connect to all mereger successfully !" << std::endl; + + RETURN_IF_CANCELLED(exec_status); /** create the Sender thread **/ - int error; - error = pthread_create(&sender_thread_id_, NULL, Sender, this); + int error = pthread_create(&sender_thread_id_, NULL, Sender, this); if (error != 0) { LOG(ERROR) << "(exchane_id= " << state_.exchange_id_ << " partition_offset= " << state_.partition_offset_ @@ -150,12 +183,17 @@ bool ExchangeSenderPipeline::Open(const PartitionOffset&) { * else the state_.partition_schema_ is broadcast, straightly insert the block * from child into each partition buffer. */ -bool ExchangeSenderPipeline::Next(BlockStreamBase* no_block) { +bool ExchangeSenderPipeline::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* no_block) { void* tuple_from_child; void* tuple_in_cur_block_stream; while (true) { + RETURN_IF_CANCELLED(exec_status); + block_for_asking_->setEmpty(); - if (state_.child_->Next(block_for_asking_)) { + if (state_.child_->Next(exec_status, block_for_asking_)) { + RETURN_IF_CANCELLED(exec_status); + /** * if a blocks is obtained from child, we repartition the tuples in the * block to corresponding partition_block_stream_. @@ -197,6 +235,7 @@ bool ExchangeSenderPipeline::Next(BlockStreamBase* no_block) { state_.schema_->copyTuple(tuple_from_child, tuple_in_cur_block_stream); } + DELETE_PTR(traverse_iterator); // by hAN MEMORY LEAK } else if (state_.partition_schema_.isBroadcastPartition()) { /** * for boardcast case, all block from child should inserted into all @@ -209,6 +248,8 @@ bool ExchangeSenderPipeline::Next(BlockStreamBase* no_block) { } } } else { + RETURN_IF_CANCELLED(exec_status); + if (state_.partition_schema_.isHashPartition()) { /* the child iterator is exhausted. We add the last block stream block * which would be not full into the buffer for hash partitioned case. 
@@ -249,7 +290,11 @@ bool ExchangeSenderPipeline::Next(BlockStreamBase* no_block) { << " partition_offset= " << state_.partition_offset_ << " ) Waiting until all the blocks in the buffer is sent!" << std::endl; + RETURN_IF_CANCELLED(exec_status); + while (!partitioned_data_buffer_->isEmpty()) { + RETURN_IF_CANCELLED(exec_status); + usleep(1); } @@ -263,7 +308,11 @@ bool ExchangeSenderPipeline::Next(BlockStreamBase* no_block) { << " partition_offset= " << state_.partition_offset_ << " ) Waiting for close notification from all merger!" << std::endl; + RETURN_IF_CANCELLED(exec_status); + for (unsigned i = 0; i < upper_num_; i++) { + RETURN_IF_CANCELLED(exec_status); + WaitingForCloseNotification(socket_fd_upper_list_[i]); } LOG(INFO) << " received all close notification, closing.. " << endl; @@ -272,9 +321,9 @@ bool ExchangeSenderPipeline::Next(BlockStreamBase* no_block) { } } -bool ExchangeSenderPipeline::Close() { +bool ExchangeSenderPipeline::Close(SegmentExecStatus* const exec_status) { CancelSenderThread(); - state_.child_->Close(); + state_.child_->Close(exec_status); // free temporary space if (NULL != partitioned_data_buffer_) { delete partitioned_data_buffer_; @@ -292,11 +341,13 @@ bool ExchangeSenderPipeline::Close() { delete sending_buffer_; sending_buffer_ = NULL; } + if (NULL != block_for_sending_buffer_) { delete block_for_sending_buffer_; block_for_sending_buffer_ = NULL; } - for (unsigned i = 0; i < upper_num_; i++) { + for (unsigned i = 0; NULL != partitioned_block_stream_ && i < upper_num_; + i++) { if (NULL != partitioned_block_stream_[i]) { delete partitioned_block_stream_[i]; partitioned_block_stream_[i] = NULL; @@ -324,9 +375,14 @@ bool ExchangeSenderPipeline::Close() { void* ExchangeSenderPipeline::Sender(void* arg) { ExchangeSenderPipeline* Pthis = reinterpret_cast(arg); - LOG(INFO) << "(exchange_id = " << Pthis->state_.exchange_id_ - << " , partition_offset = " << Pthis->state_.partition_offset_ - << " ) sender thread created successfully!" 
<< std::endl; + pthread_testcancel(); + + // LOG(INFO) << "(exchange_id = " << Pthis->state_.exchange_id_ + // << " , partition_offset = " << Pthis->state_.partition_offset_ + // << " ) sender thread created successfully!"; + RAW_LOG(INFO, + "exchange_id= %d, par_off= %d sender thread is created successfully!", + Pthis->state_.exchange_id_, Pthis->state_.partition_offset_); Pthis->sending_buffer_->Initialized(); Pthis->sendedblocks_ = 0; try { @@ -359,13 +415,16 @@ void* ExchangeSenderPipeline::Sender(void* arg) { break; } else { if (recvbytes < block_for_sending->GetRestSizeToHandle()) { - /* the block is not entirely sent. */ +/* the block is not entirely sent. */ +#ifdef GLOG_STATUS + LOG(INFO) << "(exchange_id = " << Pthis->state_.exchange_id_ << " , partition_offset = " << Pthis->state_.partition_offset_ << " ) doesn't send a block completely, actual send bytes = " << recvbytes << " rest bytes = " << block_for_sending->GetRestSizeToHandle() << std::endl; +#endif block_for_sending->IncreaseActualSize(recvbytes); continue; } else { @@ -446,17 +505,26 @@ void* ExchangeSenderPipeline::Sender(void* arg) { } void ExchangeSenderPipeline::CancelSenderThread() { - pthread_cancel(sender_thread_id_); - void* res; - pthread_join(sender_thread_id_, &res); - if (res != PTHREAD_CANCELED) - LOG(WARNING) << "(exchange_id = " << state_.exchange_id_ - << " , partition_offset = " << state_.partition_offset_ - << " ) thread is not canceled!" << std::endl; - LOG(INFO) << "(exchange_id = " << state_.exchange_id_ - << " , partition_offset = " << state_.partition_offset_ - << " ) thread is canceled!" << std::endl; - sender_thread_id_ = 0; + if (0 != sender_thread_id_) { + pthread_cancel(sender_thread_id_); + void* res; + pthread_join(sender_thread_id_, &res); + if (res != PTHREAD_CANCELED) + LOG(WARNING) << "(exchange_id = " << state_.exchange_id_ + << " , partition_offset = " << state_.partition_offset_ + << " ) thread is not canceled!" 
<< std::endl; + LOG(INFO) << "(exchange_id = " << state_.exchange_id_ + << " , partition_offset = " << state_.partition_offset_ + << " ) thread is canceled!" << std::endl; + sender_thread_id_ = 0; + } +} +RetCode ExchangeSenderPipeline::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + return state_.child_->GetAllSegments(all_segments); + } + return ret; } } // namespace physical_operator } // namespace claims diff --git a/physical_operator/exchange_sender_pipeline.h b/physical_operator/exchange_sender_pipeline.h index 81f37e01e..9c3f074b2 100755 --- a/physical_operator/exchange_sender_pipeline.h +++ b/physical_operator/exchange_sender_pipeline.h @@ -43,6 +43,8 @@ #include #include #include +#include + #include "../../common/Schema/Schema.h" #include "../../Executor/IteratorExecutorMaster.h" #include "../../common/Block/PartitionedBlockBuffer.h" @@ -52,6 +54,7 @@ #include "../../common/hash.h" #include "../../common/Logging.h" #include "../../common/partition_functions.h" +#include "../common/error_define.h" #include "../physical_operator/exchange_sender.h" #include "../physical_operator/physical_operator_base.h" @@ -100,13 +103,18 @@ class ExchangeSenderPipeline : public ExchangeSender { * 2.build socket connection with uppder mergers * 3.create sender thread that sends blocks to different upper mergers. 
*/ - bool Open(const PartitionOffset& partition_offset = 0); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); /** * divide block that come from child and insert them into corresponding * partition buffer */ - bool Next(BlockStreamBase* no_block); - bool Close(); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* no_block); + bool Close(SegmentExecStatus* const exec_status); + RetCode GetAllSegments(stack* all_segments); + void SetPartitionOffset(const int par_off) { + state_.partition_offset_ = par_off; + } private: /** diff --git a/physical_operator/expander.cpp b/physical_operator/expander.cpp index 3ef15e241..25aca03fa 100755 --- a/physical_operator/expander.cpp +++ b/physical_operator/expander.cpp @@ -43,13 +43,17 @@ Expander::Expander(State state) block_stream_buffer_(0), finished_thread_count_(0), thread_count_(0), - coordinate_pid_(0) {} + is_registered_(false) { + set_phy_oper_type(kphysicalExpander); +} Expander::Expander() : block_stream_buffer_(0), finished_thread_count_(0), thread_count_(0), - coordinate_pid_(0) {} + is_registered_(false) { + set_phy_oper_type(kphysicalExpander); +} Expander::~Expander() { if (NULL != state_.child_) { @@ -74,7 +78,8 @@ Expander::State::State(Schema* schema, PhysicalOperatorBase* child, * @param partitoin_offset means to solve corresponding partition * every Expander should register to ExpanderTracker */ -bool Expander::Open(const PartitionOffset& partitoin_offset) { +bool Expander::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partitoin_offset) { received_tuples_ = 0; state_.partition_offset_ = partitoin_offset; input_data_complete_ = false; @@ -84,13 +89,18 @@ bool Expander::Open(const PartitionOffset& partitoin_offset) { state_.block_size_, state_.block_count_in_buffer_ * 10, state_.schema_); in_work_expanded_thread_list_.clear(); + RETURN_IF_CANCELLED(exec_status); + expander_id_ = 
ExpanderTracker::getInstance()->registerNewExpander( block_stream_buffer_, this); + is_registered_ = true; LOG(INFO) << expander_id_ << "Expander open, thread count= " << state_.init_thread_count_ << std::endl; - + exec_status_ = exec_status; for (unsigned i = 0; i < state_.init_thread_count_; i++) { + RETURN_IF_CANCELLED(exec_status); + if (CreateWorkingThread() == false) { LOG(INFO) << "expander_id_ = " << expander_id_ << " Failed to create initial expanded thread*" << std::endl; @@ -102,8 +112,13 @@ bool Expander::Open(const PartitionOffset& partitoin_offset) { /** * fetch one block from buffer and return, until it is exhausted. */ -bool Expander::Next(BlockStreamBase* block) { +bool Expander::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + RETURN_IF_CANCELLED(exec_status); + while (!block_stream_buffer_->getBlock(*block)) { + RETURN_IF_CANCELLED(exec_status); + if (ChildExhausted()) { return false; } else { @@ -113,13 +128,15 @@ bool Expander::Next(BlockStreamBase* block) { return true; } -bool Expander::Close() { - LOG(INFO) << "Expander: " << expander_id_ << " received " - << block_stream_buffer_->getReceivedDataSizeInKbytes() << " kByte " - << received_tuples_ << " tuples!" << std::endl; - ExpanderTracker::getInstance()->unregisterExpander(expander_id_); +bool Expander::Close(SegmentExecStatus* const exec_status) { + // for making sure every thread have exited from next() if (true == g_thread_pool_used) { - // do nothing + while (!in_work_expanded_thread_list_.empty() || + !being_called_bacl_expanded_thread_list_.empty()) { + LOG(WARNING) << "there are thread working now when expander close(), so " + "waiting!!!"; + usleep(300); + } } else { for (std::set::iterator it = in_work_expanded_thread_list_.begin(); @@ -131,24 +148,35 @@ bool Expander::Close() { << " A expander thread is killed before close!" 
<< std::endl; } } - - assert(input_data_complete_); - input_data_complete_ = false; - one_thread_finished_ = false; - assert(in_work_expanded_thread_list_.empty()); - assert(being_called_bacl_expanded_thread_list_.empty()); - finished_thread_count_ = 0; - + if (!exec_status->is_cancelled()) { + LOG(INFO) << "Expander: " << expander_id_ << " received " + << block_stream_buffer_->getReceivedDataSizeInKbytes() + << " kByte " << received_tuples_ << " tuples!" << std::endl; + } + if (is_registered_) { + ExpanderTracker::getInstance()->unregisterExpander(expander_id_); + is_registered_ = false; + } + if (!exec_status->is_cancelled()) { + assert(input_data_complete_); + input_data_complete_ = false; + one_thread_finished_ = false; + assert(in_work_expanded_thread_list_.empty()); + assert(being_called_bacl_expanded_thread_list_.empty()); + finished_thread_count_ = 0; + } /* * check if all the information in ExpanderTrack has properly removed */ - assert(!ExpanderTracker::getInstance()->trackExpander(expander_id_)); + if (!exec_status->is_cancelled()) { + assert(!ExpanderTracker::getInstance()->trackExpander(expander_id_)); + } if (NULL != block_stream_buffer_) { delete block_stream_buffer_; block_stream_buffer_ = NULL; } LOG(INFO) << expander_id_ << " Buffer is freed in Expander!" << std::endl; - state_.child_->Close(); + state_.child_->Close(exec_status); thread_count_ = 0; LOG(INFO) << expander_id_ << "<<<<<<>>>>>>>>>" << std::endl; return true; @@ -176,6 +204,7 @@ void* Expander::ExpandedWork(void* arg) { Pthis->AddIntoWorkingThreadList(pid); ExpanderTracker::getInstance()->registerNewExpandedThreadStatus( pid, Pthis->expander_id_); + unsigned block_count = 0; (reinterpret_cast(arg))->sem_.post(); @@ -188,7 +217,8 @@ void* Expander::ExpandedWork(void* arg) { << " begins to open child!" 
<< std::endl; ticks start_open = curtick(); - Pthis->state_.child_->Open(Pthis->state_.partition_offset_); + Pthis->state_.child_->Open(Pthis->exec_status_, + Pthis->state_.partition_offset_); LOG(INFO) << Pthis->expander_id_ << ", pid= " << pid << " finished opening child" << std::endl; @@ -208,7 +238,7 @@ void* Expander::ExpandedWork(void* arg) { Pthis->state_.schema_, Pthis->state_.block_size_); block_for_asking->setEmpty(); - while (Pthis->state_.child_->Next(block_for_asking)) { + while (Pthis->state_.child_->Next(Pthis->exec_status_, block_for_asking)) { if (!block_for_asking->Empty()) { Pthis->lock_.acquire(); Pthis->received_tuples_ += block_for_asking->getTuplesInBlock(); @@ -226,7 +256,6 @@ void* Expander::ExpandedWork(void* arg) { delete block_for_asking; block_for_asking = NULL; } - if (ExpanderTracker::getInstance()->isExpandedThreadCallBack( pthread_self())) { LOG(INFO) << Pthis->expander_id_ << " <<<<<<<<<<<<<<<block_stream_buffer_->setInputComplete(); + if (NULL != Pthis->block_stream_buffer_) + Pthis->block_stream_buffer_->setInputComplete(); LOG(INFO) << pthread_self() << " Produced " << block_count << "blocks" << std::endl; Pthis->lock_.release(); @@ -286,12 +316,12 @@ bool Expander::ChildExhausted() { this->block_stream_buffer_->Empty(); lock_.release(); exclusive_expanding_.release(); - if (ret == true && coordinate_pid_ != 0) { - void* res; - pthread_join(coordinate_pid_, &res); - coordinate_pid_ = 0; - return ChildExhausted(); - } + // if (ret == true && coordinate_pid_ != 0) { + // void* res; + // pthread_join(coordinate_pid_, &res); + // coordinate_pid_ = 0; + // return ChildExhausted(); + // } if (ret) { LOG(INFO) << expander_id_ << " child iterator is exhausted!" 
<< std::endl; } @@ -303,14 +333,15 @@ bool Expander::ChildExhausted() { */ bool Expander::CreateWorkingThread() { pthread_t tid = 0; - + if (exec_status_->is_cancelled()) { + return false; + } ExpanderContext para; para.pthis_ = this; ticks start = curtick(); if (exclusive_expanding_.try_acquire()) { if (true == g_thread_pool_used) { - Environment::getInstance()->getThreadPool()->AddTask(ExpandedWork, - ¶); + Environment::getInstance()->getThreadPool()->AddTask(ExpandedWork, ¶); } else { const int error = pthread_create(&tid, NULL, ExpandedWork, ¶); if (error != 0) { @@ -443,5 +474,12 @@ bool Expander::Shrink() { return true; } } +RetCode Expander::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + return state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/expander.h b/physical_operator/expander.h index e03fe9ef2..a7f2e3c41 100755 --- a/physical_operator/expander.h +++ b/physical_operator/expander.h @@ -30,14 +30,18 @@ #include #include #include +#include + #include "../physical_operator/physical_operator_base.h" #include "../common/Schema/Schema.h" #include "../common/Block/BlockStreamBuffer.h" +#include "../common/error_define.h" #include "../utility/ExpandabilityShrinkability.h" #include "../common/Logging.h" #include "../utility/lock.h" #include "../utility/thread_pool.h" #include "../Environment.h" + namespace claims { namespace physical_operator { #define EXPANDER_BUFFER_SIZE 1000 @@ -79,13 +83,15 @@ class Expander : public PhysicalOperatorBase, * prepare block-buffer for collecting block from child and some thread list, * create one initial working thread. 
*/ - bool Open(const PartitionOffset& partitoin_offset = 0); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partitoin_offset = 0); /** * fetch one block from buffer and return */ - bool Next(BlockStreamBase* block); - bool Close(); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: /** @@ -104,13 +110,13 @@ class Expander : public PhysicalOperatorBase, unsigned GetDegreeOfParallelism(); private: + bool is_registered_; State state_; - + SegmentExecStatus* exec_status_; /* * The set of threads that are working normally. */ std::set in_work_expanded_thread_list_; - pthread_t coordinate_pid_; ExpanderID expander_id_; Lock exclusive_expanding_; /* diff --git a/physical_operator/in_operator.cpp b/physical_operator/in_operator.cpp index 557265421..57e3e276b 100644 --- a/physical_operator/in_operator.cpp +++ b/physical_operator/in_operator.cpp @@ -78,9 +78,16 @@ InOperator::State::State(PhysicalOperatorBase* child_set, ht_nbuckets_(1024), ht_bucket_size_(64) {} -bool InOperator::Open(const PartitionOffset& partition_offset) { - state_.child_set_->Open(partition_offset); - state_.child_in_->Open(partition_offset); +bool InOperator::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { + RETURN_IF_CANCELLED(exec_status); + + state_.child_set_->Open(exec_status, partition_offset); + RETURN_IF_CANCELLED(exec_status); + + state_.child_in_->Open(exec_status, partition_offset); + RETURN_IF_CANCELLED(exec_status); + AtomicPushFreeHtBlockStream(BlockStreamBase::createBlock( state_.schema_child_set_, state_.block_size_)); AtomicPushFreeBlockStream(BlockStreamBase::createBlock( @@ -92,13 +99,20 @@ bool InOperator::Open(const PartitionOffset& partition_offset) { PartitionFunctionFactory::createBoostHashFunction(state_.ht_nbuckets_); vector ht_index; 
ht_index.push_back(state_.index_child_set_); + + RETURN_IF_CANCELLED(exec_status); + hash_table_ = new BasicHashTable( state_.ht_nbuckets_, state_.ht_bucket_size_, (state_.schema_child_set_->getSubSchema(ht_index))->getTupleMaxSize()); ht_index.clear(); open_finished_ = true; } else { - while (!open_finished_) usleep(1); + while (!open_finished_) { + RETURN_IF_CANCELLED(exec_status); + + usleep(1); + } } void* cur_tuple = NULL; @@ -106,7 +120,11 @@ bool InOperator::Open(const PartitionOffset& partition_offset) { unsigned bn = 0; BlockStreamBase* bsb = AtomicPopFreeHtBlockStream(); - while (state_.child_set_->Next(bsb)) { + RETURN_IF_CANCELLED(exec_status); + + while (state_.child_set_->Next(exec_status, bsb)) { + RETURN_IF_CANCELLED(exec_status); + BlockStreamBase::BlockStreamTraverseIterator* bsti = bsb->createIterator(); bsti->reset(); while (cur_tuple = bsti->nextTuple()) { @@ -128,7 +146,8 @@ bool InOperator::Open(const PartitionOffset& partition_offset) { return true; } -bool InOperator::Next(BlockStreamBase* block) { +bool InOperator::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { unsigned bn; RemainingBlock rb; void* tuple_from_child_in = NULL; @@ -175,7 +194,11 @@ bool InOperator::Next(BlockStreamBase* block) { BlockStreamBase* block_for_asking = AtomicPopFreeBlockStream(); block_for_asking->setEmpty(); - while (state_.child_in_->Next(block_for_asking)) { + RETURN_IF_CANCELLED(exec_status); + + while (state_.child_in_->Next(exec_status, block_for_asking)) { + RETURN_IF_CANCELLED(exec_status); + BlockStreamBase::BlockStreamTraverseIterator* traverse_iterator = block_for_asking->createIterator(); while ((tuple_from_child_in = traverse_iterator->currentTuple()) > 0) { @@ -218,7 +241,7 @@ bool InOperator::Next(BlockStreamBase* block) { return false; } -bool InOperator::Close() { +bool InOperator::Close(SegmentExecStatus* const exec_status) { sema_open_.post(); open_finished_ = false; // barrier_->~Barrier(); @@ -227,8 +250,8 @@ bool 
InOperator::Close() { remaining_block_list_.clear(); // hash->~PartitionFunction(); hash_table_->~BasicHashTable(); - state_.child_set_->Close(); - state_.child_in_->Close(); + state_.child_set_->Close(exec_status); + state_.child_in_->Close(exec_status); return true; } diff --git a/physical_operator/in_operator.h b/physical_operator/in_operator.h index abd18cedc..2f72cd428 100644 --- a/physical_operator/in_operator.h +++ b/physical_operator/in_operator.h @@ -100,11 +100,12 @@ class InOperator : public PhysicalOperatorBase { InOperator(); virtual ~InOperator(); // buffer result of sub_query in a hash_table - bool Open(const PartitionOffset &partition_offset = 0); + bool Open(SegmentExecStatus *const exec_status, + const PartitionOffset &partition_offset = 0); // get block from child, and fetch each tuple, then compare with every tuple // in corresponding hash_bucket - bool Next(BlockStreamBase *block); - bool Close(); + bool Next(SegmentExecStatus *const exec_status, BlockStreamBase *block); + bool Close(SegmentExecStatus *const exec_status); private: bool AtomicPopRemainingBlock(RemainingBlock &rb); diff --git a/physical_operator/performance_monitor.cpp b/physical_operator/performance_monitor.cpp index d473487a6..e3bb16b1d 100755 --- a/physical_operator/performance_monitor.cpp +++ b/physical_operator/performance_monitor.cpp @@ -25,6 +25,8 @@ #include #include + +#include "../exec_tracker/segment_exec_status.h" #include "../utility/rdtsc.h" using std::endl; @@ -34,9 +36,10 @@ PerformanceMonitor::PerformanceMonitor(State state) : state_(state) {} PerformanceMonitor::PerformanceMonitor() {} PerformanceMonitor::~PerformanceMonitor() {} -bool PerformanceMonitor::Open(const PartitionOffset& partition_offset) { +bool PerformanceMonitor::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { start_ = curtick(); - state_.child_->Open(partition_offset); + state_.child_->Open(exec_status, partition_offset); block_ = 
BlockStreamBase::createBlock(state_.schema_, state_.block_size_); tuplecount_ = 0; int error; @@ -48,12 +51,13 @@ bool PerformanceMonitor::Open(const PartitionOffset& partition_offset) { return true; } -bool PerformanceMonitor::Next(BlockStreamBase*) { +bool PerformanceMonitor::Next(SegmentExecStatus* const exec_status, + BlockStreamBase*) { // PartitionFunction* // hash=PartitionFunctionFactory::createBoostHashFunction(4); // const int partition_index=3; block_->setEmpty(); - if (state_.child_->Next(block_)) { + if (state_.child_->Next(exec_status, block_)) { BlockStreamBase::BlockStreamTraverseIterator* it = block_->createIterator(); while (it->nextTuple()) { // tuplecount_++; @@ -68,7 +72,7 @@ bool PerformanceMonitor::Next(BlockStreamBase*) { return false; } -bool PerformanceMonitor::Close() { +bool PerformanceMonitor::Close(SegmentExecStatus* const exec_status) { pthread_cancel(report_tid_); double eclipsed_seconds = getSecond(start_); double processed_data_in_bytes = @@ -82,7 +86,7 @@ bool PerformanceMonitor::Close() { << (float)tuplecount_ / 2014 / 1024 / eclipsed_seconds << " M tuples/s" << endl; block_->~BlockStreamBase(); - state_.child_->Close(); + state_.child_->Close(exec_status); return true; } void PerformanceMonitor::Print() { diff --git a/physical_operator/performance_monitor.h b/physical_operator/performance_monitor.h index d68b0c3d1..de05aae22 100755 --- a/physical_operator/performance_monitor.h +++ b/physical_operator/performance_monitor.h @@ -52,9 +52,10 @@ class PerformanceMonitor : public PhysicalOperatorBase { }; PerformanceMonitor(State state_); virtual ~PerformanceMonitor(); - bool Open(const PartitionOffset& partition_offset = 0); - bool Next(BlockStreamBase* block); - bool Close(); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const exec_status); void Print(); unsigned long int 
GetNumberOfTuples() const; diff --git a/physical_operator/physical_aggregation.cpp b/physical_operator/physical_aggregation.cpp index a16d90838..ff5bf3a02 100755 --- a/physical_operator/physical_aggregation.cpp +++ b/physical_operator/physical_aggregation.cpp @@ -31,16 +31,19 @@ #include "../physical_operator/physical_aggregation.h" #include +#include #include #include "../common/expression/expr_node.h" #include "../common/expression/data_type_oper.h" #include "../common/expression/expr_unary.h" +#include "../common/memory_handle.h" #include "../common/Schema/Schema.h" #include "../Debug.h" #include "../utility/rdtsc.h" #include "../Executor/expander_tracker.h" using claims::common::DataTypeOper; using claims::common::DataTypeOperFunc; +using claims::common::ExprEvalCnxt; using claims::common::ExprNode; using claims::common::ExprUnary; using std::vector; @@ -53,12 +56,14 @@ PhysicalAggregation::PhysicalAggregation(State state) hashtable_(NULL), hash_(NULL), bucket_cur_(0) { + set_phy_oper_type(kPhysicalAggregation); InitExpandedStatus(); assert(state_.hash_schema_); } PhysicalAggregation::PhysicalAggregation() : PhysicalOperator(4, 3), hashtable_(NULL), hash_(NULL), bucket_cur_(0) { + set_phy_oper_type(kPhysicalAggregation); InitExpandedStatus(); } @@ -132,7 +137,10 @@ PhysicalAggregation::State::State( * shared * hash table thread by thread synchronized by the hash table lock. 
*/ -bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { +bool PhysicalAggregation::Open(SegmentExecStatus *const exec_status, + const PartitionOffset &partition_offset) { + RETURN_IF_CANCELLED(exec_status); + RegisterExpandedThreadToAllBarriers(); // copy expression and initialize them vector group_by_attrs; @@ -163,7 +171,9 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { UnregisterExpandedThreadToAllBarriers(1); return true; } - state_.child_->Open(partition_offset); + RETURN_IF_CANCELLED(exec_status); + + state_.child_->Open(exec_status, partition_offset); ticks start = curtick(); if (TryEntryIntoSerializedSection(1)) { hash_ = PartitionFunctionFactory::createGeneralModuloFunction( @@ -183,6 +193,8 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { * with small groups, as private aggregation avoids the contention to the * shared hash table. */ + RETURN_IF_CANCELLED(exec_status); + BasicHashTable *private_hashtable = new BasicHashTable(state_.num_of_buckets_, state_.bucket_size_, state_.hash_schema_->getTupleMaxSize()); @@ -199,19 +211,26 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { void *value_in_input_tuple; void *value_in_hash_table; void *new_tuple_in_hash_table; + ExprEvalCnxt eecnxt; + eecnxt.schema[0] = state_.input_schema_; + unsigned allocated_tuples_in_hashtable = 0; BasicHashTable::Iterator ht_it = hashtable_->CreateIterator(); BasicHashTable::Iterator pht_it = private_hashtable->CreateIterator(); int64_t one = 1; BlockStreamBase *block_for_asking = BlockStreamBase::createBlock(state_.input_schema_, state_.block_size_); + BlockStreamBase::BlockStreamTraverseIterator *bsti = NULL; block_for_asking->setEmpty(); start = curtick(); // traverse every block from child - while (state_.child_->Next(block_for_asking)) { - BlockStreamBase::BlockStreamTraverseIterator *bsti = - block_for_asking->createIterator(); + + RETURN_IF_CANCELLED(exec_status); + 
while (state_.child_->Next(exec_status, block_for_asking)) { + RETURN_IF_CANCELLED(exec_status); + DELETE_PTR(bsti); + bsti = block_for_asking->createIterator(); bsti->reset(); // traverse every tuple from block while (NULL != (cur = bsti->currentTuple())) { @@ -221,9 +240,9 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { */ bn = 0; // execute group by attributes and get partition key + eecnxt.tuple[0] = cur; if (state_.group_by_attrs_.size() > 0) { - group_by_expr_result = - group_by_attrs[0]->ExprEvaluate(cur, state_.input_schema_); + group_by_expr_result = group_by_attrs[0]->ExprEvaluate(eecnxt); bn = state_.hash_schema_->getcolumn(0).operate->getPartitionValue( group_by_expr_result, state_.num_of_buckets_); } @@ -236,8 +255,7 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { */ key_exist = true; for (int i = 0; i < group_by_attrs.size(); ++i) { - group_by_expr_result = - group_by_attrs[i]->ExprEvaluate(cur, state_.input_schema_); + group_by_expr_result = group_by_attrs[i]->ExprEvaluate(eecnxt); key_in_hash_table = state_.hash_schema_->getColumnAddess(i, tuple_in_hashtable); if (!state_.hash_schema_->getcolumn(i) @@ -252,9 +270,8 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { // update function for (int i = 0; i < agg_attrs.size(); ++i) { agg_attrs[i]->ExprEvaluate( - cur, state_.input_schema_, - state_.hash_schema_->getColumnAddess(i + group_by_size, - tuple_in_hashtable)); + eecnxt, state_.hash_schema_->getColumnAddess( + i + group_by_size, tuple_in_hashtable)); } bsti->increase_cur_(); break; @@ -265,11 +282,11 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { if (key_exist) { continue; } + eecnxt.tuple[0] = cur; new_tuple_in_hash_table = private_hashtable->allocate(bn); // set group-by's original value by expression for (int i = 0; i < group_by_attrs.size(); ++i) { - key_in_input_tuple = - group_by_attrs[i]->ExprEvaluate(cur, 
state_.input_schema_); + key_in_input_tuple = group_by_attrs[i]->ExprEvaluate(eecnxt); key_in_hash_table = state_.hash_schema_->getColumnAddess(i, new_tuple_in_hash_table); state_.hash_schema_->getcolumn(i) @@ -277,8 +294,7 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { } // get value_in_input_tuple from expression for (int i = 0; i < agg_attrs.size(); ++i) { - value_in_input_tuple = - agg_attrs[i]->arg0_->ExprEvaluate(cur, state_.input_schema_); + value_in_input_tuple = agg_attrs[i]->arg0_->ExprEvaluate(eecnxt); value_in_hash_table = state_.hash_schema_->getColumnAddess( group_by_size + i, new_tuple_in_hash_table); state_.hash_schema_->getcolumn(group_by_size + i) @@ -291,7 +307,11 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { // merge private_hash_table into hash_table for (int i = 0; i < state_.num_of_buckets_; i++) { - private_hashtable->placeIterator(pht_it, i); + RETURN_IF_CANCELLED(exec_status); + + if (!private_hashtable->placeIterator(pht_it, i)) { + LOG(INFO) << "placeIterator false"; + } // traverse every tuple from block while (NULL != (cur = pht_it.readCurrent())) { /* get the corresponding bucket index according to the first column in @@ -373,6 +393,8 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { UnregisterExpandedThreadToAllBarriers(2); return true; } + RETURN_IF_CANCELLED(exec_status); + BarrierArrive(2); if (TryEntryIntoSerializedSection(2)) { @@ -396,6 +418,11 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { delete private_hashtable; private_hashtable = NULL; } + for (auto &i : agg_attrs) DELETE_PTR(i); + agg_attrs.clear(); + for (auto &i : group_by_attrs) DELETE_PTR(i); + group_by_attrs.clear(); + LOG(INFO) << "Agg open() finish" << endl; return GetReturnStatus(); } @@ -404,7 +431,10 @@ bool PhysicalAggregation::Open(const PartitionOffset &partition_offset) { * hash table, which will definitely reduce the degree of parallelism. 
* But it is for now, assuming that the aggregated results are small. */ -bool PhysicalAggregation::Next(BlockStreamBase *block) { +bool PhysicalAggregation::Next(SegmentExecStatus *const exec_status, + BlockStreamBase *block) { + RETURN_IF_CANCELLED(exec_status); + if (ExpanderTracker::getInstance()->isExpandedThreadCallBack( pthread_self())) { UnregisterExpandedThreadToAllBarriers(3); @@ -462,14 +492,14 @@ bool PhysicalAggregation::Next(BlockStreamBase *block) { } } -bool PhysicalAggregation::Close() { +bool PhysicalAggregation::Close(SegmentExecStatus *const exec_status) { InitExpandedStatus(); if (NULL != hashtable_) { delete hashtable_; hashtable_ = NULL; } - state_.child_->Close(); + state_.child_->Close(exec_status); return true; } void PhysicalAggregation::Print() { @@ -486,6 +516,13 @@ void PhysicalAggregation::Print() { cout << "---------------" << std::endl; state_.child_->Print(); } +RetCode PhysicalAggregation::GetAllSegments(stack *all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + return state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/physical_aggregation.h b/physical_operator/physical_aggregation.h index 3c4bef475..403e3c2ed 100755 --- a/physical_operator/physical_aggregation.h +++ b/physical_operator/physical_aggregation.h @@ -33,7 +33,9 @@ #include #include +#include +#include "../common/error_define.h" #include "../common/expression/expr_node.h" #include "../common/expression/expr_unary.h" #include "../physical_operator/physical_operator_base.h" @@ -45,8 +47,10 @@ #include "../common/Schema/Schema.h" #include "../common/Expression/queryfunc.h" #include "../physical_operator/physical_operator.h" + using claims::common::ExprUnary; using claims::common::ExprNode; + namespace claims { namespace physical_operator { #define NEWCONDI @@ -100,10 +104,12 @@ class PhysicalAggregation : public PhysicalOperator { 
PhysicalAggregation(); virtual ~PhysicalAggregation(); - bool Open(const PartitionOffset &partition_offset); - bool Next(BlockStreamBase *block); - bool Close(); + bool Open(SegmentExecStatus *const exec_status, + const PartitionOffset &partition_offset); + bool Next(SegmentExecStatus *const exec_status, BlockStreamBase *block); + bool Close(SegmentExecStatus *const exec_status); void Print(); + RetCode GetAllSegments(stack *all_segments); public: State state_; @@ -111,7 +117,6 @@ class PhysicalAggregation : public PhysicalOperator { private: BasicHashTable *hashtable_; PartitionFunction *hash_; - // hashtable traverse and in the next func Lock hashtable_cur_lock_; unsigned bucket_cur_; diff --git a/physical_operator/physical_delete_filter.cpp b/physical_operator/physical_delete_filter.cpp index 5cea6b174..4b5669f5e 100644 --- a/physical_operator/physical_delete_filter.cpp +++ b/physical_operator/physical_delete_filter.cpp @@ -35,6 +35,7 @@ #include "../../common/error_no.h" #include "./physical_delete_filter.h" +#include using namespace claims::common; namespace claims { namespace physical_operator { @@ -47,6 +48,7 @@ PhysicalDeleteFilter::PhysicalDeleteFilter() eftt_(NULL), memcpy_(NULL), memcat_(NULL) { + set_phy_oper_type(kPhysicalDeleteFilter); InitExpandedStatus(); } @@ -59,6 +61,7 @@ PhysicalDeleteFilter::PhysicalDeleteFilter(State state) eftt_(NULL), memcpy_(NULL), memcat_(NULL) { + set_phy_oper_type(kPhysicalDeleteFilter); InitExpandedStatus(); } @@ -83,10 +86,12 @@ PhysicalDeleteFilter::State::State(PhysicalOperatorBase* child_left, *hash manner and accelerate the probe phase * */ -bool PhysicalDeleteFilter::Open(const PartitionOffset& partition_offset) { +bool PhysicalDeleteFilter::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { #ifdef TIME startTimer(&timer); #endif + RETURN_IF_CANCELLED(exec_status); RegisterExpandedThreadToAllBarriers(); int ret = rSuccess; @@ -163,7 +168,7 @@ bool 
PhysicalDeleteFilter::Open(const PartitionOffset& partition_offset) { * in order to accelerate the open response time. */ LOG(INFO) << "delete filter operator begin to open left child" << endl; - state_.child_left_->Open(partition_offset); + state_.child_left_->Open(exec_status, partition_offset); LOG(INFO) << "delete filter operator finished opening left child" << endl; BarrierArrive(0); BasicHashTable::Iterator tmp_it = hashtable_->CreateIterator(); @@ -190,7 +195,10 @@ bool PhysicalDeleteFilter::Open(const PartitionOffset& partition_offset) { LOG(INFO) << "delete filter operator begin to call left child's next()" << endl; - while (state_.child_left_->Next(dftc->l_block_for_asking_)) { + RETURN_IF_CANCELLED(exec_status); + + while (state_.child_left_->Next(exec_status, dftc->l_block_for_asking_)) { + RETURN_IF_CANCELLED(exec_status); delete dftc->l_block_stream_iterator_; dftc->l_block_stream_iterator_ = dftc->l_block_for_asking_->createIterator(); @@ -237,13 +245,17 @@ bool PhysicalDeleteFilter::Open(const PartitionOffset& partition_offset) { // hashtable->report_status(); // printf("join open consume %d tuples\n",consumed_tuples_from_left); + RETURN_IF_CANCELLED(exec_status); + + state_.child_right_->Open(exec_status, partition_offset); + RETURN_IF_CANCELLED(exec_status); - state_.child_right_->Open(partition_offset); LOG(INFO) << "delete filter operator finished opening right child" << endl; return true; } -bool PhysicalDeleteFilter::Next(BlockStreamBase* block) { +bool PhysicalDeleteFilter::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { void* result_tuple; void* tuple_from_right_child; void* tuple_in_hashtable; @@ -258,6 +270,8 @@ bool PhysicalDeleteFilter::Next(BlockStreamBase* block) { reinterpret_cast(GetContext()); while (true) { + RETURN_IF_CANCELLED(exec_status); + while ((tuple_from_right_child = dftc->r_block_stream_iterator_->currentTuple()) > 0) { unsigned bn = @@ -327,7 +341,8 @@ bool 
PhysicalDeleteFilter::Next(BlockStreamBase* block) { } dftc->r_block_for_asking_->setEmpty(); dftc->hashtable_iterator_ = hashtable_->CreateIterator(); - if (state_.child_right_->Next(dftc->r_block_for_asking_) == false) { + if (state_.child_right_->Next(exec_status, dftc->r_block_for_asking_) == + false) { if (block->Empty() == true) { free(joinedTuple); return false; @@ -350,10 +365,12 @@ bool PhysicalDeleteFilter::Next(BlockStreamBase* block) { hashtable_->placeIterator(dftc->hashtable_iterator_, bn); } } - return Next(block); + RETURN_IF_CANCELLED(exec_status); + + return Next(exec_status, block); } -bool PhysicalDeleteFilter::Close() { +bool PhysicalDeleteFilter::Close(SegmentExecStatus* const exec_status) { #ifdef TIME stopTimer(&timer); printf("time consuming: %lld, %f\n", timer, @@ -362,9 +379,12 @@ bool PhysicalDeleteFilter::Close() { LOG(INFO) << "Consumes %ld tuples from left child!" << endl; InitExpandedStatus(); DestoryAllContext(); - delete hashtable_; - state_.child_left_->Close(); - state_.child_right_->Close(); + if (NULL != hashtable_) { + delete hashtable_; + hashtable_ = NULL; + } + state_.child_left_->Close(exec_status); + state_.child_right_->Close(exec_status); return true; } @@ -439,6 +459,15 @@ ThreadContext* PhysicalDeleteFilter::CreateContext() { dftc->r_block_stream_iterator_ = dftc->r_block_for_asking_->createIterator(); return dftc; } - +RetCode PhysicalDeleteFilter::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_right_) { + ret = state_.child_right_->GetAllSegments(all_segments); + } + if (NULL != state_.child_left_) { + ret = state_.child_left_->GetAllSegments(all_segments); + } + return ret; +} } /* namespace physical_operator */ } /* namespace claims */ diff --git a/physical_operator/physical_delete_filter.h b/physical_operator/physical_delete_filter.h index b19b59da6..56fddd407 100644 --- a/physical_operator/physical_delete_filter.h +++ b/physical_operator/physical_delete_filter.h @@ 
-33,10 +33,13 @@ #include #include #include +#include + #include "../../common/hash.h" #include "../../common/hashtable.h" #include "../utility/rdtsc.h" #include "../codegen/ExpressionGenerator.h" +#include "../common/error_define.h" #include "../physical_operator/physical_operator_base.h" #include "../physical_operator/physical_operator.h" @@ -109,10 +112,12 @@ class PhysicalDeleteFilter : public PhysicalOperator { PhysicalDeleteFilter(); virtual ~PhysicalDeleteFilter(); - bool Open(const PartitionOffset& partition_offset = 0); - bool Next(BlockStreamBase* block); - bool Close(); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: ThreadContext* CreateContext(); diff --git a/physical_operator/physical_filter.cpp b/physical_operator/physical_filter.cpp index cab7deb40..bb1903dcb 100644 --- a/physical_operator/physical_filter.cpp +++ b/physical_operator/physical_filter.cpp @@ -30,6 +30,8 @@ #include #include +#include + #include "../utility/warmup.h" #include "../utility/rdtsc.h" #include "../common/Expression/execfunc.h" @@ -41,6 +43,7 @@ #include "../codegen/ExpressionGenerator.h" #include "../common/error_no.h" #include "../common/expression/expr_node.h" +#include "../common/memory_handle.h" using claims::common::rSuccess; using claims::common::rCodegenFailed; @@ -53,6 +56,7 @@ PhysicalFilter::PhysicalFilter(State state) state_(state), generated_filter_function_(NULL), generated_filter_processing_fucntoin_(NULL) { + set_phy_oper_type(kPhysicalFilter); InitExpandedStatus(); } @@ -60,10 +64,14 @@ PhysicalFilter::PhysicalFilter() : PhysicalOperator(1, 1), generated_filter_function_(NULL), generated_filter_processing_fucntoin_(NULL) { + set_phy_oper_type(kPhysicalFilter); InitExpandedStatus(); } 
-PhysicalFilter::~PhysicalFilter() {} +PhysicalFilter::~PhysicalFilter() { + DELETE_PTR(state_.child_); + DELETE_PTR(state_.schema_); +} PhysicalFilter::State::State(Schema* schema, PhysicalOperatorBase* child, vector qual, unsigned block_size) : schema_(schema), child_(child), qual_(qual), block_size_(block_size) {} @@ -86,7 +94,10 @@ PhysicalFilter::State::State(Schema* schema, PhysicalOperatorBase* child, *computerFilterwithGeneratedCode. * 3)If it can't be optimized by llvm , we still choose computerFilter. */ -bool PhysicalFilter::Open(const PartitionOffset& kPartitiontOffset) { +bool PhysicalFilter::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& kPartitiontOffset) { + RETURN_IF_CANCELLED(exec_status); + // set a Synchronization point. RegisterExpandedThreadToAllBarriers(); FilterThreadContext* ftc = reinterpret_cast( @@ -130,7 +141,9 @@ bool PhysicalFilter::Open(const PartitionOffset& kPartitiontOffset) { // should null #endif } - bool ret = state_.child_->Open(kPartitiontOffset); + RETURN_IF_CANCELLED(exec_status); + + bool ret = state_.child_->Open(exec_status, kPartitiontOffset); SetReturnStatus(ret); BarrierArrive(); return GetReturnStatus(); @@ -143,29 +156,38 @@ bool PhysicalFilter::Open(const PartitionOffset& kPartitiontOffset) { * (2) block_for_asking_ is exhausted (should fetch a new block from child * and continue to process) */ -bool PhysicalFilter::Next(BlockStreamBase* block) { +bool PhysicalFilter::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + RETURN_IF_CANCELLED(exec_status); + void* tuple_from_child; void* tuple_in_block; FilterThreadContext* tc = reinterpret_cast(GetContext()); while (true) { + RETURN_IF_CANCELLED(exec_status); if (NULL == (tc->block_stream_iterator_->currentTuple())) { /* mark the block as processed by setting it empty*/ tc->block_for_asking_->setEmpty(); - if (state_.child_->Next(tc->block_for_asking_)) { + + if (state_.child_->Next(exec_status, tc->block_for_asking_)) { + 
RETURN_IF_CANCELLED(exec_status); + delete tc->block_stream_iterator_; tc->block_stream_iterator_ = tc->block_for_asking_->createIterator(); } else { - if (!block->Empty()) + if (!block->Empty()) { return true; - else + } else { return false; + } } } ProcessInLogic(block, tc); - if (block->Full()) + if (block->Full()) { // for case (1) return true; + } } } @@ -210,8 +232,9 @@ void PhysicalFilter::ProcessInLogic(BlockStreamBase* block, } #endif #else - pass_filter = tc->thread_condi_[0]->MoreExprEvaluate( - tc->thread_condi_, tuple_from_child, state_.schema_); + tc->expr_eval_cnxt_.tuple[0] = tuple_from_child; + pass_filter = tc->thread_condi_[0]->MoreExprEvaluate(tc->thread_condi_, + tc->expr_eval_cnxt_); #endif if (pass_filter) { const unsigned bytes = @@ -232,10 +255,10 @@ void PhysicalFilter::ProcessInLogic(BlockStreamBase* block, } } -bool PhysicalFilter::Close() { +bool PhysicalFilter::Close(SegmentExecStatus* const exec_status) { InitExpandedStatus(); DestoryAllContext(); - state_.child_->Close(); + state_.child_->Close(exec_status); return true; } @@ -308,6 +331,7 @@ ThreadContext* PhysicalFilter::CreateContext() { ftc->temp_block_ = BlockStreamBase::createBlock(state_.schema_, state_.block_size_); ftc->block_stream_iterator_ = ftc->block_for_asking_->createIterator(); + ftc->expr_eval_cnxt_.schema[0] = state_.schema_; #ifdef NEWCONDI ftc->thread_qual_ = state_.qual_; for (int i = 0; i < state_.qual_.size(); i++) { @@ -336,6 +360,12 @@ int PhysicalFilter::DecideFilterFunction( return rCodegenFailed; } } - +RetCode PhysicalFilter::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + ret = state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace claims } // namespace physical_operator diff --git a/physical_operator/physical_filter.h b/physical_operator/physical_filter.h index c4c68914e..962e23957 100644 --- a/physical_operator/physical_filter.h +++ b/physical_operator/physical_filter.h @@ 
-28,6 +28,9 @@ #ifndef PHYSICAL_OPERATOR_PHYSICAL_FILTER_H_ #define PHYSICAL_OPERATOR_PHYSICAL_FILTER_H_ +#include + +#include "../common/error_define.h" #define GLOG_NO_ABBREVIATED_SEVERITIES #include @@ -51,7 +54,9 @@ #include "../codegen/ExpressionGenerator.h" #include "../common/error_no.h" #include "../common/expression/expr_node.h" +using claims::common::ExprEvalCnxt; using claims::common::ExprNode; + namespace claims { namespace physical_operator { /** @@ -67,6 +72,7 @@ class PhysicalFilter : public PhysicalOperator { BlockStreamBase::BlockStreamTraverseIterator* block_stream_iterator_; vector thread_qual_; vector thread_condi_; + ExprEvalCnxt expr_eval_cnxt_; ~FilterThreadContext(); }; @@ -119,18 +125,20 @@ class PhysicalFilter : public PhysicalOperator { /** * @brief: choose which way to generate filter function */ - bool Open(const PartitionOffset& kPartitionOffset); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& kPartitionOffset); /** * @brief: fetch a block from child and execute ProcessInLogic */ - bool Next(BlockStreamBase* block); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); /** * @brief: revoke resource */ - bool Close(); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: /** diff --git a/physical_operator/physical_hash_join.cpp b/physical_operator/physical_hash_join.cpp index b098dfd9c..708d65b0e 100755 --- a/physical_operator/physical_hash_join.cpp +++ b/physical_operator/physical_hash_join.cpp @@ -32,11 +32,16 @@ #include "../physical_operator/physical_hash_join.h" #include +#include + #include "../codegen/ExpressionGenerator.h" +#include "../common/expression/expr_node.h" #include "../Config.h" #include "../Executor/expander_tracker.h" #include "../utility/rdtsc.h" +using claims::common::ExprNode; + // #define _DEBUG_ namespace claims { @@ -50,6 +55,7 @@ PhysicalHashJoin::PhysicalHashJoin(State state) eftt_(0), 
memcpy_(0), memcat_(0) { + set_phy_oper_type(kPhysicalHashJoin); // sema_open_.set_value(1); InitExpandedStatus(); } @@ -61,19 +67,34 @@ PhysicalHashJoin::PhysicalHashJoin() eftt_(0), memcpy_(0), memcat_(0) { + set_phy_oper_type(kPhysicalHashJoin); + // sema_open_.set_value(1); InitExpandedStatus(); } -PhysicalHashJoin::~PhysicalHashJoin() {} +PhysicalHashJoin::~PhysicalHashJoin() { + if (NULL != state_.child_right_) { + delete state_.child_right_; + state_.child_right_ = NULL; + } + if (NULL != state_.child_left_) { + delete state_.child_left_; + state_.child_left_ = NULL; + } + for (int i = 0; i < state_.join_condi_.size(); ++i) { + DELETE_PTR(state_.join_condi_[i]); + } + state_.join_condi_.clear(); +} PhysicalHashJoin::State::State( PhysicalOperatorBase* child_left, PhysicalOperatorBase* child_right, Schema* input_schema_left, Schema* input_schema_right, Schema* output_schema, Schema* ht_schema, std::vector joinIndex_left, std::vector joinIndex_right, - std::vector payload_left, std::vector payload_right, - unsigned ht_nbuckets, unsigned ht_bucketsize, unsigned block_size) + unsigned ht_nbuckets, unsigned ht_bucketsize, unsigned block_size, + vector join_condi) : child_left_(child_left), child_right_(child_right), input_schema_left_(input_schema_left), @@ -82,17 +103,19 @@ PhysicalHashJoin::State::State( hashtable_schema_(ht_schema), join_index_left_(joinIndex_left), join_index_right_(joinIndex_right), - payload_left_(payload_left), - payload_right_(payload_right), hashtable_bucket_num_(ht_nbuckets), hashtable_bucket_size_(ht_bucketsize), - block_size_(block_size) {} + block_size_(block_size), + join_condi_(join_condi) {} -bool PhysicalHashJoin::Open(const PartitionOffset& partition_offset) { +bool PhysicalHashJoin::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { #ifdef TIME startTimer(&timer); #endif + RETURN_IF_CANCELLED(exec_status); + RegisterExpandedThreadToAllBarriers(); unsigned long long int timer; @@ -101,29 
+124,18 @@ bool PhysicalHashJoin::Open(const PartitionOffset& partition_offset) { winning_thread = true; ExpanderTracker::getInstance()->addNewStageEndpoint( pthread_self(), LocalStageEndPoint(stage_desc, "Hash join build", 0)); - unsigned output_index = 0; - for (unsigned i = 0; i < state_.join_index_left_.size(); i++) { - join_index_left_to_output_[i] = output_index; - output_index++; - } - for (unsigned i = 0; i < state_.payload_left_.size(); i++) { - payload_left_to_output_[i] = output_index; - output_index++; - } - for (unsigned i = 0; i < state_.payload_right_.size(); i++) { - payload_right_to_output_[i] = output_index; - output_index++; - } hash_func_ = PartitionFunctionFactory::createBoostHashFunction( state_.hashtable_bucket_num_); unsigned long long hash_table_build = curtick(); hashtable_ = new BasicHashTable( state_.hashtable_bucket_num_, state_.hashtable_bucket_size_, state_.input_schema_left_->getTupleMaxSize()); + #ifdef _DEBUG_ consumed_tuples_from_left = 0; #endif +#ifdef CodeGen QNode* expr = createEqualJoinExpression( state_.hashtable_schema_, state_.input_schema_right_, state_.join_index_left_, state_.join_index_right_); @@ -144,6 +156,7 @@ bool PhysicalHashJoin::Open(const PartitionOffset& partition_offset) { LOG(INFO) << "Codegen(Join) failed!" << endl; } delete expr; +#endif } /** @@ -156,7 +169,7 @@ bool PhysicalHashJoin::Open(const PartitionOffset& partition_offset) { * serialization, then continue processing. 
Tong */ LOG(INFO) << "join operator begin to open left child" << endl; - state_.child_left_->Open(partition_offset); + state_.child_left_->Open(exec_status, partition_offset); LOG(INFO) << "join operator finished opening left child" << endl; BarrierArrive(0); BasicHashTable::Iterator tmp_it = hashtable_->CreateIterator(); @@ -179,9 +192,12 @@ bool PhysicalHashJoin::Open(const PartitionOffset& partition_offset) { unsigned long long int start = curtick(); unsigned long long int processed_tuple_count = 0; + RETURN_IF_CANCELLED(exec_status); LOG(INFO) << "join operator begin to call left child's next()" << endl; - while (state_.child_left_->Next(jtc->l_block_for_asking_)) { + while (state_.child_left_->Next(exec_status, jtc->l_block_for_asking_)) { + RETURN_IF_CANCELLED(exec_status); + delete jtc->l_block_stream_iterator_; jtc->l_block_stream_iterator_ = jtc->l_block_for_asking_->createIterator(); while (cur = jtc->l_block_stream_iterator_->nextTuple()) { @@ -200,6 +216,8 @@ bool PhysicalHashJoin::Open(const PartitionOffset& partition_offset) { } jtc->l_block_for_asking_->setEmpty(); } + DELETE_PTR(input_schema); + DELETE_PTR(oper); #ifdef _DEBUG_ tuples_in_hashtable = 0; @@ -213,12 +231,15 @@ bool PhysicalHashJoin::Open(const PartitionOffset& partition_offset) { } BarrierArrive(1); - state_.child_right_->Open(partition_offset); + state_.child_right_->Open(exec_status, partition_offset); LOG(INFO) << "join operator finished opening right child" << endl; return true; } -bool PhysicalHashJoin::Next(BlockStreamBase* block) { +bool PhysicalHashJoin::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + RETURN_IF_CANCELLED(exec_status); + void* result_tuple = NULL; void* tuple_from_right_child; void* tuple_in_hashtable; @@ -244,6 +265,8 @@ bool PhysicalHashJoin::Next(BlockStreamBase* block) { * send was full, so we need hashtable_iterator_ preserved. 
*/ while (true) { + RETURN_IF_CANCELLED(exec_status); + while (NULL != (tuple_from_right_child = jtc->r_block_stream_iterator_->currentTuple())) { unsigned bn = @@ -255,10 +278,14 @@ bool PhysicalHashJoin::Next(BlockStreamBase* block) { while (NULL != (tuple_in_hashtable = jtc->hashtable_iterator_.readCurrent())) { +#ifdef CodeGen cff_(tuple_in_hashtable, tuple_from_right_child, &key_exit, state_.join_index_left_, state_.join_index_right_, state_.hashtable_schema_, state_.input_schema_right_, eftt_); - +#else + key_exit = + JoinCondiProcess(tuple_in_hashtable, tuple_from_right_child, jtc); +#endif if (key_exit) { if (NULL != (result_tuple = block->allocateTuple( state_.output_schema_->getTupleMaxSize()))) { @@ -295,7 +322,8 @@ bool PhysicalHashJoin::Next(BlockStreamBase* block) { } jtc->r_block_for_asking_->setEmpty(); jtc->hashtable_iterator_ = hashtable_->CreateIterator(); - if (state_.child_right_->Next(jtc->r_block_for_asking_) == false) { + if (state_.child_right_->Next(exec_status, jtc->r_block_for_asking_) == + false) { if (block->Empty() == true) { return false; } else { @@ -317,7 +345,7 @@ bool PhysicalHashJoin::Next(BlockStreamBase* block) { } } -bool PhysicalHashJoin::Close() { +bool PhysicalHashJoin::Close(SegmentExecStatus* const exec_status) { #ifdef TIME stopTimer(&timer); LOG(INFO) << "time consuming: " << timer << ", " @@ -327,9 +355,12 @@ bool PhysicalHashJoin::Close() { << "tuples from left child!" 
<< endl; InitExpandedStatus(); DestoryAllContext(); - delete hashtable_; - state_.child_left_->Close(); - state_.child_right_->Close(); + if (NULL != hashtable_) { + delete hashtable_; + hashtable_ = NULL; + } + state_.child_left_->Close(exec_status); + state_.child_right_->Close(exec_status); return true; } @@ -374,12 +405,29 @@ inline void PhysicalHashJoin::IsMatchCodegen( Schema* l_schema, Schema* r_schema, ExprFuncTwoTuples func) { func(l_tuple_addr, r_tuple_addr, return_addr); } - +inline bool PhysicalHashJoin::JoinCondiProcess(void* tuple_left, + void* tuple_right, + JoinThreadContext* const hjtc) { + hjtc->expr_eval_cnxt_.tuple[0] = tuple_left; + hjtc->expr_eval_cnxt_.tuple[1] = tuple_right; + bool pass = false; + for (int i = 0; i < hjtc->join_condi_.size(); ++i) { + pass = *(bool*)(hjtc->join_condi_[i]->ExprEvaluate(hjtc->expr_eval_cnxt_)); + if (pass == false) { + return false; + } + } + return true; +} PhysicalHashJoin::JoinThreadContext::~JoinThreadContext() { delete l_block_for_asking_; delete l_block_stream_iterator_; delete r_block_for_asking_; delete r_block_stream_iterator_; + for (int i = 0; i < join_condi_.size(); ++i) { + DELETE_PTR(join_condi_[i]); + } + join_condi_.clear(); } ThreadContext* PhysicalHashJoin::CreateContext() { @@ -390,9 +438,25 @@ ThreadContext* PhysicalHashJoin::CreateContext() { jtc->r_block_for_asking_ = BlockStreamBase::createBlock( state_.input_schema_right_, state_.block_size_); jtc->r_block_stream_iterator_ = jtc->r_block_for_asking_->createIterator(); - + ExprNode* new_node = NULL; + for (int i = 0; i < state_.join_condi_.size(); ++i) { + new_node = state_.join_condi_[i]->ExprCopy(); + new_node->InitExprAtPhysicalPlan(); + jtc->join_condi_.push_back(new_node); + } + jtc->expr_eval_cnxt_.schema[0] = state_.input_schema_left_; + jtc->expr_eval_cnxt_.schema[1] = state_.input_schema_right_; return jtc; } - +RetCode PhysicalHashJoin::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != 
state_.child_right_) { + ret = state_.child_right_->GetAllSegments(all_segments); + } + if (NULL != state_.child_left_) { + ret = state_.child_left_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/physical_hash_join.h b/physical_operator/physical_hash_join.h index 736a46e83..86f4f7e72 100755 --- a/physical_operator/physical_hash_join.h +++ b/physical_operator/physical_hash_join.h @@ -38,14 +38,20 @@ #include #include #include +#include + #include "../Debug.h" #include "../utility/rdtsc.h" #include "../common/hash.h" #include "../common/hashtable.h" #include "../codegen/ExpressionGenerator.h" +#include "../common/error_define.h" #include "../common/expression/expr_node.h" #include "../physical_operator/physical_operator_base.h" #include "../physical_operator/physical_operator.h" +using claims::common::ExprNode; +using claims::common::ExprEvalCnxt; + namespace claims { namespace physical_operator { @@ -63,6 +69,8 @@ class PhysicalHashJoin : public PhysicalOperator { BlockStreamBase* r_block_for_asking_; BlockStreamBase::BlockStreamTraverseIterator* r_block_stream_iterator_; BasicHashTable::Iterator hashtable_iterator_; + std::vector join_condi_; + ExprEvalCnxt expr_eval_cnxt_; }; class State { @@ -77,18 +85,17 @@ class PhysicalHashJoin : public PhysicalOperator { Schema* input_schema_left, Schema* input_schema_right, Schema* output_schema, Schema* ht_schema, std::vector joinIndex_left, - std::vector joinIndex_right, - std::vector payload_left, - std::vector payload_right, unsigned ht_nbuckets, - unsigned ht_bucketsize, unsigned block_size); - State(){}; + std::vector joinIndex_right, unsigned ht_nbuckets, + unsigned ht_bucketsize, unsigned block_size, + vector join_condi); + State() {} friend class boost::serialization::access; template void serialize(Archive& ar, const unsigned int version) { ar& child_left_& child_right_& input_schema_left_& input_schema_right_& 
output_schema_& hashtable_schema_& join_index_left_& - join_index_right_& payload_left_& payload_right_& - hashtable_bucket_num_& hashtable_bucket_size_& block_size_; + join_index_right_& hashtable_bucket_num_& hashtable_bucket_size_& + block_size_& join_condi_; } public: @@ -100,9 +107,7 @@ class PhysicalHashJoin : public PhysicalOperator { // how to join std::vector join_index_left_; std::vector join_index_right_; - std::vector payload_left_; - std::vector payload_right_; - + std::vector join_condi_; // hashtable unsigned hashtable_bucket_num_; unsigned hashtable_bucket_size_; @@ -122,7 +127,8 @@ class PhysicalHashJoin : public PhysicalOperator { * partiton the function operates on. * @return true in all cases. */ - bool Open(const PartitionOffset& partition_offset = 0); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); /** * @brief Method description: Get tuples from right child, use algorithm to * find whether there's a left tuple that matches @@ -133,14 +139,15 @@ class PhysicalHashJoin : public PhysicalOperator { * @return false if there's no tuple to function and the block is empty, * otherwise true. */ - bool Next(BlockStreamBase* block); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); /** * @brief Method description: Initialize thread status, destroy contexts, * delete hashtable, and close childs. * @return true. 
*/ - bool Close(); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: /** @@ -166,14 +173,11 @@ class PhysicalHashJoin : public PhysicalOperator { vector& r_join_index, Schema* l_schema, Schema* r_schema, ExprFuncTwoTuples func); // static void copy_to_hashtable(void* desc, void* src, Schema* ); + bool JoinCondiProcess(void* tuple_left, void* tuple_right, + JoinThreadContext* const jtc); + private: State state_; - /* joinIndex map to the output*/ - std::map join_index_left_to_output_; - /* payload_left map to the output*/ - std::map payload_left_to_output_; - /* payload_right map to the output*/ - std::map payload_right_to_output_; PartitionFunction* hash_func_; BasicHashTable* hashtable_; diff --git a/physical_operator/physical_limit.cpp b/physical_operator/physical_limit.cpp index 590ef281f..d3b76405e 100644 --- a/physical_operator/physical_limit.cpp +++ b/physical_operator/physical_limit.cpp @@ -27,14 +27,23 @@ */ #include "../physical_operator/physical_limit.h" +#include "../common/memory_handle.h" +#include +#include "../common/error_define.h" +#include "../physical_operator/physical_operator_base.h" #include "../utility/rdtsc.h" +using claims::common::rSuccess; namespace claims { namespace physical_operator { -PhysicalLimit::PhysicalLimit() : received_tuples_(0), block_for_asking_(NULL) {} +PhysicalLimit::PhysicalLimit() : received_tuples_(0), block_for_asking_(NULL) { + set_phy_oper_type(kPhysicalLimit); +} PhysicalLimit::PhysicalLimit(State state) - : state_(state), received_tuples_(0), block_for_asking_(NULL) {} + : state_(state), received_tuples_(0), block_for_asking_(NULL) { + set_phy_oper_type(kPhysicalLimit); +} PhysicalLimit::State::State(Schema* schema, PhysicalOperatorBase* child, unsigned long limits, unsigned block_size, @@ -53,12 +62,17 @@ PhysicalLimit::~PhysicalLimit() { /** * Initialize of the position of current tuple and target tuple */ -bool PhysicalLimit::Open(const 
PartitionOffset& kPartitionOffset) { +bool PhysicalLimit::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& kPartitionOffset) { + RETURN_IF_CANCELLED(exec_status); + tuple_cur_ = 0; block_for_asking_ = BlockStreamBase::createBlock(state_.schema_, state_.block_size_); received_tuples_ = 0; - return state_.child_->Open(kPartitionOffset); + RETURN_IF_CANCELLED(exec_status); + + return state_.child_->Open(exec_status, kPartitionOffset); } /** * if the limit has already been exhausted, the current loop breaks @@ -69,11 +83,19 @@ bool PhysicalLimit::Open(const PartitionOffset& kPartitionOffset) { // that the limit is exhausted is not necessary. However, in the current // implementation, the child iterator sub-tree leaded by exchange // lower iterator cannot be closed if not all the blocks are called. -bool PhysicalLimit::Next(BlockStreamBase* block) { - while (state_.child_->Next(block_for_asking_)) { +// if the size of the result after limit overflow one block, error may occur +// (fzh) +bool PhysicalLimit::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + RETURN_IF_CANCELLED(exec_status); + BlockStreamBase::BlockStreamTraverseIterator* it = NULL; + + while (state_.child_->Next(exec_status, block_for_asking_)) { + RETURN_IF_CANCELLED(exec_status); + void* tuple_from_child; - BlockStreamBase::BlockStreamTraverseIterator* it = - block_for_asking_->createIterator(); + DELETE_PTR(it); + it = block_for_asking_->createIterator(); while (NULL != (tuple_from_child = it->currentTuple())) { if (!LimitExhausted()) { if (!ShouldSkip()) { @@ -86,7 +108,7 @@ bool PhysicalLimit::Next(BlockStreamBase* block) { tuple_cur_++; it->increase_cur_(); } else { - it->~BlockStreamTraverseIterator(); + DELETE_PTR(it); return true; } } else { @@ -103,8 +125,8 @@ bool PhysicalLimit::Next(BlockStreamBase* block) { return !block->Empty(); } -bool PhysicalLimit::Close() { - state_.child_->Close(); +bool PhysicalLimit::Close(SegmentExecStatus* const 
exec_status) { + state_.child_->Close(exec_status); block_for_asking_->~BlockStreamBase(); return true; } @@ -114,6 +136,12 @@ void PhysicalLimit::Print() { state_.limit_tuples_); state_.child_->Print(); } - +RetCode PhysicalLimit::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + ret = state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/physical_limit.h b/physical_operator/physical_limit.h index 73f3bcb85..6f8809bb6 100644 --- a/physical_operator/physical_limit.h +++ b/physical_operator/physical_limit.h @@ -29,6 +29,8 @@ #ifndef PHYSICAL_OPERATOR_PHYSICAL_LIMIT_H_ #define PHYSICAL_OPERATOR_PHYSICAL_LIMIT_H_ +#include +#include "../common/error_define.h" #include "../physical_operator/physical_operator_base.h" namespace claims { @@ -71,20 +73,22 @@ class PhysicalLimit : public PhysicalOperatorBase { * @brief Method description: Initialize the position of current tuple and * target tuple */ - bool Open(const PartitionOffset& kPartitionOffset); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& kPartitionOffset); /** * @brief Method description:find limit_tuple tuples from start_position and * return them * @return : given tuples. 
*/ - bool Next(BlockStreamBase* block); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); /** * @brief Method description: revoke resource */ - bool Close(); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: /** diff --git a/physical_operator/physical_nest_loop_join.cpp b/physical_operator/physical_nest_loop_join.cpp index c6c1659cf..816e0cc6d 100644 --- a/physical_operator/physical_nest_loop_join.cpp +++ b/physical_operator/physical_nest_loop_join.cpp @@ -26,38 +26,58 @@ * */ +#include +#include #include "../physical_operator/physical_nest_loop_join.h" - #include "../Executor/expander_tracker.h" #include "../common/Block/BlockStream.h" +#include "../physical_operator/physical_operator_base.h" +#include "../common/expression/expr_node.h" +#include "../common/memory_handle.h" +using claims::common::ExprNode; #define GLOG_NO_ABBREVIATED_SEVERITIES #include "../common/log/logging.h" namespace claims { namespace physical_operator { -PhysicalNestLoopJoin::PhysicalNestLoopJoin() : PhysicalOperator(2, 2) { +PhysicalNestLoopJoin::PhysicalNestLoopJoin() + : PhysicalOperator(2, 2), block_buffer_(NULL), join_condi_process_(NULL) { + set_phy_oper_type(kPhysicalNestLoopJoin); InitExpandedStatus(); } PhysicalNestLoopJoin::~PhysicalNestLoopJoin() { - // TODO Auto-generated destructor stub + DELETE_PTR(state_.child_left_); + DELETE_PTR(state_.child_right_); + // DELETE_PTR(state_.input_schema_left_); + // DELETE_PTR(state_.input_schema_right_); + // for (int i = 0; i < state_.join_condi_.size(); ++i) { + // DELETE_PTR(state_.join_condi_[i]); + // } + // state_.join_condi_.clear(); } PhysicalNestLoopJoin::PhysicalNestLoopJoin(State state) - : PhysicalOperator(2, 2), state_(state) { + : PhysicalOperator(2, 2), + state_(state), + block_buffer_(NULL), + join_condi_process_(NULL) { + set_phy_oper_type(kPhysicalNestLoopJoin); InitExpandedStatus(); } 
PhysicalNestLoopJoin::State::State(PhysicalOperatorBase *child_left, PhysicalOperatorBase *child_right, Schema *input_schema_left, Schema *input_schema_right, - Schema *output_schema, unsigned block_size) + Schema *output_schema, unsigned block_size, + std::vector join_condi) : child_left_(child_left), child_right_(child_right), input_schema_left_(input_schema_left), input_schema_right_(input_schema_right), output_schema_(output_schema), - block_size_(block_size) {} + block_size_(block_size), + join_condi_(join_condi) {} /** * @brief Method description : describe the open method which gets results from @@ -65,7 +85,10 @@ PhysicalNestLoopJoin::State::State(PhysicalOperatorBase *child_left, * block buffer is a dynamic block buffer since all the expanded threads will * share the same block buffer. */ -bool PhysicalNestLoopJoin::Open(const PartitionOffset &partition_offset) { +bool PhysicalNestLoopJoin::Open(SegmentExecStatus *const exec_status, + const PartitionOffset &partition_offset) { + RETURN_IF_CANCELLED(exec_status); + RegisterExpandedThreadToAllBarriers(); unsigned long long int timer; bool winning_thread = false; @@ -75,21 +98,63 @@ bool PhysicalNestLoopJoin::Open(const PartitionOffset &partition_offset) { winning_thread = true; timer = curtick(); block_buffer_ = new DynamicBlockBuffer(); + if (state_.join_condi_.size() == 0) { + join_condi_process_ = WithoutJoinCondi; + } else { + join_condi_process_ = WithJoinCondi; + } LOG(INFO) << "[NestloopJoin]: [the first thread opens the nestloopJoin " "physical operator]" << std::endl; } - state_.child_left_->Open(partition_offset); + RETURN_IF_CANCELLED(exec_status); + + state_.child_left_->Open(exec_status, partition_offset); + RETURN_IF_CANCELLED(exec_status); + BarrierArrive(0); - NestLoopJoinContext *jtc = new NestLoopJoinContext(); + + NestLoopJoinContext *jtc = CreateOrReuseContext(crm_numa_sensitive); // create a new block to hold the results from the left child // and add results to the dynamic buffer + // 
jtc->block_for_asking_ == BlockStreamBase::createBlock( + // state_.input_schema_left_, + // state_.block_size_); CreateBlockStream(jtc->block_for_asking_, state_.input_schema_left_); - while (state_.child_left_->Next(jtc->block_for_asking_)) { + // auto temp = jtc->block_for_asking_->getBlock(); + // cout << "temp start" << temp << endl; + // + // cout << "init block_for_asking_ : " << jtc->block_for_asking_->getBlock() + // << " is reference : " << jtc->block_for_asking_->isIsReference() << + // endl; + while (state_.child_left_->Next(exec_status, jtc->block_for_asking_)) { + if (exec_status->is_cancelled()) { + if (NULL != jtc->block_for_asking_) { + delete jtc->block_for_asking_; + jtc->block_for_asking_ = NULL; + } + return false; + } + // cout << "after assgin start :" << jtc->block_for_asking_->getBlock() + // << " is reference : " << jtc->block_for_asking_->isIsReference() + // << endl; block_buffer_->atomicAppendNewBlock(jtc->block_for_asking_); + // if (!jtc->block_for_asking_->isIsReference()) { CreateBlockStream(jtc->block_for_asking_, state_.input_schema_left_); + // } else { + // // cout << "temp after" << temp << endl; + // // delete temp; + // CreateBlockStream(jtc->block_for_asking_, + // state_.input_schema_left_); + // jtc->block_for_asking_->setIsReference(false); + // } + // cout << "new start :" << jtc->block_for_asking_->getBlock() + // << " is reference : " << jtc->block_for_asking_->isIsReference() + // << endl; } + // cout << "buffer_size_ : " << block_buffer_->GetBufferSize() << endl; // the last block is created without storing the results from the left // child + if (NULL != jtc->block_for_asking_) { delete jtc->block_for_asking_; jtc->block_for_asking_ = NULL; @@ -106,7 +171,11 @@ bool PhysicalNestLoopJoin::Open(const PartitionOffset &partition_offset) { } BarrierArrive(1); // ??ERROR // join_thread_context* jtc=new join_thread_context(); + // jtc->block_for_asking_ == BlockStreamBase::createBlock( + // state_.input_schema_right_, + // 
state_.block_size_); CreateBlockStream(jtc->block_for_asking_, state_.input_schema_right_); + jtc->block_for_asking_->setEmpty(); jtc->block_stream_iterator_ = jtc->block_for_asking_->createIterator(); jtc->buffer_iterator_ = block_buffer_->createIterator(); @@ -117,13 +186,13 @@ bool PhysicalNestLoopJoin::Open(const PartitionOffset &partition_offset) { InitContext(jtc); // rename this function, here means to store the thread // context in the operator context - if (block_buffer_->GetBufferSize() > 0) { - state_.child_right_->Open(partition_offset); - } + RETURN_IF_CANCELLED(exec_status); + state_.child_right_->Open(exec_status, partition_offset); return true; } -bool PhysicalNestLoopJoin::Next(BlockStreamBase *block) { +bool PhysicalNestLoopJoin::Next(SegmentExecStatus *const exec_status, + BlockStreamBase *block) { /** * @brief it describes the sequence of the nestloop join. As the intermediate * result of the left child has been stored in the dynamic block buffer in the @@ -137,36 +206,51 @@ bool PhysicalNestLoopJoin::Next(BlockStreamBase *block) { * @ return * @details   (additional) */ + RETURN_IF_CANCELLED(exec_status); + void *tuple_from_buffer_child = NULL; void *tuple_from_right_child = NULL; void *result_tuple = NULL; + bool pass = false; BlockStreamBase *buffer_block = NULL; NestLoopJoinContext *jtc = reinterpret_cast(GetContext()); while (1) { + RETURN_IF_CANCELLED(exec_status); + while (NULL != (tuple_from_right_child = jtc->block_stream_iterator_->currentTuple())) { while (1) { while (NULL != (tuple_from_buffer_child = jtc->buffer_stream_iterator_->currentTuple())) { - if (NULL != (result_tuple = block->allocateTuple( - state_.output_schema_->getTupleMaxSize()))) { - const unsigned copyed_bytes = state_.input_schema_left_->copyTuple( - tuple_from_buffer_child, result_tuple); - state_.input_schema_right_->copyTuple( - tuple_from_right_child, - reinterpret_cast(result_tuple + copyed_bytes)); - } else { - // LOG(INFO) << "[NestloopJoin]: [a block of the 
result - // is full of " - // "the nest loop join result ]" << - // std::endl; - return true; + pass = join_condi_process_(tuple_from_buffer_child, + tuple_from_right_child, jtc); + if (pass) { + if (NULL != (result_tuple = block->allocateTuple( + state_.output_schema_->getTupleMaxSize()))) { + const unsigned copyed_bytes = + state_.input_schema_left_->copyTuple(tuple_from_buffer_child, + result_tuple); + state_.input_schema_right_->copyTuple( + tuple_from_right_child, + reinterpret_cast(result_tuple + copyed_bytes)); + } else { + // LOG(INFO) << "[NestloopJoin]: [a block of the + // result + // is full of " + // "the nest loop join result ]" << + // std::endl; + return true; + } } jtc->buffer_stream_iterator_->increase_cur_(); } - jtc->buffer_stream_iterator_->~BlockStreamTraverseIterator(); + // jtc->buffer_stream_iterator_->~BlockStreamTraverseIterator(); + if (jtc->buffer_stream_iterator_ != NULL) { + delete jtc->buffer_stream_iterator_; + jtc->buffer_stream_iterator_ = NULL; + } if (NULL != (buffer_block = jtc->buffer_iterator_.nextBlock())) { jtc->buffer_stream_iterator_ = buffer_block->createIterator(); } else { @@ -182,6 +266,10 @@ bool PhysicalNestLoopJoin::Next(BlockStreamBase *block) { false && "[NestloopJoin]: this block shouldn't be NULL in nest loop join!"); } + if (jtc->buffer_stream_iterator_ != NULL) { + delete jtc->buffer_stream_iterator_; + jtc->buffer_stream_iterator_ = NULL; + } jtc->buffer_stream_iterator_ = buffer_block->createIterator(); jtc->block_stream_iterator_->increase_cur_(); } @@ -189,14 +277,24 @@ bool PhysicalNestLoopJoin::Next(BlockStreamBase *block) { // if buffer is empty, return false directly jtc->buffer_iterator_.ResetCur(); if (NULL == (buffer_block = jtc->buffer_iterator_.nextBlock())) { - LOG(INFO) << "[NestloopJoin]: the buffer is empty in nest loop join!"; + LOG(WARNING) << "[NestloopJoin]: the buffer is empty in nest loop join!"; + // for getting all right child's data + jtc->block_for_asking_->setEmpty(); + while 
(state_.child_right_->Next(exec_status, jtc->block_for_asking_)) { + jtc->block_for_asking_->setEmpty(); + } return false; } + if (jtc->buffer_stream_iterator_ != NULL) { + delete jtc->buffer_stream_iterator_; + jtc->buffer_stream_iterator_ = NULL; + } jtc->buffer_stream_iterator_ = buffer_block->createIterator(); // ask block from right child jtc->block_for_asking_->setEmpty(); - if (false == state_.child_right_->Next(jtc->block_for_asking_)) { + if (false == + state_.child_right_->Next(exec_status, jtc->block_for_asking_)) { if (true == block->Empty()) { LOG(WARNING) << "[NestloopJoin]: [no join result is stored in the " "block after traverse the right child operator]" @@ -208,23 +306,40 @@ bool PhysicalNestLoopJoin::Next(BlockStreamBase *block) { return true; } } - jtc->block_stream_iterator_->~BlockStreamTraverseIterator(); + if (jtc->block_stream_iterator_ != NULL) { + delete jtc->block_stream_iterator_; + jtc->block_stream_iterator_ = NULL; + } jtc->block_stream_iterator_ = jtc->block_for_asking_->createIterator(); } - return Next(block); + return Next(exec_status, block); } -bool PhysicalNestLoopJoin::Close() { +bool PhysicalNestLoopJoin::Close(SegmentExecStatus *const exec_status) { InitExpandedStatus(); DestoryAllContext(); - if (NULL != block_buffer_) { - delete block_buffer_; - block_buffer_ = NULL; + DELETE_PTR(block_buffer_); + state_.child_left_->Close(exec_status); + state_.child_right_->Close(exec_status); + return true; +} +bool PhysicalNestLoopJoin::WithJoinCondi(void *tuple_left, void *tuple_right, + NestLoopJoinContext *const nljcnxt) { + nljcnxt->expr_eval_cnxt_.tuple[0] = tuple_left; + nljcnxt->expr_eval_cnxt_.tuple[1] = tuple_right; + bool pass = false; + for (int i = 0; i < nljcnxt->join_condi_.size(); ++i) { + pass = *(bool *)(nljcnxt->join_condi_[i]->ExprEvaluate( + nljcnxt->expr_eval_cnxt_)); + if (pass == false) { + return false; + } } - state_.child_left_->Close(); - state_.child_right_->Close(); return true; } - +bool 
PhysicalNestLoopJoin::WithoutJoinCondi( + void *tuple_left, void *tuple_right, NestLoopJoinContext *const nljcnxt) { + return true; +} /** * @brief Method description : create a block buffer based on the given left * or right input schema @@ -233,7 +348,11 @@ bool PhysicalNestLoopJoin::Close() { */ bool PhysicalNestLoopJoin::CreateBlockStream(BlockStreamBase *&target, Schema *&schema) const { + // if (target->isIsReference()) { + // target = BlockStreamBase::createBlock2(schema, state_.block_size_); + // } else { target = BlockStreamBase::createBlock(schema, state_.block_size_); + // } if (NULL == target) { assert(false); return false; @@ -241,7 +360,36 @@ bool PhysicalNestLoopJoin::CreateBlockStream(BlockStreamBase *&target, return true; } } - +PhysicalNestLoopJoin::NestLoopJoinContext::NestLoopJoinContext( + const vector &join_condi, const Schema *left_schema, + const Schema *right_schema) + : block_for_asking_(NULL), + block_stream_iterator_(NULL), + buffer_stream_iterator_(NULL) { + ExprNode *new_node = NULL; + for (int i = 0; i < join_condi.size(); ++i) { + new_node = join_condi[i]->ExprCopy(); + new_node->InitExprAtPhysicalPlan(); + join_condi_.push_back(new_node); + } + expr_eval_cnxt_.schema[0] = left_schema; + expr_eval_cnxt_.schema[1] = right_schema; +} +PhysicalNestLoopJoin::NestLoopJoinContext::~NestLoopJoinContext() { + DELETE_PTR(block_for_asking_); + DELETE_PTR(block_stream_iterator_); + DELETE_PTR(buffer_stream_iterator_); + for (int i = 0; i < join_condi_.size(); ++i) { + DELETE_PTR(join_condi_[i]); + } + join_condi_.clear(); +} +ThreadContext *PhysicalNestLoopJoin::CreateContext() { + NestLoopJoinContext *jtc = + new NestLoopJoinContext(state_.join_condi_, state_.input_schema_left_, + state_.input_schema_right_); + return jtc; +} void PhysicalNestLoopJoin::Print() { printf("NestLoopJoin\n"); printf("------Join Left-------\n"); @@ -249,6 +397,16 @@ void PhysicalNestLoopJoin::Print() { printf("------Join Right-------\n"); 
state_.child_right_->Print(); } +RetCode PhysicalNestLoopJoin::GetAllSegments(stack *all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_right_) { + ret = state_.child_right_->GetAllSegments(all_segments); + } + if (NULL != state_.child_left_) { + ret = state_.child_left_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/physical_nest_loop_join.h b/physical_operator/physical_nest_loop_join.h index 64b14b0e8..dea77614f 100644 --- a/physical_operator/physical_nest_loop_join.h +++ b/physical_operator/physical_nest_loop_join.h @@ -27,47 +27,33 @@ */ #ifndef PHYSICAL_OPERATOR_PHYSICAL_NEST_LOOP_JOIN_H_ #define PHYSICAL_OPERATOR_PHYSICAL_NEST_LOOP_JOIN_H_ +#include +#include #include "../physical_operator/physical_nest_loop_join.h" - #include +#include "../common/expression/expr_node.h" #include "../physical_operator/physical_operator_base.h" #include "../physical_operator/physical_operator.h" #include "../Debug.h" +using claims::common::ExprEvalCnxt; +using claims::common::ExprNode; namespace claims { namespace physical_operator { + class PhysicalNestLoopJoin : public PhysicalOperator { protected: class NestLoopJoinContext : public ThreadContext { public: + NestLoopJoinContext(const vector &join_condi, + const Schema *left_schema, const Schema *right_schema); + ~NestLoopJoinContext(); BlockStreamBase *block_for_asking_; BlockStreamBase::BlockStreamTraverseIterator *block_stream_iterator_; DynamicBlockBuffer::Iterator buffer_iterator_; BlockStreamBase::BlockStreamTraverseIterator *buffer_stream_iterator_; - }; - - struct RemainingBlock { - RemainingBlock(BlockStreamBase *bsb_right, - BlockStreamBase::BlockStreamTraverseIterator *bsti) - : bsb_right_(bsb_right), - blockstream_iterator_(bsti), - buffer_iterator_(NULL), - buffer_stream_iterator_(NULL) {} - RemainingBlock() - : bsb_right_(NULL), - blockstream_iterator_(NULL), - buffer_iterator_(NULL), - 
buffer_stream_iterator_(NULL) {} - RemainingBlock(const RemainingBlock &r) { - bsb_right_ = r.bsb_right_; - blockstream_iterator_ = r.blockstream_iterator_; - buffer_iterator_ = r.buffer_iterator_; - buffer_stream_iterator_ = r.buffer_stream_iterator_; - } - BlockStreamBase *bsb_right_; - BlockStreamBase::BlockStreamTraverseIterator *blockstream_iterator_; - DynamicBlockBuffer::Iterator *buffer_iterator_; - BlockStreamBase::BlockStreamTraverseIterator *buffer_stream_iterator_; + vector join_condi_; + ExprEvalCnxt expr_eval_cnxt_; }; public: @@ -77,52 +63,50 @@ class PhysicalNestLoopJoin : public PhysicalOperator { public: State(PhysicalOperatorBase *child_left, PhysicalOperatorBase *child_right, Schema *input_schema_left, Schema *input_schema_right, - Schema *output_schema, unsigned block_size); + Schema *output_schema, unsigned block_size, + std::vector join_condi); State() {} friend class boost::serialization::access; template void serialize(const Archive &ar, const unsigned int version) { ar &child_left_ &child_right_ &input_schema_left_ &input_schema_right_ & - output_schema_ &block_size_; + output_schema_ &block_size_ &join_condi_; } public: PhysicalOperatorBase *child_left_, *child_right_; Schema *input_schema_left_, *input_schema_right_; Schema *output_schema_; + std::vector join_condi_; unsigned block_size_; }; + typedef bool (*JoinCondiProcess)(void *tuple_left, void *tuple_right, + NestLoopJoinContext *const nljcnxt); public: PhysicalNestLoopJoin(); virtual ~PhysicalNestLoopJoin(); PhysicalNestLoopJoin(State state); - bool Open(const PartitionOffset &partition_offset = 0); - bool Next(BlockStreamBase *block); - bool Close(); + bool Open(SegmentExecStatus *const exec_status, + const PartitionOffset &partition_offset = 0); + bool Next(SegmentExecStatus *const exec_status, BlockStreamBase *block); + bool Close(SegmentExecStatus *const exec_status); void Print(); + RetCode GetAllSegments(stack *all_segments); + + State state_; private: + static bool 
WithJoinCondi(void *tuple_left, void *tuple_right, + NestLoopJoinContext *const nljcnxt); + static bool WithoutJoinCondi(void *tuple_left, void *tuple_right, + NestLoopJoinContext *const nljcnxt); bool CreateBlockStream(BlockStreamBase *&, Schema *&schema) const; - bool AtomicPopRemainingBlock(RemainingBlock &rb); - void AtomicPushRemainingBlock(RemainingBlock rb); - BlockStreamBase *AtomicPopFreeBlockStream(); - void AtomicPushFreeBlockStream(BlockStreamBase *block); - BlockStreamBase *AtomicPopFreeHtBlockStream(); - void AtomicPushFreeHtBlockStream(BlockStreamBase *block); + ThreadContext *CreateContext(); DynamicBlockBuffer *block_buffer_; - std::map joinIndex_left_to_output_; - /* payload_left map to the output*/ - std::map payload_left_to_output_; - /* payload_right map to the output*/ - std::map payload_right_to_output_; + JoinCondiProcess join_condi_process_; - State state_; - Lock lock_; - unsigned produced_tuples_; - unsigned consumed_tuples_from_right_; - unsigned consumed_tuples_from_left_; friend class boost::serialization::access; template void serialize(const Archive &ar, const unsigned int version) { diff --git a/physical_operator/physical_operator.h b/physical_operator/physical_operator.h index 0bbeefa2e..6df4faf13 100644 --- a/physical_operator/physical_operator.h +++ b/physical_operator/physical_operator.h @@ -80,9 +80,14 @@ class PhysicalOperator : public PhysicalOperatorBase { * when deserializing, and hence the following three virtual method cannot be * pure. 
*/ - virtual bool Open(const PartitionOffset& part_off = 0) { assert(false); }; - virtual bool Next(BlockStreamBase*) { assert(false); }; - virtual bool Close() { assert(false); }; + virtual bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& part_off = 0) { + assert(false); + }; + virtual bool Next(SegmentExecStatus* const exec_status, BlockStreamBase*) { + assert(false); + }; + virtual bool Close(SegmentExecStatus* const exec_status) { assert(false); }; virtual void Print() { printf("??\n"); }; /** As different elastic iterators differs from each other in the structure of diff --git a/physical_operator/physical_operator_base.h b/physical_operator/physical_operator_base.h index 6e8de8c3a..df1eb9c90 100755 --- a/physical_operator/physical_operator_base.h +++ b/physical_operator/physical_operator_base.h @@ -25,16 +25,40 @@ #define PHYSICAL_OPERATOR_PHYSICAL_OPERATOR_BASE_H_ #include #include +#include #include #include "../common/Block/ResultSet.h" #include "../common/Block/BlockStream.h" +#include "../common/data_type.h" +#include "../common/error_define.h" #include "../common/ids.h" +#include "../exec_tracker/segment_exec_status.h" +using claims::common::rSuccess; namespace claims { namespace physical_operator { +class Segment; + /** * This is the base class for the block stream iterators. 
*/ - +enum PhysicalOperatorType { + kPhysicalNull, + kPhysicalScan, + kPhysicalFilter, + kPhysicalAggregation, + kPhysicalHashJoin, + kPhysicalProject, + kPhysicalSort, + kPhysicalQueryPlanRoot, + kPhysicalNestLoopJoin, + kPhysicalLimit, + kPhysicalSubquery, + kPhysicalDeleteFilter, + kPhysicalExchangeMerger, + kphysicalExchangeSender, + kphysicalExpander, + kPhysicalResult +}; class PhysicalOperatorBase { public: PhysicalOperatorBase(); @@ -42,15 +66,23 @@ class PhysicalOperatorBase { static PhysicalOperatorBase* createIterator(const string& IteratorName); - virtual bool Open(const PartitionOffset& part_off = 0) = 0; - virtual bool Next(BlockStreamBase*) = 0; - virtual bool Close() = 0; - virtual void Print() { printf("??\n"); }; - + virtual bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& part_off = 0) = 0; + virtual bool Next(SegmentExecStatus* const exec_status, BlockStreamBase*) = 0; + virtual bool Close(SegmentExecStatus* const exec_status) = 0; + virtual void Print() { printf("??\n"); } + virtual RetCode GetAllSegments(stack* all_segments) { + cout << "Get All Segments error!" << endl; + return rSuccess; + } /* * Suggested by scdong, this method can be removed. */ virtual ResultSet* GetResultSet(); + void set_phy_oper_type(PhysicalOperatorType phy_oper_type) { + phy_oper_type_ = phy_oper_type; + } + PhysicalOperatorType phy_oper_type_; private: friend class boost::serialization::access; diff --git a/physical_operator/physical_outer_hash_join.cpp b/physical_operator/physical_outer_hash_join.cpp new file mode 100644 index 000000000..e17b568d5 --- /dev/null +++ b/physical_operator/physical_outer_hash_join.cpp @@ -0,0 +1,623 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/physical_operator/physical_outer_hash_join.cpp + * + * Created on: Mar 19, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ + +#include +#include +#include + +#include "../physical_operator/physical_outer_hash_join.h" +#include "../codegen/ExpressionGenerator.h" +#include "../Config.h" +#include "../Executor/expander_tracker.h" +#include "../utility/rdtsc.h" + +// #define _DEBUG_ + +namespace claims { +namespace physical_operator { + +PhysicalOuterHashJoin::PhysicalOuterHashJoin(State state) + : state_(state), + hash_func_(0), + hashtable_(0), + PhysicalOperator(barrier_number(2), serialized_section_number(1)), + eftt_(0), + memcpy_(0), + memcat_(0) { + // sema_open_.set_value(1); + InitExpandedStatus(); +} + +PhysicalOuterHashJoin::PhysicalOuterHashJoin() + : hash_func_(0), + hashtable_(0), + PhysicalOperator(barrier_number(2), serialized_section_number(1)), + eftt_(0), + memcpy_(0), + memcat_(0) { + // sema_open_.set_value(1); + InitExpandedStatus(); +} + +PhysicalOuterHashJoin::~PhysicalOuterHashJoin() { + for (int i = 0; i < state_.join_condi_.size(); ++i) { + DELETE_PTR(state_.join_condi_[i]); + } + state_.join_condi_.clear(); +} + +PhysicalOuterHashJoin::State::State( + PhysicalOperatorBase* child_left, PhysicalOperatorBase* child_right, + Schema* input_schema_left, Schema* input_schema_right, + Schema* output_schema, Schema* 
ht_schema, + std::vector joinIndex_left, std::vector joinIndex_right, + unsigned ht_nbuckets, unsigned ht_bucketsize, unsigned block_size, + vector join_condi, int join_type) + : child_left_(child_left), + child_right_(child_right), + input_schema_left_(input_schema_left), + input_schema_right_(input_schema_right), + output_schema_(output_schema), + hashtable_schema_(ht_schema), + join_index_left_(joinIndex_left), + join_index_right_(joinIndex_right), + hashtable_bucket_num_(ht_nbuckets), + hashtable_bucket_size_(ht_bucketsize), + block_size_(block_size), + join_condi_(join_condi), + join_type_(join_type) {} + +bool PhysicalOuterHashJoin::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset) { +#ifdef TIME + startTimer(&timer); +#endif + + RegisterExpandedThreadToAllBarriers(); + + unsigned long long int timer; + bool winning_thread = false; + if (TryEntryIntoSerializedSection(0)) { + winning_thread = true; + ExpanderTracker::getInstance()->addNewStageEndpoint( + pthread_self(), LocalStageEndPoint(stage_desc, "Hash join build", 0)); + hash_func_ = PartitionFunctionFactory::createBoostHashFunction( + state_.hashtable_bucket_num_); + unsigned long long hash_table_build = curtick(); + hashtable_ = new BasicHashTable( + state_.hashtable_bucket_num_, state_.hashtable_bucket_size_, + state_.input_schema_left_->getTupleMaxSize()); +#ifdef _DEBUG_ + consumed_tuples_from_left = 0; +#endif + +#ifdef CodeGen + QNode* expr = createEqualJoinExpression( + state_.hashtable_schema_, state_.input_schema_right_, + state_.join_index_left_, state_.join_index_right_); + ticks start = curtick(); + if (Config::enable_codegen) { + eftt_ = getExprFuncTwoTuples(expr, state_.hashtable_schema_, + state_.input_schema_right_); + memcpy_ = getMemcpy(state_.hashtable_schema_->getTupleMaxSize()); + memcat_ = getMemcat(state_.hashtable_schema_->getTupleMaxSize(), + state_.input_schema_right_->getTupleMaxSize()); + } + if (eftt_) { + cff_ = 
PhysicalOuterHashJoin::IsMatchCodegen; + LOG(INFO) << "Codegen(Join) succeed(" << setw(8) << fixed + << setprecision(3) << getMilliSecond(start) << endl; + } else { + cff_ = PhysicalOuterHashJoin::IsMatch; + LOG(INFO) << "Codegen(Join) failed!" << endl; + } + delete expr; +#endif + } + + /** + * For performance concern, the following line should place just after + *"RegisterNewThreadToAllBarriers();" + * in order to accelerate the open response time. + * + * I suppose not moving it before TryEntryIntoSerializedSection(0), in that + * case, all the other threads must wait until the main thread finished + * serialization, then continue processing. Tong + */ + LOG(INFO) << "join operator begin to open left child" << endl; + state_.child_left_->Open(exec_status, partition_offset); + LOG(INFO) << "join operator finished opening left child" << endl; + BarrierArrive(0); + BasicHashTable::Iterator tmp_it = hashtable_->CreateIterator(); + + void* cur; + void* tuple_in_hashtable; + unsigned bn; + + void* key_in_input; + void* key_in_hashtable; + void* value_in_input; + void* value_in_hashtable; + + JoinThreadContext* jtc = CreateOrReuseContext(crm_numa_sensitive); + + working_.acquire(); + working_threads_.insert(pthread_self()); + working_.release(); + const Schema* input_schema = state_.input_schema_left_->duplicateSchema(); + const Operate* oper = input_schema->getcolumn(state_.join_index_left_[0]) + .operate->duplicateOperator(); + const unsigned buckets = state_.hashtable_bucket_num_; + + unsigned long long int start = curtick(); + unsigned long long int processed_tuple_count = 0; + RETURN_IF_CANCELLED(exec_status); + + LOG(INFO) << "join operator begin to call left child's next()" << endl; + while (state_.child_left_->Next(exec_status, jtc->l_block_for_asking_)) { + RETURN_IF_CANCELLED(exec_status); + + delete jtc->l_block_stream_iterator_; + jtc->l_block_stream_iterator_ = jtc->l_block_for_asking_->createIterator(); + while (cur = 
jtc->l_block_stream_iterator_->nextTuple()) { +#ifdef _DEBUG_ + processed_tuple_count++; + lock_.acquire(); + consumed_tuples_from_left++; + lock_.release(); +#endif + const void* key_addr = + input_schema->getColumnAddess(state_.join_index_left_[0], cur); + bn = oper->getPartitionValue(key_addr, buckets); + tuple_in_hashtable = hashtable_->atomicAllocate(bn); + /* copy join index columns*/ + input_schema->copyTuple(cur, tuple_in_hashtable); + } + jtc->l_block_for_asking_->setEmpty(); + } +#ifdef _DEBUG_ + tuples_in_hashtable = 0; + produced_tuples = 0; + consumed_tuples_from_right = 0; +#endif + if (ExpanderTracker::getInstance()->isExpandedThreadCallBack( + pthread_self())) { + UnregisterExpandedThreadToAllBarriers(1); + return true; + } + + BarrierArrive(1); + state_.child_right_->Open(exec_status, partition_offset); + LOG(INFO) << "join operator finished opening right child" << endl; + return true; +} + +bool PhysicalOuterHashJoin::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + void* result_tuple = NULL; + void* tuple_from_right_child; + void* tuple_in_hashtable; + void* tuple_from_left_child; + void* key_in_input; + void* key_in_hashtable; + void* column_in_joined_tuple; + bool key_exit; + int hash_tuple_size = state_.hashtable_schema_->getTupleMaxSize(); + + JoinThreadContext* jtc = (JoinThreadContext*)GetContext(); + /** + * A specific method suitable for physical_join. + * In this case, it preserves the r_block_stream_iterator_ and + * hashtable_iterator_ status for physical_join's father to call Next() from + * the previous call. + * e.g.: Let's suppose that when physical_join's father first called Next(), + * it returned true when the block for sending was full whereas half of the + * tuples of a block from its right child was read. 
The next time + * physical_join's father calls Next(), it should go on operates on that + * right child block, so we need r_block_stream_iterator_ preserved, moreover + * it should go on operates on last matched tuple not sent due to block for + * send was full, so we need hashtable_iterator_ preserved. + */ + while (true) { + RETURN_IF_CANCELLED(exec_status); + // Right join uses right table(child) as outer loop tuple + while (NULL != (tuple_from_right_child = + jtc->r_block_stream_iterator_->currentTuple())) { + bool nothing_join = true; + unsigned bn = + state_.input_schema_right_->getcolumn(state_.join_index_right_[0]) + .operate->getPartitionValue( + state_.input_schema_right_->getColumnAddess( + state_.join_index_right_[0], tuple_from_right_child), + state_.hashtable_bucket_num_); + while (NULL != + (tuple_in_hashtable = jtc->hashtable_iterator_.readCurrent())) { +#ifdef CodeGen + cff_(tuple_in_hashtable, tuple_from_right_child, &key_exit, + state_.join_index_left_, state_.join_index_right_, + state_.hashtable_schema_, state_.input_schema_right_, eftt_); +#else + key_exit = + JoinCondiProcess(tuple_in_hashtable, tuple_from_right_child, jtc); +#endif + if (key_exit) { + nothing_join = false; + // Put the row_id of hash table(left table) which has been matched + // into a set. + if (state_.join_type_ == 2) { + unsigned long joined_row_id = 0; + memcpy(&joined_row_id, tuple_in_hashtable, sizeof(unsigned long)); + set_.acquire(); + joined_tuple_.insert(joined_row_id); + set_.release(); + } + if (NULL != (result_tuple = block->allocateTuple( + state_.output_schema_->getTupleMaxSize()))) { + produced_tuples++; + // cout << "joined tuple here!" 
<< endl; + if (memcat_) { + memcat_(result_tuple, tuple_in_hashtable, tuple_from_right_child); + } else { + const unsigned copyed_bytes = + state_.input_schema_left_->copyTuple(tuple_in_hashtable, + result_tuple); + state_.input_schema_right_->copyTuple( + tuple_from_right_child, (char*)result_tuple + copyed_bytes); + } + } else { + return true; + } + } + jtc->hashtable_iterator_.increase_cur_(); + } + // As for right join, if nothing_join is true, we should produce a null + // left tuple with right tuple. + if (nothing_join == true) { + if (NULL != (result_tuple = block->allocateTuple( + state_.output_schema_->getTupleMaxSize()))) { + unsigned null_tuple_size = 0; + void* null_tuple = + malloc(state_.input_schema_left_->getTupleMaxSize()); + + // Generate a null left tuple + for (int count = 0; count < state_.input_schema_left_->columns.size(); + count++) { + void* temp_record = + malloc(state_.input_schema_left_->columns[count].get_length()); + if (state_.input_schema_left_->columns[count].operate->setNull( + temp_record)) { + unsigned temp_record_length = + state_.input_schema_left_->columns[count].get_length(); + memcpy((char*)null_tuple + null_tuple_size, temp_record, + state_.input_schema_left_->columns[count].get_length()); + null_tuple_size += + state_.input_schema_left_->columns[count].get_length(); + } + delete temp_record; + temp_record = NULL; + } + + const unsigned copyed_bytes = + state_.input_schema_left_->copyTuple(null_tuple, result_tuple); + state_.input_schema_right_->copyTuple( + tuple_from_right_child, (char*)result_tuple + copyed_bytes); + produced_tuples++; + // cout << "NULL tuple here!" 
<< endl; + delete null_tuple; + null_tuple = NULL; + } else { + return true; + } + } + + // right_table_num_++; + jtc->r_block_stream_iterator_->increase_cur_(); +#ifdef _DEBUG_ + consumed_tuples_from_right++; +#endif + if (NULL != (tuple_from_right_child = + jtc->r_block_stream_iterator_->currentTuple())) { + bn = state_.input_schema_right_->getcolumn(state_.join_index_right_[0]) + .operate->getPartitionValue( + state_.input_schema_right_->getColumnAddess( + state_.join_index_right_[0], tuple_from_right_child), + state_.hashtable_bucket_num_); + hashtable_->placeIterator(jtc->hashtable_iterator_, bn); + } + } + jtc->r_block_for_asking_->setEmpty(); + if (false == first_done_) + jtc->hashtable_iterator_ = hashtable_->CreateIterator(); + + // Get another right block. + if (state_.child_right_->Next(exec_status, jtc->r_block_for_asking_) == + false) { + // Mark the first thread that can not get data from + // Next(jtc->r_block_for_asking).It means no blocks from right child + lock_thread_.acquire(); + if (first_arrive_thread_ == 0) first_arrive_thread_ = pthread_self(); + lock_thread_.release(); + + // Once thread finds no blocks to use, remove the thread from + // "working_thread_". + working_.acquire(); + auto worker_it = working_threads_.find(pthread_self()); + if (worker_it != working_threads_.end()) { + working_threads_.erase(worker_it); + } + working_.release(); + + // As for full join, the first arrived thread should wait until other + // threads finish their jobs.The other threads should wait until the first + // arrived thread turn the first_done_ into true. + if (state_.join_type_ == 2) { + while (first_arrive_thread_ == pthread_self() && + working_threads_.size() != 0) { + usleep(1); + } + while (first_arrive_thread_ != pthread_self() && (!first_done_)) { + usleep(1); + } + } + + // Once left or right join finds block is empty, it will exit. 
+ if ((state_.join_type_ != 2) && (block->Empty() == true)) { + return false; + } else if ((state_.join_type_ != 2) && (block->Empty() == false)) { + return true; + } + + // Full join exits when the block is empty and all the hash bucket has + // been looped + // through. + if ((state_.join_type_ == 2) && block->Empty() == true && + first_done_ == true && + jtc->current_bucket_ >= (state_.hashtable_bucket_num_)) { + return false; + } else if (state_.join_type_ == 2) { + // the first arrived thread will turn the first_done_ into true to let + // other threads know they can do the extra job + first_done_ = true; + + // Full outer join will scan the hash table(left table) again, + // and find which tuples are not in the joined_tuple set. + // Then generate new tuple with hash table tuple + null right tuple. + // TODO(yuyang) :Not all bucket number is used. + while (true) { + // Every thread has a hash bucket. The thread should loop through + // its bucket. + while (NULL != (tuple_in_hashtable = + jtc->hashtable_iterator_.readCurrent())) { + unsigned long row_id_in_hashtable = 0; + memcpy(&row_id_in_hashtable, tuple_in_hashtable, + sizeof(unsigned long)); + auto it = joined_tuple_.find(row_id_in_hashtable); + if (it == joined_tuple_.end()) { + if (NULL != (result_tuple = block->allocateTuple( + state_.output_schema_->getTupleMaxSize()))) { + unsigned null_tuple_size = 0; + void* null_tuple = + malloc(state_.input_schema_right_->getTupleMaxSize()); + + // Generate a null right tuple + for (int count = 0; + count < state_.input_schema_right_->columns.size(); + count++) { + void* temp_record = malloc( + state_.input_schema_right_->columns[count].get_length()); + if (state_.input_schema_right_->columns[count] + .operate->setNull(temp_record)) { + unsigned temp_record_length = + state_.input_schema_right_->columns[count].get_length(); + memcpy((char*)null_tuple + null_tuple_size, temp_record, + state_.input_schema_right_->columns[count] + .get_length()); + null_tuple_size 
+= + state_.input_schema_right_->columns[count].get_length(); + } + DELETE_PTR(temp_record); + } + + const unsigned copyed_bytes = + state_.input_schema_left_->copyTuple(tuple_in_hashtable, + result_tuple); + state_.input_schema_right_->copyTuple( + null_tuple, (char*)result_tuple + copyed_bytes); + produced_tuples++; + DELETE_PTR(null_tuple); + } else { + // cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl; + // cout << "block has no space!!!!" << endl; + // cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl; + return true; + } + } + // hash_table_num_++; + jtc->hashtable_iterator_.increase_cur_(); + } + jtc->hashtable_iterator_ = hashtable_->CreateIterator(); + + // After looping through the bucket, we give it a new bucket until + // all has been looped through. + left_join_.acquire(); + jtc->current_bucket_ = bucket_num_; + hashtable_->placeIterator(jtc->hashtable_iterator_, + jtc->current_bucket_); + if (bucket_num_ < state_.hashtable_bucket_num_) { + bucket_num_++; + } + // checked_bucket_[jtc->current_bucket_] = true; + left_join_.release(); + if (jtc->current_bucket_ >= state_.hashtable_bucket_num_) { + break; + } + } + return true; + } + } + delete jtc->r_block_stream_iterator_; + jtc->r_block_stream_iterator_ = jtc->r_block_for_asking_->createIterator(); + if ((tuple_from_right_child = + jtc->r_block_stream_iterator_->currentTuple())) { + unsigned bn = + state_.input_schema_right_->getcolumn(state_.join_index_right_[0]) + .operate->getPartitionValue( + state_.input_schema_right_->getColumnAddess( + state_.join_index_right_[0], tuple_from_right_child), + state_.hashtable_bucket_num_); + hashtable_->placeIterator(jtc->hashtable_iterator_, bn); + } + } +} + +bool PhysicalOuterHashJoin::Close(SegmentExecStatus* const exec_status) { +#ifdef TIME + stopTimer(&timer); + LOG(INFO) << "time consuming: " << timer << ", " + << timer / static_cast CPU_FRE << endl; +#endif + LOG(INFO) << "Consumes" << consumed_tuples_from_left + << "tuples from left child!" 
<< endl; + // cout << "hash table num :" << hash_table_num_ << endl; + // cout << "bucket num : " << bucket_num_ << endl; + // cout << "joined_tuple num is: " << joined_tuple_.size() << endl; + // cout << "right table num is: " << right_table_num_ << endl; + // cout << "produced tuple num is: " << produced_tuples << endl; + // for (int i = 0; i < 1048577; i++) { + // if (checked_bucket_[i] == false) { + // cout << "checked bucket " << i << " failed!!" << endl; + // } + // } + InitExpandedStatus(); + DestoryAllContext(); + if (NULL != hashtable_) { + delete hashtable_; + hashtable_ = NULL; + } + state_.child_left_->Close(exec_status); + state_.child_right_->Close(exec_status); + return true; +} + +void PhysicalOuterHashJoin::Print() { + LOG(INFO) << "Join: buckets:" << state_.hashtable_bucket_num_ << endl; + cout << "Join: buckets:" << state_.hashtable_bucket_num_ << endl; + + LOG(INFO) << "------Join Left-------" << endl; + cout << "------Join Left-------" << endl; + + state_.child_left_->Print(); + LOG(INFO) << "------Join Right-------" << endl; + cout << "------Join Right-------" << endl; + + state_.child_right_->Print(); +} + +inline void PhysicalOuterHashJoin::IsMatch( + void* l_tuple_addr, void* r_tuple_addr, void* return_addr, + vector& l_join_index, vector& r_join_index, + Schema* l_schema, Schema* r_schema, ExprFuncTwoTuples func) { + bool key_exit = true; + for (unsigned i = 0; i < r_join_index.size(); i++) { + void* key_in_input = + r_schema->getColumnAddess(r_join_index[i], r_tuple_addr); + void* key_in_hashtable = + l_schema->getColumnAddess(l_join_index[i], l_tuple_addr); + if (!r_schema->getcolumn(r_join_index[i]) + .operate->equal(key_in_input, key_in_hashtable)) { + key_exit = false; + break; + } + } + *(bool*)return_addr = key_exit; +} + +inline void PhysicalOuterHashJoin::IsMatchCodegen( + void* l_tuple_addr, void* r_tuple_addr, void* return_addr, + vector& l_join_index, vector& r_join_index, + Schema* l_schema, Schema* r_schema, ExprFuncTwoTuples 
func) { + func(l_tuple_addr, r_tuple_addr, return_addr); +} + +inline bool PhysicalOuterHashJoin::JoinCondiProcess( + void* tuple_left, void* tuple_right, JoinThreadContext* const hjtc) { + hjtc->expr_eval_cnxt_.tuple[0] = tuple_left; + hjtc->expr_eval_cnxt_.tuple[1] = tuple_right; + bool pass = false; + for (int i = 0; i < hjtc->join_condi_.size(); ++i) { + pass = *(bool*)(hjtc->join_condi_[i]->ExprEvaluate(hjtc->expr_eval_cnxt_)); + if (pass == false) { + return false; + } + } + return true; +} + +PhysicalOuterHashJoin::JoinThreadContext::~JoinThreadContext() { + delete l_block_for_asking_; + delete l_block_stream_iterator_; + delete r_block_for_asking_; + delete r_block_stream_iterator_; + for (int i = 0; i < join_condi_.size(); ++i) { + DELETE_PTR(join_condi_[i]); + } + join_condi_.clear(); +} + +ThreadContext* PhysicalOuterHashJoin::CreateContext() { + JoinThreadContext* jtc = new JoinThreadContext(); + jtc->l_block_for_asking_ = BlockStreamBase::createBlock( + state_.input_schema_left_, state_.block_size_); + jtc->l_block_stream_iterator_ = jtc->l_block_for_asking_->createIterator(); + jtc->r_block_for_asking_ = BlockStreamBase::createBlock( + state_.input_schema_right_, state_.block_size_); + jtc->r_block_stream_iterator_ = jtc->r_block_for_asking_->createIterator(); + ExprNode* new_node = NULL; + // cout << "state_.join_condi_ = " << state_.join_condi_.size() << endl; + for (int i = 0; i < state_.join_condi_.size(); ++i) { + new_node = state_.join_condi_[i]->ExprCopy(); + new_node->InitExprAtPhysicalPlan(); + jtc->join_condi_.push_back(new_node); + } + jtc->expr_eval_cnxt_.schema[0] = state_.input_schema_left_; + jtc->expr_eval_cnxt_.schema[1] = state_.input_schema_right_; + return jtc; +} + +RetCode PhysicalOuterHashJoin::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_right_) { + ret = state_.child_right_->GetAllSegments(all_segments); + } + if (NULL != state_.child_left_) { + ret = 
state_.child_left_->GetAllSegments(all_segments); + } + return ret; +} +} // namespace physical_operator +} // namespace claims diff --git a/physical_operator/physical_outer_hash_join.h b/physical_operator/physical_outer_hash_join.h new file mode 100644 index 000000000..f9219ed40 --- /dev/null +++ b/physical_operator/physical_outer_hash_join.h @@ -0,0 +1,231 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/physical_operator/physical_outer_hash_join.h + * + * Created on: Mar 19, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ + +#ifndef PHYSICAL_OPERATOR_PHYSICAL_OUTER_HASH_JOIN_H_ +#define PHYSICAL_OPERATOR_PHYSICAL_OUTER_HASH_JOIN_H_ + +#include +#include +#include +#include +#include +#include +#include +#include "../Debug.h" +#include "../utility/rdtsc.h" +#include "../codegen/ExpressionGenerator.h" +#include "../common/hash.h" +#include "../common/hashtable.h" +#include "../common/expression/expr_node.h" +#include "../common/error_define.h" +#include "../physical_operator/physical_operator_base.h" +#include "../physical_operator/physical_operator.h" +using std::atomic; +using claims::common::ExprNode; +using claims::common::ExprEvalCnxt; +namespace claims { +namespace physical_operator { + +/** + * @brief Method description: Physical Operator "join", is used to join two + * tables due to current rules. + */ +class PhysicalOuterHashJoin : public PhysicalOperator { + public: + class JoinThreadContext : public ThreadContext { + public: + ~JoinThreadContext(); + BlockStreamBase* l_block_for_asking_; + BlockStreamBase::BlockStreamTraverseIterator* l_block_stream_iterator_; + BlockStreamBase* r_block_for_asking_; + BlockStreamBase::BlockStreamTraverseIterator* r_block_stream_iterator_; + BasicHashTable::Iterator hashtable_iterator_; + std::vector join_condi_; + ExprEvalCnxt expr_eval_cnxt_; + unsigned long current_bucket_{0}; + }; + + class State { + friend class PhysicalOuterHashJoin; + + public: + /** + * @brief Method description: Construct the State with paras, not used in + * the current version. 
+ */ + State(PhysicalOperatorBase* child_left, PhysicalOperatorBase* child_right, + Schema* input_schema_left, Schema* input_schema_right, + Schema* output_schema, Schema* ht_schema, + std::vector joinIndex_left, + std::vector joinIndex_right, unsigned ht_nbuckets, + unsigned ht_bucketsize, unsigned block_size, + vector join_condi, int join_type); + State() {} + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int version) { + ar& child_left_& child_right_& input_schema_left_& input_schema_right_& + output_schema_& hashtable_schema_& join_index_left_& + join_index_right_& hashtable_bucket_num_& hashtable_bucket_size_& + block_size_& join_condi_& join_type_; + } + + public: + // input and output + PhysicalOperatorBase* child_left_, *child_right_; + Schema* input_schema_left_, *input_schema_right_; + Schema* output_schema_, *hashtable_schema_; + + // how to join + std::vector join_index_left_; + std::vector join_index_right_; + std::vector join_condi_; + + // hashtable + unsigned hashtable_bucket_num_; + unsigned hashtable_bucket_size_; + unsigned block_size_; + + // 0 means left join; 1 means right join; 2 means full join + int join_type_; + }; + PhysicalOuterHashJoin(State state); + PhysicalOuterHashJoin(); + virtual ~PhysicalOuterHashJoin(); + + /** + * @brief Method description: Map between join_index_left_, payload_left_, + * payload_right_ and output_index_. Initialize + * hash, hashtable, and the functions. Get tuples + * from left child, and put them into the proper + * buckets. + * @param const PartitionOffset& partition_offset, to identify which + * partiton the function operates on. + * @return true in all cases. 
+ */ + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); + /** + * @brief Method description: Get tuples from right child, use algorithm to + * find whether there's a left tuple that matches + * on the key value, then combine them and put the + * result into a block, and send the block to its + * father. + * @param BlockStreamBase *block, the info of block + * @return false if there's no tuple to function and the block is empty, + * otherwise true. + */ + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + /** + * @brief Method description: Initialize thread status, destroy contexts, + * delete hashtable, and close childs. + * @return true. + */ + bool Close(SegmentExecStatus* const exec_status); + void Print(); + RetCode GetAllSegments(stack* all_segments); + + private: + /** + * @brief Method description: Allocate memory for jtc. + */ + ThreadContext* CreateContext(); + /** + * @brief Method description: To see if right child tuple and left child + * tuple matches on the key value. result stores + * in key_exit. + */ + static void IsMatch(void* l_tuple_addr, void* r_tuple_addr, void* return_addr, + vector& l_join_index, + vector& r_join_index, Schema* l_schema, + Schema* r_schema, ExprFuncTwoTuples func); + /** + * @brief Method description: Another way to see if right child tuple and left + * child tuple matches on the key value by using + * func result stores in key_exit. 
+ */ + static void IsMatchCodegen(void* l_tuple_addr, void* r_tuple_addr, + void* return_addr, vector& l_join_index, + vector& r_join_index, Schema* l_schema, + Schema* r_schema, ExprFuncTwoTuples func); + // static void copy_to_hashtable(void* desc, void* src, Schema* ); + bool JoinCondiProcess(void* tuple_left, void* tuple_right, + JoinThreadContext* const jtc); + + private: + State state_; + + PartitionFunction* hash_func_; + BasicHashTable* hashtable_; + Schema* hashtable_schema_; + std::set joined_tuple_; + + typedef void (*ConditionFilterFunc)(void*, void*, void*, vector&, + vector&, Schema*, Schema*, + ExprFuncTwoTuples); + ConditionFilterFunc cff_; + ExprFuncTwoTuples eftt_; + LLVMMemcpy memcpy_; + LLVMMemcat memcat_; + + // debug + unsigned produced_tuples = 0; + unsigned consumed_tuples_from_right; + unsigned consumed_tuples_from_left; + unsigned tuples_in_hashtable; + unsigned water_mark; + // atomic working_thread_count_{0}; + // atomic hash_table_num_{0}; + // atomic right_table_num_{0}; + // bool checked_bucket_[1048577]{false}; + + // outer join sync + unsigned long int first_arrive_thread_ = 0; + std::set working_threads_; + Lock working_; + atomic bucket_num_{0}; + atomic first_done_{false}; + Lock lock_thread_; + Lock set_; + Lock left_join_; + +#ifdef TIME + unsigned long long timer; +#endif + + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int version) { + ar& boost::serialization::base_object(*this) & state_; + } +}; + +} // namespace physical_operator +} // namespace claims + +#endif // PHYSICAL_OPERATOR_PHYSICAL_OUTER_HASH_JOIN_H_ diff --git a/physical_operator/physical_project.cpp b/physical_operator/physical_project.cpp index 3f12cbe8c..63231457f 100644 --- a/physical_operator/physical_project.cpp +++ b/physical_operator/physical_project.cpp @@ -28,17 +28,35 @@ #include "../physical_operator/physical_project.h" +#include #include using claims::common::ExprNode; #include 
"../common/expression/expr_node.h" namespace claims { namespace physical_operator { -PhysicalProject::PhysicalProject() { InitExpandedStatus(); } +PhysicalProject::PhysicalProject() { + set_phy_oper_type(kPhysicalProject); + InitExpandedStatus(); +} -PhysicalProject::~PhysicalProject() {} +PhysicalProject::~PhysicalProject() { + if (NULL != state_.schema_output_) { + delete state_.schema_output_; + state_.schema_output_ = NULL; + } + if (NULL != state_.schema_input_) { + delete state_.schema_input_; + state_.schema_input_ = NULL; + } + if (NULL != state_.child_) { + delete state_.child_; + state_.child_ = NULL; + } +} PhysicalProject::PhysicalProject(State state) : state_(state) { + set_phy_oper_type(kPhysicalProject); InitExpandedStatus(); } @@ -65,11 +83,14 @@ PhysicalProject::State::State(Schema* schema_input, Schema* schema_output, * Call back child Open(). */ -bool PhysicalProject::Open(const PartitionOffset& kPartitionOffset) { +bool PhysicalProject::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& kPartitionOffset) { + RETURN_IF_CANCELLED(exec_status); + RegisterExpandedThreadToAllBarriers(); ProjectThreadContext* ptc = reinterpret_cast( CreateOrReuseContext(crm_core_sensitive)); - bool ret = state_.child_->Open(kPartitionOffset); + bool ret = state_.child_->Open(exec_status, kPartitionOffset); SetReturnStatus(ret); BarrierArrive(); // Synchronization point return GetReturnStatus(); @@ -82,7 +103,10 @@ bool PhysicalProject::Open(const PartitionOffset& kPartitionOffset) { * case(2): block_for_asking_ is exhausted (should fetch a new block from * child and continue to process) */ -bool PhysicalProject::Next(BlockStreamBase* block) { +bool PhysicalProject::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + RETURN_IF_CANCELLED(exec_status); + unsigned total_length_ = state_.schema_output_->getTupleMaxSize(); void* tuple_from_child; @@ -90,10 +114,12 @@ bool PhysicalProject::Next(BlockStreamBase* block) { 
ProjectThreadContext* tc = reinterpret_cast(GetContext()); while (true) { + RETURN_IF_CANCELLED(exec_status); + if (tc->block_stream_iterator_->currentTuple() == 0) { /* mark the block as processed by setting it empty*/ tc->block_for_asking_->setEmpty(); - if (state_.child_->Next(tc->block_for_asking_)) { + if (state_.child_->Next(exec_status, tc->block_for_asking_)) { delete tc->block_stream_iterator_; tc->block_stream_iterator_ = tc->block_for_asking_->createIterator(); } else { @@ -109,12 +135,13 @@ bool PhysicalProject::Next(BlockStreamBase* block) { // for case (1) return true; } + return false; } -bool PhysicalProject::Close() { +bool PhysicalProject::Close(SegmentExecStatus* const exec_status) { InitExpandedStatus(); DestoryAllContext(); - return state_.child_->Close(); + return state_.child_->Close(exec_status); } bool PhysicalProject::CopyNewValue(void* tuple, void* result, int length) { @@ -152,9 +179,9 @@ void PhysicalProject::ProcessInLogic(BlockStreamBase* block, void* result = tc->thread_qual_[i]->FuncId( tc->thread_qual_[i], tuple_from_child, state_.schema_input_); #else + tc->expr_eval_cnxt_.tuple[0] = tuple_from_child; for (int i = 0; i < tc->thread_expr_.size(); ++i) { - void* result = tc->thread_expr_[i]->ExprEvaluate(tuple_from_child, - state_.schema_input_); + void* result = tc->thread_expr_[i]->ExprEvaluate(tc->expr_eval_cnxt_); #endif CopyNewValue(tuple, result, @@ -182,6 +209,8 @@ ThreadContext* PhysicalProject::CreateContext() { ptc->temp_block_ = BlockStreamBase::createBlock(state_.schema_output_, state_.block_size_); ptc->block_stream_iterator_ = ptc->block_for_asking_->createIterator(); + + ptc->expr_eval_cnxt_.schema[0] = state_.schema_input_; #ifdef NEWCONDI ptc->thread_qual_ = state_.expr_tree_; for (int i = 0; i < state_.expr_tree_.size(); i++) { @@ -197,6 +226,12 @@ ThreadContext* PhysicalProject::CreateContext() { #endif return ptc; } - +RetCode PhysicalProject::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if 
(NULL != state_.child_) { + ret = state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/physical_project.h b/physical_operator/physical_project.h index 34d9ec6d1..704cad97e 100644 --- a/physical_operator/physical_project.h +++ b/physical_operator/physical_project.h @@ -33,6 +33,7 @@ #include #include #include +#include #include "../common/expression/expr_node.h" #include "../common/Mapping.h" @@ -42,6 +43,7 @@ #include "../common/Expression/execfunc.h" #include "../physical_operator/physical_operator_base.h" #include "../physical_operator/physical_operator.h" +using claims::common::ExprEvalCnxt; using claims::common::ExprNode; namespace claims { namespace physical_operator { @@ -60,6 +62,7 @@ class PhysicalProject : public PhysicalOperator { BlockStreamBase::BlockStreamTraverseIterator *block_stream_iterator_; vector thread_qual_; vector thread_expr_; + ExprEvalCnxt expr_eval_cnxt_; ~ProjectThreadContext() { if (NULL != block_for_asking_) { @@ -132,18 +135,20 @@ class PhysicalProject : public PhysicalOperator { /** * @brief: construct iterator of project operator */ - bool Open(const PartitionOffset &kPartitionOffset = 0); + bool Open(SegmentExecStatus *const exec_status, + const PartitionOffset &kPartitionOffset = 0); /** * @brief: fetch a block from child and ProcessInLogic(). */ - bool Next(BlockStreamBase *block); + bool Next(SegmentExecStatus *const exec_status, BlockStreamBase *block); /** * @brief: revoke resource. 
*/ - bool Close(); + bool Close(SegmentExecStatus *const exec_status); void Print(); + RetCode GetAllSegments(stack *all_segments); private: /** diff --git a/physical_operator/physical_projection_scan.cpp b/physical_operator/physical_projection_scan.cpp index 3803d5435..3b1cc2ef2 100644 --- a/physical_operator/physical_projection_scan.cpp +++ b/physical_operator/physical_projection_scan.cpp @@ -34,6 +34,8 @@ #include #include #include +#include + #include "../common/rename.h" #include "../storage/BlockManager.h" #include "../Config.h" @@ -49,11 +51,13 @@ namespace claims { namespace physical_operator { PhysicalProjectionScan::PhysicalProjectionScan(State state) : state_(state), partition_reader_iterator_(NULL), perf_info_(NULL) { + set_phy_oper_type(kPhysicalScan); InitExpandedStatus(); } PhysicalProjectionScan::PhysicalProjectionScan() : partition_reader_iterator_(NULL), perf_info_(NULL) { + set_phy_oper_type(kPhysicalScan); InitExpandedStatus(); } @@ -62,10 +66,10 @@ PhysicalProjectionScan::~PhysicalProjectionScan() { delete state_.schema_; state_.schema_ = NULL; } - if (NULL != perf_info_) { - delete perf_info_; - perf_info_ = NULL; - } + // if (NULL != perf_info_) { + // delete perf_info_; + // perf_info_ = NULL; + // } } PhysicalProjectionScan::State::State(ProjectionID projection_id, Schema* schema, @@ -81,14 +85,17 @@ PhysicalProjectionScan::State::State(ProjectionID projection_id, Schema* schema, * decide if it generates a buffer. 
*/ -bool PhysicalProjectionScan::Open(const PartitionOffset& kPartitionOffset) { +bool PhysicalProjectionScan::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& kPartitionOffset) { + RETURN_IF_CANCELLED(exec_status); + RegisterExpandedThreadToAllBarriers(); if (TryEntryIntoSerializedSection()) { /* this is the first expanded thread*/ - PartitionStorage* partition_handle_; + PartitionStorage* partition_handle_ = NULL; if (NULL == - (partition_handle_ = BlockManager::getInstance()->getPartitionHandle( + (partition_handle_ = BlockManager::getInstance()->GetPartitionHandle( PartitionID(state_.projection_id_, kPartitionOffset)))) { LOG(ERROR) << PartitionID(state_.projection_id_, kPartitionOffset) .getName() @@ -96,7 +103,7 @@ bool PhysicalProjectionScan::Open(const PartitionOffset& kPartitionOffset) { SetReturnStatus(false); } else { partition_reader_iterator_ = - partition_handle_->createAtomicReaderIterator(); + partition_handle_->CreateAtomicReaderIterator(); SetReturnStatus(true); } @@ -105,9 +112,9 @@ bool PhysicalProjectionScan::Open(const PartitionOffset& kPartitionOffset) { ChunkReaderIterator* chunk_reader_it; ChunkReaderIterator::block_accessor* ba; - while (chunk_reader_it = partition_reader_iterator_->nextChunk()) { - while (chunk_reader_it->getNextBlockAccessor(ba)) { - ba->getBlockSize(); + while (chunk_reader_it = partition_reader_iterator_->NextChunk()) { + while (chunk_reader_it->GetNextBlockAccessor(ba)) { + ba->GetBlockSize(); input_dataset_.input_data_blocks_.push_back(ba); } } @@ -132,8 +139,14 @@ bool PhysicalProjectionScan::Open(const PartitionOffset& kPartitionOffset) { // TODO(Hanzhang): According to AVOID_CONTENTION_IN_SCAN, we choose the // strategy. We need finish case(1). 
-bool PhysicalProjectionScan::Next(BlockStreamBase* block) { +bool PhysicalProjectionScan::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + RETURN_IF_CANCELLED(exec_status); + unsigned long long total_start = curtick(); + if (!block->isIsReference()) { + block->setIsReference(false); + } #ifdef AVOID_CONTENTION_IN_SCAN ScanThreadContext* stc = reinterpret_cast(GetContext()); if (NULL == stc) { @@ -145,7 +158,7 @@ bool PhysicalProjectionScan::Next(BlockStreamBase* block) { input_dataset_.AtomicPut(stc->assigned_data_); delete stc; destorySelfContext(); - kPerfInfo->report_instance_performance_in_millibytes(); + // kPerfInfo->report_instance_performance_in_millibytes(); return false; } @@ -153,7 +166,7 @@ bool PhysicalProjectionScan::Next(BlockStreamBase* block) { ChunkReaderIterator::block_accessor* ba = stc->assigned_data_.front(); stc->assigned_data_.pop_front(); - ba->getBlock(block); + ba->GetBlock(block); // whether delete InMemeryBlockAccessor::target_block_start_address // is depend on whether use copy in ba->getBlock(block); @@ -177,14 +190,15 @@ bool PhysicalProjectionScan::Next(BlockStreamBase* block) { pthread_self())) { return false; } - perf_info_->processed_one_block(); + // perf_info_->processed_one_block(); // case(2) - return partition_reader_iterator_->nextBlock(block); + RETURN_IF_CANCELLED(exec_status); + return partition_reader_iterator_->NextBlock(block); #endif } -bool PhysicalProjectionScan::Close() { +bool PhysicalProjectionScan::Close(SegmentExecStatus* const exec_status) { if (NULL != partition_reader_iterator_) { delete partition_reader_iterator_; partition_reader_iterator_ = NULL; @@ -204,6 +218,8 @@ bool PhysicalProjectionScan::PassSample() const { if ((rand() / (float)RAND_MAX) < state_.sample_rate_) return true; return false; } - +RetCode PhysicalProjectionScan::GetAllSegments(stack* all_segments) { + return rSuccess; +} } // namespace physical_operator } // namespace claims diff --git 
a/physical_operator/physical_projection_scan.h b/physical_operator/physical_projection_scan.h index feb1b8bc7..de5ecbe9b 100644 --- a/physical_operator/physical_projection_scan.h +++ b/physical_operator/physical_projection_scan.h @@ -33,6 +33,7 @@ #ifndef PHYSICAL_OPERATOR_PHYSICAL_PROJECTION_SCAN_H_ #define PHYSICAL_OPERATOR_PHYSICAL_PROJECTION_SCAN_H_ +#include #define GLOG_NO_ABBREVIATED_SEVERITIES #include @@ -44,6 +45,7 @@ #include "../physical_operator/physical_operator_base.h" #include "../common/Schema/Schema.h" #include "../storage/ChunkStorage.h" +#include "../storage/PartitionReaderIterator.h" #include "../storage/PartitionStorage.h" #include "../physical_operator/physical_operator.h" #include "../common/ExpandedThreadTracker.h" @@ -122,25 +124,27 @@ class PhysicalProjectionScan : public PhysicalOperator { * @brief Method description: Initialize the operator and get the initial * position of chunk read iterator. */ - bool Open(const PartitionOffset& partition_offset = 0); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& partition_offset = 0); /** * @brief: fetch block from child operator. */ - bool Next(BlockStreamBase* block); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); /** * @brief: revoke resource. 
*/ - bool Close(); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: bool PassSample() const; private: State state_; - PartitionStorage::PartitionReaderItetaor* partition_reader_iterator_; + PartitionStorage::PartitionReaderIterator* partition_reader_iterator_; std::list remaining_chunk_iterator_list_; Lock chunk_reader_container_lock_; // like a buffer diff --git a/physical_operator/physical_sort.cpp b/physical_operator/physical_sort.cpp index 3d4cbd6b7..9d87611d4 100644 --- a/physical_operator/physical_sort.cpp +++ b/physical_operator/physical_sort.cpp @@ -32,6 +32,7 @@ #include "../physical_operator/physical_sort.h" #include +#include #include #include @@ -49,13 +50,16 @@ namespace physical_operator { unsigned PhysicalSort::order_by_pos_ = 0; PhysicalSort::State *PhysicalSort::cmp_state_ = NULL; OperFuncInfo PhysicalSort::fcinfo = NULL; -PhysicalSort::PhysicalSort() : PhysicalOperator(3, 2) { +PhysicalSort::PhysicalSort() : PhysicalOperator(3, 2), block_buffer_(NULL) { + set_phy_oper_type(kPhysicalSort); lock_ = new Lock(); + cmp_state_ = &state_; InitExpandedStatus(); } PhysicalSort::PhysicalSort(State state) - : PhysicalOperator(3, 2), state_(state) { + : PhysicalOperator(3, 2), state_(state), block_buffer_(NULL) { + set_phy_oper_type(kPhysicalSort); cmp_state_ = &state_; lock_ = new Lock(); InitExpandedStatus(); @@ -84,12 +88,14 @@ PhysicalSort::State::State(Schema *input_schema, PhysicalOperatorBase *child, // TODO(FZH): every time compare 2 tuples, it should be calculated, it may be // calculated before there and fetch the result straightly here. 
bool PhysicalSort::Compare(void *a_tuple, void *b_tuple) { + cmp_state_->eecnxt_.tuple[0] = a_tuple; + cmp_state_->eecnxt1_.tuple[0] = b_tuple; void *a_result = cmp_state_->order_by_attrs_[order_by_pos_].first->ExprEvaluate( - a_tuple, cmp_state_->input_schema_); + cmp_state_->eecnxt_); void *b_result = cmp_state_->order_by_attrs_copy_[order_by_pos_].first->ExprEvaluate( - b_tuple, cmp_state_->input_schema_); + cmp_state_->eecnxt1_); fcinfo->args_[0] = a_result; fcinfo->args_[1] = b_result; fcinfo->args_num_ = 2; @@ -137,21 +143,26 @@ void PhysicalSort::Order() { * by specifying the column to be sorted * 3, whether to register the buffer into the blockmanager. * */ -bool PhysicalSort::Open(const PartitionOffset &part_off) { +bool PhysicalSort::Open(SegmentExecStatus *const exec_status, + const PartitionOffset &part_off) { + RETURN_IF_CANCELLED(exec_status); + RegisterExpandedThreadToAllBarriers(); if (TryEntryIntoSerializedSection(0)) { all_cur_ = 0; thread_id_ = -1; all_tuples_.clear(); + block_buffer_ = new DynamicBlockBuffer(); } BarrierArrive(0); - BlockStreamBase *block_for_asking = NULL; + BlockStreamBase *block_for_asking; if (CreateBlock(block_for_asking) == false) { LOG(ERROR) << "error in the create block stream!!!" << endl; return 0; } // state_.partition_offset_ = part_off; - state_.child_->Open(part_off); + state_.child_->Open(exec_status, part_off); + RETURN_IF_CANCELLED(exec_status); /** * phase 1: store the data in the buffer! 
@@ -162,18 +173,28 @@ bool PhysicalSort::Open(const PartitionOffset &part_off) { void *tuple_ptr = NULL; BlockStreamBase::BlockStreamTraverseIterator *block_it; - while (state_.child_->Next(block_for_asking)) { - block_buffer_.atomicAppendNewBlock(block_for_asking); + while (state_.child_->Next(exec_status, block_for_asking)) { + RETURN_IF_CANCELLED(exec_status); + + block_buffer_->atomicAppendNewBlock(block_for_asking); block_it = block_for_asking->createIterator(); while (NULL != (tuple_ptr = block_it->nextTuple())) { thread_tuple.push_back(tuple_ptr); } + if (NULL != block_it) { + delete block_it; + block_it = NULL; + } if (CreateBlock(block_for_asking) == false) { LOG(ERROR) << "error in the create block stream!!!" << endl; return 0; } } + if (NULL != block_for_asking) { + delete block_for_asking; + block_for_asking = NULL; + } lock_->acquire(); all_tuples_.insert(all_tuples_.end(), thread_tuple.begin(), thread_tuple.end()); @@ -207,13 +228,21 @@ bool PhysicalSort::Open(const PartitionOffset &part_off) { [state_.order_by_attrs_[i].first->get_type_][OperType::oper_great]; } // int64_t time = curtick(); + state_.eecnxt_.schema[0] = state_.input_schema_; + state_.eecnxt1_.schema[0] = state_.input_schema_; + RETURN_IF_CANCELLED(exec_status); + cmp_state_ = &state_; + Order(); } BarrierArrive(2); return true; } // just only thread can fetch this result -bool PhysicalSort::Next(BlockStreamBase *block) { +bool PhysicalSort::Next(SegmentExecStatus *const exec_status, + BlockStreamBase *block) { + RETURN_IF_CANCELLED(exec_status); + lock_->acquire(); if (thread_id_ == -1) { thread_id_ = pthread_self(); @@ -249,8 +278,13 @@ bool PhysicalSort::Next(BlockStreamBase *block) { return false; } -bool PhysicalSort::Close() { - state_.child_->Close(); +bool PhysicalSort::Close(SegmentExecStatus *const exec_status) { + if (NULL != block_buffer_) { + delete block_buffer_; + block_buffer_ = NULL; + } + + state_.child_->Close(exec_status); return true; } @@ -271,5 +305,13 @@ bool 
PhysicalSort::CreateBlock(BlockStreamBase *&target) const { BlockStreamBase::createBlock(state_.input_schema_, state_.block_size_); return target != 0; } +RetCode PhysicalSort::GetAllSegments(stack *all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + ret = state_.child_->GetAllSegments(all_segments); + } + return ret; +} + } // namespace physical_operator } // namespace claims diff --git a/physical_operator/physical_sort.h b/physical_operator/physical_sort.h index 78a4d0338..9ac8b3c9c 100644 --- a/physical_operator/physical_sort.h +++ b/physical_operator/physical_sort.h @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -41,13 +42,16 @@ #include "../common/Block/BlockStream.h" #include "../common/Block/DynamicBlockBuffer.h" #include "../common/expression/expr_node.h" + #include "../utility/lock.h" #include "../utility/rdtsc.h" using claims::common::DataTypeOperFunc; +using claims::common::ExprEvalCnxt; using std::vector; using std::pair; using claims::common::ExprNode; using claims::common::OperFuncInfo; + namespace claims { namespace physical_operator { #define NEWCONDI @@ -86,6 +90,7 @@ class PhysicalSort : public PhysicalOperator { vector> order_by_attrs_copy_; DataTypeOperFunc (*compare_funcs_)[2]; + ExprEvalCnxt eecnxt_, eecnxt1_; private: friend class boost::serialization::access; @@ -106,19 +111,20 @@ class PhysicalSort : public PhysicalOperator { * partiton the function operates on. * @return true in all cases. */ - bool Open(const PartitionOffset& part_off = 0); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& part_off = 0); /** * @brief Method description: Send the sorted data to father operator. * @param BlockStreamBase *block, the info of block * @return false if there's no tuple to function and the block is empty, * otherwise true. 
*/ - bool Next(BlockStreamBase* block); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); /** * @brief Method description: Close child opertor. * @return true. */ - bool Close(); + bool Close(SegmentExecStatus* const exec_status); void Print(); private: @@ -143,11 +149,12 @@ class PhysicalSort : public PhysicalOperator { * state_.block_size_. */ bool CreateBlock(BlockStreamBase*&) const; + RetCode GetAllSegments(stack* all_segments); private: State state_; /* store the data in the buffer!*/ - DynamicBlockBuffer block_buffer_; + DynamicBlockBuffer* block_buffer_; unsigned all_cur_; int64_t thread_id_; vector all_tuples_; diff --git a/physical_operator/result_collector.cpp b/physical_operator/result_collector.cpp index dacc7da76..b2ee81f3d 100755 --- a/physical_operator/result_collector.cpp +++ b/physical_operator/result_collector.cpp @@ -27,6 +27,8 @@ #include #include #include // NOLINT +#include + #include "../utility/rdtsc.h" using std::vector; using std::string; @@ -36,13 +38,22 @@ using std::endl; namespace claims { namespace physical_operator { ResultCollector::ResultCollector() - : finished_thread_count_(0), registered_thread_count_(0) { + : finished_thread_count_(0), + registered_thread_count_(0), + block_buffer_(NULL), + thread_id_(0) { + set_phy_oper_type(kPhysicalResult); sema_open_.set_value(1); sema_open_finished_.set_value(0); sema_input_complete_.set_value(0); } ResultCollector::ResultCollector(State state) - : finished_thread_count_(0), registered_thread_count_(0), state_(state) { + : finished_thread_count_(0), + registered_thread_count_(0), + state_(state), + block_buffer_(NULL), + thread_id_(0) { + set_phy_oper_type(kPhysicalResult); sema_open_.set_value(1); sema_open_finished_.set_value(0); sema_input_complete_.set_value(0); @@ -70,7 +81,10 @@ ResultCollector::State::State(Schema* input, PhysicalOperatorBase* child, partition_offset_(partitoin_offset), column_header_(column_header) {} -bool ResultCollector::Open(const 
PartitionOffset& part_offset) { +bool ResultCollector::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& part_offset) { + RETURN_IF_CANCELLED(exec_status); + state_.partition_offset_ = part_offset; if (sema_open_.try_wait()) { @@ -83,22 +97,31 @@ bool ResultCollector::Open(const PartitionOffset& part_offset) { } } registered_thread_count_++; + RETURN_IF_CANCELLED(exec_status); + + exec_status_ = exec_status; if (true == g_thread_pool_used) { Environment::getInstance()->getThreadPool()->AddTask(CollectResult, this); } else { - pthread_t tid; - pthread_create(&tid, NULL, CollectResult, this); + pthread_create(&thread_id_, NULL, CollectResult, this); } unsigned long long int start = curtick(); + sema_input_complete_.wait(); block_buffer_->query_time_ = getSecond(start); return true; } -bool ResultCollector::Next(BlockStreamBase* block) { return false; } +bool ResultCollector::Next(SegmentExecStatus* const exec_status, + BlockStreamBase* block) { + return false; +} -bool ResultCollector::Close() { - state_.child_->Close(); +bool ResultCollector::Close(SegmentExecStatus* const exec_status) { + if (0 != thread_id_) { + pthread_join(thread_id_, NULL); + } + state_.child_->Close(exec_status); sema_input_complete_.set_value(0); return true; } @@ -134,8 +157,10 @@ void ResultCollector::DeallocateBlockStream(BlockStreamBase*& target) const { void* ResultCollector::CollectResult(void* arg) { ResultCollector* Pthis = (ResultCollector*)arg; - Pthis->state_.child_->Open(Pthis->state_.partition_offset_); - BlockStreamBase* block_for_asking; + + Pthis->state_.child_->Open(Pthis->exec_status_, + Pthis->state_.partition_offset_); + BlockStreamBase* block_for_asking = NULL; if (false == Pthis->CreateBlockStream(block_for_asking)) { assert(false); return 0; @@ -145,13 +170,17 @@ void* ResultCollector::CollectResult(void* arg) { unsigned long long start = 0; start = curtick(); - while (Pthis->state_.child_->Next(block_for_asking)) { + while 
(Pthis->state_.child_->Next(Pthis->exec_status_, block_for_asking)) { Pthis->block_buffer_->atomicAppendNewBlock(block_for_asking); if (false == Pthis->CreateBlockStream(block_for_asking)) { assert(false); return 0; } + if (Pthis->exec_status_->is_cancelled()) { + break; + } } + DELETE_PTR(block_for_asking); Pthis->sema_input_complete_.post(); double eclipsed_seconds = getSecond(start); Pthis->block_buffer_->query_time_ = eclipsed_seconds; @@ -179,5 +208,12 @@ ResultCollector::State::~State() { // delete input_; // delete child_; } +RetCode ResultCollector::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + ret = state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/result_collector.h b/physical_operator/result_collector.h index 6a0bb4c17..4ce8863f1 100755 --- a/physical_operator/result_collector.h +++ b/physical_operator/result_collector.h @@ -22,6 +22,7 @@ * Author: wangli * Email: wangli1426@gmail.com */ +#include #ifndef PHYSICAL_QUERY_PLAN_BLOCKSTREAMRESULTCOLLECTOR_H_ #define PHYSICAL_QUERY_PLAN_BLOCKSTREAMRESULTCOLLECTOR_H_ @@ -48,8 +49,7 @@ class ResultCollector : public PhysicalOperatorBase { friend class ResultCollector; public: - State(Schema* input, PhysicalOperatorBase* child, - const unsigned block_size, + State(Schema* input, PhysicalOperatorBase* child, const unsigned block_size, vector column_header = vector(), const PartitionOffset partitoin_offset = 0); State(); @@ -76,10 +76,12 @@ class ResultCollector : public PhysicalOperatorBase { ResultCollector(); ResultCollector(State); virtual ~ResultCollector(); - bool Open(const PartitionOffset& part_off = 0); - bool Next(BlockStreamBase* block); - bool Close(); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& part_off = 0); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const 
exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); /** * @brief Get query result data set. @@ -109,6 +111,9 @@ class ResultCollector : public PhysicalOperatorBase { private: State state_; + pthread_t thread_id_; + + SegmentExecStatus* exec_status_; /** * It is the resposibility of the user to free the resultset. */ @@ -126,8 +131,7 @@ class ResultCollector : public PhysicalOperatorBase { friend class boost::serialization::access; template void serialize(Archive& ar, const unsigned int version) { - ar& boost::serialization::base_object(*this) & - state_; + ar& boost::serialization::base_object(*this) & state_; } }; diff --git a/physical_operator/result_printer.cpp b/physical_operator/result_printer.cpp index 74a7681a9..2b7ad9a42 100755 --- a/physical_operator/result_printer.cpp +++ b/physical_operator/result_printer.cpp @@ -21,20 +21,33 @@ * Author: wangli */ +#include "../common/error_define.h" #include "../physical_operator/result_printer.h" +#include +using claims::common::rSuccess; namespace claims { namespace physical_operator { -ResultPrinter::ResultPrinter() : block_buffer_(0) {} -ResultPrinter::ResultPrinter(State state) : state_(state), block_buffer_(0) {} +ResultPrinter::ResultPrinter() : block_buffer_(0) { + set_phy_oper_type(kPhysicalResult); +} +ResultPrinter::ResultPrinter(State state) : state_(state), block_buffer_(0) { + set_phy_oper_type(kPhysicalResult); +} ResultPrinter::~ResultPrinter() {} -bool ResultPrinter::Open(const PartitionOffset& offset) { +bool ResultPrinter::Open(SegmentExecStatus* const exec_status, + const PartitionOffset& offset) { + RETURN_IF_CANCELLED(exec_status); + block_buffer_ = BlockStreamBase::createBlock(state_.schema_, state_.block_size_); tuple_count_ = 0; - return state_.child_->Open(offset); + return state_.child_->Open(exec_status, offset); } -bool ResultPrinter::Next(BlockStreamBase*) { +bool ResultPrinter::Next(SegmentExecStatus* const exec_status, + BlockStreamBase*) { + 
RETURN_IF_CANCELLED(exec_status); + printf("Query result:\n"); printf( "========================================================================" @@ -49,7 +62,9 @@ bool ResultPrinter::Next(BlockStreamBase*) { // getchar(); unsigned block_count(0); - while (state_.child_->Next(block_buffer_)) { + while (state_.child_->Next(exec_status, block_buffer_)) { + RETURN_IF_CANCELLED(exec_status); + unsigned tuple_in_block(0); BlockStreamBase::BlockStreamTraverseIterator* it = block_buffer_->createIterator(); @@ -66,11 +81,11 @@ bool ResultPrinter::Next(BlockStreamBase*) { } return false; } -bool ResultPrinter::Close() { +bool ResultPrinter::Close(SegmentExecStatus* const exec_status) { printf("tuple count:%d\n", tuple_count_); block_buffer_->~BlockStreamBase(); cout << "----------total tuples: " << tuple_count_ << "----------\n"; - return state_.child_->Close(); + return state_.child_->Close(exec_status); } void ResultPrinter::Print() { printf("Print:\n"); @@ -81,5 +96,13 @@ ResultPrinter::State::~State() { delete schema_; if (child_ > 0) delete child_; } + +RetCode ResultPrinter::GetAllSegments(stack* all_segments) { + RetCode ret = rSuccess; + if (NULL != state_.child_) { + ret = state_.child_->GetAllSegments(all_segments); + } + return ret; +} } // namespace physical_operator } // namespace claims diff --git a/physical_operator/result_printer.h b/physical_operator/result_printer.h index a8506c035..d8e96544b 100755 --- a/physical_operator/result_printer.h +++ b/physical_operator/result_printer.h @@ -25,8 +25,11 @@ #define PHYSICAL_OPERATOR_RESULT_PRINTER_H_ #include #include +#include + #include "../physical_operator/physical_operator_base.h" #include "../common/Schema/Schema.h" + namespace claims { namespace physical_operator { class ResultPrinter : public PhysicalOperatorBase { @@ -56,10 +59,12 @@ class ResultPrinter : public PhysicalOperatorBase { ResultPrinter(); ResultPrinter(State state); virtual ~ResultPrinter(); - bool Open(const PartitionOffset& offset); - bool 
Next(BlockStreamBase* block); - bool Close(); + bool Open(SegmentExecStatus* const exec_status, + const PartitionOffset& offset); + bool Next(SegmentExecStatus* const exec_status, BlockStreamBase* block); + bool Close(SegmentExecStatus* const exec_status); void Print(); + RetCode GetAllSegments(stack* all_segments); private: State state_; diff --git a/physical_operator/segment.cpp b/physical_operator/segment.cpp new file mode 100644 index 000000000..50993bf99 --- /dev/null +++ b/physical_operator/segment.cpp @@ -0,0 +1,56 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/stmt_handler/segment.cpp + * + * Created on: Mar 13, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#include "../physical_operator/segment.h" + +#include +#include +#include "../common/ids.h" +#include "../common/memory_handle.h" + +namespace claims { +namespace physical_operator { + +Segment::Segment() { + // TODO Auto-generated constructor stub +} + +Segment::~Segment() { + // TODO Auto-generated destructor stub + DELETE_PTR(plan_segment_); +} +Segment::Segment(PhysicalOperatorBase* plan_segment, + vector lower_node_id_list, + vector upper_node_id_list, u_int64_t exchange_id) + : plan_segment_(plan_segment), + lower_node_id_list_(lower_node_id_list), + upper_node_id_list_(upper_node_id_list), + exchange_id_(exchange_id) {} +RetCode Segment::Send_Plan_Segment() { return 0; } +} +} // namespace claims diff --git a/physical_operator/segment.h b/physical_operator/segment.h new file mode 100644 index 000000000..3e2a2ead2 --- /dev/null +++ b/physical_operator/segment.h @@ -0,0 +1,59 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/stmt_handler/segment.h + * + * Created on: Mar 13, 2016 + * Author: fzh + * Email: fzhedu@gmail.com + * + * Description: + * + */ + +#ifndef PHYSICAL_OPERATOR_SEGMENT_H_ +#define PHYSICAL_OPERATOR_SEGMENT_H_ +#include +#include "../common/error_define.h" +#include "../common/ids.h" +using std::vector; +namespace claims { + +namespace physical_operator { +class PhysicalOperatorBase; +class Segment { + public: + Segment(); + Segment(PhysicalOperatorBase* plan_segment_, + vector lower_node_id_list, vector upper_node_id_list, + u_int64_t exchange_id); + virtual ~Segment(); + RetCode Send_Plan_Segment(); + PhysicalOperatorBase* get_plan_segment() { return plan_segment_; } + + vector lower_node_id_list_; + vector upper_node_id_list_; + u_int64_t exchange_id_ = 0; + + private: + PhysicalOperatorBase* plan_segment_; +}; +} +} // namespace claims + +#endif // PHYSICAL_OPERATOR_SEGMENT_H_ diff --git a/sbin/1-compile.sh b/sbin/1-compile.sh index a58a0c957..6a40caa0a 100755 --- a/sbin/1-compile.sh +++ b/sbin/1-compile.sh @@ -1,24 +1,15 @@ #!/bin/sh -if [ "$1" = "" ]; then -export CLAIMS_HOME=$CLAIMS_HOME -else -export CLAIMS_HOME=$1 -fi - -echo "CLAIMS_HOME:[$CLAIMS_HOME]" -sed -i 's:^export CLAIMS_HOME=.*$:export CLAIMS_HOME='$CLAIMS_HOME':g' ~/.bashrc - - -cd $CLAIMS_HOME - +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../ +# now in CLAIMS_HOME ./build.sh clean ./build.sh init +./build.sh init mkdir install cd install -../configure --prefix=$CLAIMS_HOME/install +../configure gcc --version -make -j 3 -make install - +make -j all diff --git a/sbin/2-claims-conf/cluster-deploy.config b/sbin/2-claims-conf/cluster-deploy.config deleted file mode 100644 index 0f19268ea..000000000 --- a/sbin/2-claims-conf/cluster-deploy.config +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/sh -########################### -# claims deploy config -########################### -master="127.0.0.1" -#slaves="10.11.1.191 10.11.1.193 10.11.1.194" 
-deploypath=/home/claims -user=claims - -logfilepath=$CLAIMS_HOME/sbin/logs - -runclaimsprocid=$CLAIMS_HOME/sbin/.claimssserver.pid - -########################## -# claimsserver config -########################## -local_disk_mode=1 - -#data=/home/claims/data/test/sf-1/ -#data=/home/claims/data/test/mysqltest/ -#data=/home/claims/data/test/18-partition/sf-1/ -#data=/home/claims/data/test/SSE_demo/ -#data=/home/claims/data/demo/sse1day/ -#data=/home/claims/data/demo/sse1month/ -#data=/home/claims/data/demo/sse1week/ -#data=/home/imdb/data/yk/ -#data=/home/imdb/data/test/decimal/ -#data=/home/imdb/data/test/tpch-sf1/ -#data=/home/imdb/data/demo/poc/ -#data=/home/claims/data/test/ -#data=/home/claims/data/test/decimal/ -#data=/home/imdb/data/tpc-h/1-partition/sf-1/ -#data=/home/imdb/data/tpc-h/4-partition/sf-1/ -#data=/home/imdb/data/tpc-h/1-partition/sf-10/ -#data=/home/imdb/data/tpch/ -#data=/home/imdb/data/tpc-h/8-partition/sf-1/ -#data=/home/claims/data/test/sf-1/ -#data=/home/claims/data/demo/poc/ -#data=/home/imdb/data/test/load/ -################################ -# new tpch data for test -################################ -#data=/home/claims/data/tpc-h/1-partition/sf-1/ -#data=/home/claims/data/tpc-h/4-partition/sf-1/ -#data=/home/claims/data/tpc-h/8-partition/sf-1/ -#data=/home/claims/data/tpc-h/1-partition/sf-10/ -#data=/home/claims/data/tpc-h/4-partition/sf-10/ -#data=/home/claims/data/tpc-h/8-partition/sf-10/ -#data=/home/claims/data/tpc-h/1-partition/sf-100/ -#data=/home/claims/data/tpc-h/4-partition/sf-100/ -#data=/home/claims/data/tpc-h/8-partition/sf-100/ -data=/home/imdb/data/tpc-h/1-partition/sf-1/ -client_listener_port=10000 diff --git a/sbin/2-claims-conf/cluster.config b/sbin/2-claims-conf/cluster.config new file mode 100644 index 000000000..502bfef18 --- /dev/null +++ b/sbin/2-claims-conf/cluster.config @@ -0,0 +1,60 @@ +########################### +# claims deploy config +########################### +[cluster] +master = 219.228.147.162 +#slaves = 
10.11.1.190 10.11.1.191 10.11.1.193 10.11.1.194 10.11.1.195 10.11.1.196 10.11.1.197 10.11.1.198 10.11.1.199 +#claimshome = /home/claims/Han/deploy2 +#claimshome = /home/claims/Han/deploy +claimshome = /home/imdb/test/CLAIMS +user = claims +logpath = sbin/logs +runclaimsprocid = sbin/.claimssserver.pid + +########################## +# claimsserver config +########################## +[claims] +################################ +# localdisk +################################ +#data = /home/claims/data/test/concert/ +#data = /home/imdb/data/test/guizhou/ +#data = /home/imdb/data/tpc-h/sf1/4partition/ +#data = /home/imdb/data/test/cst/ +################################ +# hdfs +################################ +#data = /home/claims/data/tpc-h/sf1/1partition/ +#data = /home/claims/data/tpc-h/sf1/4partition/ +#data = /home/claims/data/tpc-h/sf1/8partition/ +#data = /home/claims/data/tpc-h/sf10/1partition/ +#data = /home/claims/data/tpc-h/sf10/4partition/ +#data = /home/claims/data/tpc-h/sf10/8partition/ +#data = /home/claims/data/tpc-h/sf100/1partition/ +#data = /home/claims/data/tpc-h/sf100/4partition/ +#data = /home/claims/data/tpc-h/sf100/8partition/ +data = /test/claims + +hdfs_master_ip = 219.228.147.162 + +hdfs_master_port = 9000 + +max_degree_of_parallelism = 4 + +initial_degree_of_parallelism = 1 + +expander_adaptivity_check_frequency = 1000 + +enable_expander_adaptivity = 0 + +local_disk_mode = 0 + +client_listener_port = 10012 + +enable_codegen = 0 + +load_thread_num = 12 + +memory_utilization = 80 + diff --git a/sbin/2-claims-conf/generate-config.sh b/sbin/2-claims-conf/generate-config.sh index ca6acb21e..bf2637af1 100755 --- a/sbin/2-claims-conf/generate-config.sh +++ b/sbin/2-claims-conf/generate-config.sh @@ -1,6 +1,7 @@ #!/bin/sh -cd $CLAIMS_HOME/sbin/2-claims-conf +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR source ./load-config.sh rm -f config-* @@ -20,15 +21,15 @@ ip = "$slave"; #端口范围(调试用) PortManager: { - start = 19000; - end 
= 19500; + start = 48000; + end = 48500; } #master的IP地址和端口 coordinator: { ip="$master" - port="11001" + port="10010" } # whether this instance is the master. @@ -38,20 +39,20 @@ master = $ismaster data = "$data" #hdfs主节点 -hdfs_master_ip = "10.11.1.192" +hdfs_master_ip = "$hdfs_master_ip" #hdfs主节点端口 -hdfs_master_port = 9000 +hdfs_master_port = $hdfs_master_port #最大单机算子并行度 -max_degree_of_parallelism=10 +max_degree_of_parallelism=$max_degree_of_parallelism #初始单机算子并行度 -initial_degree_of_parallelism=8 +initial_degree_of_parallelism=$initial_degree_of_parallelism -expander_adaptivity_check_frequency=1000 +expander_adaptivity_check_frequency=$expander_adaptivity_check_frequency -enable_expander_adaptivity=0 +enable_expander_adaptivity=$enable_expander_adaptivity # 0: hdfs # 1: local @@ -59,9 +60,11 @@ local_disk_mode = $local_disk_mode client_listener_port = $client_listener_port -enable_codegen = 0 +enable_codegen = $enable_codegen -load_thread_num = 12 +load_thread_num = $load_thread_num + +memory_utilization = $memory_utilization EOF done diff --git a/sbin/2-claims-conf/load-config.sh b/sbin/2-claims-conf/load-config.sh index 4f0365e39..013ad7dff 100755 --- a/sbin/2-claims-conf/load-config.sh +++ b/sbin/2-claims-conf/load-config.sh @@ -1,35 +1,77 @@ #!/bin/sh +#------------------------------------------------------------------------------ +# model: cfg_get +# args: [1] => IN: +# [2] => IN: +# [3] => OUT: +# describe: get configure value by key from a configure file +# example: +# > cfg_get "cluster.config" "master" "master" +# > echo $master +# file: +#------------------------------------------------------------------------------ +# Usage: getcfg +getcfg() { + export $3="`sed '/^\s*'$2'\s*=/!d;s/.*=\s*//' $1`" +} + function load_config() { -config=$CLAIMS_HOME/sbin/2-claims-conf/cluster-deploy.config -#master=`sed '/^master=/!d;s/.*=//' $config` -#slaves=`sed '/^slaves=/!d;s/.*=//' $config` -#local_disk_mode=`sed '/^local_disk_mode=/!d;s/.*=//' $config` -#data=`sed 
'/^data=/!d;s/.*=//' $config` -#deploypath=`sed '/^deploypath=/!d;s/.*=//' $config` -#user=`sed '/^user=/!d;s/.*=//' $config` -#client_listener_port=`sed '/^client_listener_port=/!d;s/.*=//' $config` -#logfilepath=`sed '/^logfilepath=/!d;s/.*=//' $config` -#runclaimsprocid=`sed '/^runclaimsprocid=/!d;s/.*=//' $config` -source $config + config=cluster.config + getcfg $config master master + getcfg $config slaves slaves + getcfg $config claimshome claimshome + getcfg $config user user + getcfg $config logpath logpath + getcfg $config runclaimsprocid runclaimsprocid + getcfg $config data data + getcfg $config hdfs_master_ip hdfs_master_ip + getcfg $config hdfs_master_port hdfs_master_port + getcfg $config max_degree_of_parallelism max_degree_of_parallelism + getcfg $config initial_degree_of_parallelism initial_degree_of_parallelism + getcfg $config expander_adaptivity_check_frequency expander_adaptivity_check_frequency + getcfg $config enable_expander_adaptivity enable_expander_adaptivity + getcfg $config local_disk_mode local_disk_mode + getcfg $config client_listener_port client_listener_port + getcfg $config enable_codegen enable_codegen + getcfg $config load_thread_num load_thread_num + getcfg $config memory_utilization memory_utilization + +} + +function currdir() +{ + CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + cd $CURRDIR } + function print_config() { echo "=========cluster config start========" -echo "config : [$config]" -echo "master : [$master]" -echo "slaves : [$slaves]" -echo "deploypath : [$deploypath]" -echo "user : [$user]" -echo "local_disk_mode : [$local_disk_mode]" -echo "data : [$data]" -echo "client_listener_port :[$client_listener_port]" -echo "logfilepath : [$logfilepath]" -echo "runclaimsprocid : [$runclaimsprocid]" +echo "config : [$config]" +echo "master : [$master]" +echo "slaves : [$slaves]" +echo "claimshome : [$claimshome]" +echo "user : [$user]" +echo "logpath : [$logpath]" +echo "runclaimsprocid : [$runclaimsprocid]" 
+echo "data : [$data]" +echo "hdfs_master_ip : [$hdfs_master_ip]" +echo "hdfs_master_port : [$hdfs_master_port]" +echo "max_degree_of_parallelism : [$max_degree_of_parallelism]" +echo "initial_degree_of_parallelism : [$initial_degree_of_parallelism]" +echo "expander_adaptivity_check_frequency : [$expander_adaptivity_check_frequency]" +echo "enable_expander_adaptivity : [$enable_expander_adaptivity]" +echo "local_disk_mode : [$local_disk_mode]" +echo "client_listener_port : [$client_listener_port]" +echo "enable_codegen : [$enable_codegen]" +echo "load_thread_num : [$load_thread_num]" +echo "memory_utilization : [$memory_utilization]" echo "=========cluster config end==========" } +currdir load_config #print_config diff --git a/sbin/3-deploy.sh b/sbin/3-deploy.sh index bb15de71e..2d216fdf7 100755 --- a/sbin/3-deploy.sh +++ b/sbin/3-deploy.sh @@ -1,27 +1,45 @@ #!/bin/sh -if [ ! -f "${0##*/}" ]; then - echo "please run script in sbin/ directory!" - exit 1 -fi -cd $CLAIMS_HOME/sbin/2-claims-conf/ +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd 2-claims-conf/ source ./load-config.sh -cd ../ +source ./generate-config.sh +cd ../../ +# now in CLAIMS_HOME -if [ "$1" = "all" ]; then -for slave in $slaves -do - ssh $user@$slave "$deploypath/stop-slave.sh>/dev/null 2>&1 &" - - scp $CLAIMS_HOME/install/claimsserver $user@$slave:$deploypath - scp $CLAIMS_HOME/sbin/2-claims-conf/config-$slave $user@$slave:$deploypath - scp $CLAIMS_HOME/sbin/slave-scripts/start-slave.sh $user@$slave:$deploypath - scp $CLAIMS_HOME/sbin/slave-scripts/stop-slave.sh $user@$slave:$deploypath -done -else -for slave in $slaves +./sbin/stop-all.sh>/dev/null 2>&1 + +echo -e "\033[36m<$claimshome>\033[0m" + +for node in $master $slaves do - scp $CLAIMS_HOME/sbin/2-claims-conf/config-$slave $user@$slave:$deploypath -done -fi + echo -e "\033[36m<-$node->\033[0m" + + if [ "$1" = "" ] || [ "$1" = "exec" ]; then + ssh -f -n -l $user $node "if [ ! 
-d '$claimshome/sbin' ]; then mkdir -p '$claimshome/sbin'; fi; exit" + ssh -f -n -l $user $node "if [ ! -d '$claimshome/install' ]; then mkdir -p '$claimshome/install'; fi; exit" + scp install/claimsserver $user@$node:$claimshome/install + scp install/client $user@$node:$claimshome/install + scp install/test $user@$node:$claimshome/install + scp sbin/*.sh $user@$node:$claimshome/sbin + fi + + if [ "$1" = "" ] || [ "$1" = "config" ]; then + ssh -f -n -l $user $node "if [ ! -d '$claimshome/sbin/2-claims-conf' ]; then mkdir -p '$claimshome/sbin/2-claims-conf'; fi; exit" + scp -r sbin/2-claims-conf/cluster.config $user@$node:$claimshome/sbin/2-claims-conf + scp -r sbin/2-claims-conf/*.sh $user@$node:$claimshome/sbin/2-claims-conf + scp -r sbin/2-claims-conf/config-$node $user@$node:$claimshome/sbin/2-claims-conf + fi + + if [ "$1" = "" ] || [ "$1" = "test" ]; then + ssh -f -n -l $user $node "if [ ! -d '$claimshome/sbin/claims-test' ]; then mkdir -p '$claimshome/sbin/claims-test'; fi; exit" + scp -r sbin/claims-test/*.sh $user@$node:$claimshome/sbin/claims-test + ssh -f -n -l $user $node "if [ ! -d '$claimshome/sbin/claims-test/testcase' ]; then mkdir -p '$claimshome/sbin/claims-test/testcase'; fi; exit" + scp -r sbin/claims-test/testcase/* $user@$node:$claimshome/sbin/claims-test/testcase + ssh -f -n -l $user $node "if [ ! -d '$claimshome/sbin/claims-test/monitor' ]; then mkdir -p '$claimshome/sbin/claims-test/monitor'; fi; exit" + scp -r sbin/claims-test/monitor/* $user@$node:$claimshome/sbin/claims-test/monitor + fi + +done diff --git a/sbin/4-stop-all.sh b/sbin/4-stop-all.sh deleted file mode 100755 index 0766c8d2e..000000000 --- a/sbin/4-stop-all.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh -if [ ! -f "${0##*/}" ]; then - echo "please run script in sbin/ directory!" 
- exit 1 -fi - -cd $CLAIMS_HOME/sbin/2-claims-conf/ -source ./load-config.sh -cd ../ - -for slave in $slaves -do -ssh $user@$slave "$deploypath/stop-slave.sh>/dev/null 2>&1" & -echo -e "$slave claimsserver stop [\033[32mOK\033[0m]" -done - -############################## -# master stop claimsserver # -############################## -if [ "$1" = "all" ]; then - -claimspids=`ps x | grep -w $CLAIMS_HOME/install/claimsserver | grep -v grep | awk '{print $1}'` -if [ "$claimspids" != "" ]; then -for claimspid in $claimspids -do -echo "claimsserver master pid : [$claimspid]" -kill -9 $claimspid -done -fi - -clientpids=`ps x | grep -w $CLAIMS_HOME/install/client | grep -v grep | awk '{print $1}'` -if [ "$clientpids" != "" ]; then -for clientpid in $clientpids -do -echo "claims client pid : [$clientpid]" -kill -9 $clientpid -done -fi - -else -if [ -f "$runclaimsprocid" ]; then -claimspids=`sed '/^claimsserver=/!d;s/.*=//' $runclaimsprocid` -if [ "$claimspids" != "" ]; then -echo "claimsserver master pid : [$claimspids]" -kill -9 $claimspids -fi -rm -f $runclaimsprocid -fi -fi diff --git a/sbin/5-start-all.sh b/sbin/5-start-all.sh deleted file mode 100755 index b6f1652f2..000000000 --- a/sbin/5-start-all.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh - -if [ ! -f "${0##*/}" ]; then - echo "please run script in sbin/ directory!" - exit 1 -fi - -cd $CLAIMS_HOME/sbin/2-claims-conf/ -source ./load-config.sh - -# master config -configfile=$CLAIMS_HOME/sbin/2-claims-conf/config-$master - - -####################################### -# start cluster # -####################################### - -cd $CLAIMS_HOME -cd install -ulimit -c unlimited - -if [ ! 
-d "$logfilepath" ]; then - mkdir -p "$logfilepath" -fi - -thislog=$logfilepath/claimsserver.$(date +%Y-%m-%d).log - -# start master firstly -if [ -f "$runclaimsprocid" ]; then -claimspids=`sed '/^claimsserver=/!d;s/.*=//' $runclaimsprocid` -if [ "$claimspids" != "" ]; then - echo -e "\033[31m claimsserver is already running with process pid:[$claimspids] - please run script: ./4-stop-all.sh to stop it firstly!\033[0m" - exit -fi -fi -echo "-------------------------------" -echo "configfile: [$configfile]" -echo "-------------------------------" -datestr=`date '+%Y-%m-%d %H:%M:%S'` -thisstartstr="========run claimsserver time: $datestr========" -echo $thisstartstr -echo $thisstartstr >> $thislog -echo -e "\033[31m`pwd`\033[0m" -$CLAIMS_HOME/install/claimsserver -c $configfile >> $thislog & -echo "claimsserver=$!" > $runclaimsprocid -echo -e "master start claimsserver [\033[32mOK\033[0m]" - -# start slaves -for slave in $slaves -do -{ - ssh $user@$slave "$deploypath/start-slave.sh config-$slave>/dev/null 2>&1" & - echo -e "$slave claimsserver start [\033[32mOK\033[0m]" -} -done - diff --git a/sbin/claims-test/1-run-process.sh b/sbin/claims-test/1-run-process.sh new file mode 100755 index 000000000..1e6c5de20 --- /dev/null +++ b/sbin/claims-test/1-run-process.sh @@ -0,0 +1,77 @@ +#!/bin/sh +########################################################################################### +#default param # +#./1-run-process.sh --concurrency=3 --testsuit=testsuit1 --resultpath=sf1(作为结果文件夹前缀)# +# --Isprocessresult=1 # +########################################################################################### +concurrency_count=1 +testsuit="." 
+resultpath="sf1" +processresult=0 + +var=$* +for i in $var +do + if [[ $i = --concurrency=* ]] + then + concurrency_count=${i##*=} + elif [[ $i = --testsuit=* ]] + then + testsuit=${i##*=} + elif [[ $i = --resultpath=* ]] + then + resultpath=${i##*=} + elif [[ $i = --Isprocessresult=* ]] + then + processresult=${i##*=} + fi +done + +set -e +cd $CLAIMS_HOME/sbin/2-claims-conf/ +source ./load-config.sh +################## +# start test # +################## + cd $CLAIMS_HOME + cd install + ulimit -c unlimited + +echo "concurrency_count=[$concurrency_count]" + +cd $CLAIMS_HOME/sbin/claims-test + +filename=${resultpath}_`date '+%Y-%m-%d-%H%M%S'` +mkdir ./testresult/$filename + +tests=`find ./testcase/${testsuit} -maxdepth 1 -name "*.test"` +for test in $tests +do + resultfile=${test##*/} + result=${resultfile%.*} + for((cur=1;cur<=concurrency_count;cur++)) + do + { + datestr=`date '+%Y-%m-%d %H:%M:%S'` + thisstartstr="========run test:[$result]-[$cur] time: $datestr========" + echo -e "\033[33m$thisstartstr\033[0m" + $CLAIMS_HOME/install/client $master $client_listener_port < $test > ./testresult/$filename/${result}_${cur}.result + sleep 1 + }& + done + wait +done + +if [ $processresult = 1 ] + then + echo -e "\033[33m======start process result\033[0m" + ./process_claims_result.sh $filename + echo -e "\033[33m======end process result\033[0m" + wait +else +echo "not process result" +fi + + + + diff --git a/sbin/claims-test/2-compare.sh b/sbin/claims-test/2-compare.sh new file mode 100755 index 000000000..5a10243b0 --- /dev/null +++ b/sbin/claims-test/2-compare.sh @@ -0,0 +1,20 @@ +#!/bin/sh +############## +# $1 = folder# +############## +folder=$1 +cd ./testresult/$folder +results=`find . -maxdepth 1 -name "*.result"` + +touch compareresult + +for result in $results +do + format=${result%_*} + realformat=${format##*/} + diff $result ../../hawq_r/${realformat}_${folder%%_*}.result + if [ $?
-ne 0 ] ; then + echo "$result" >> ./compareresult + fi +done + diff --git a/sbin/claims-test/analysis.sh b/sbin/claims-test/analysis.sh new file mode 100755 index 000000000..f73c8c0c0 --- /dev/null +++ b/sbin/claims-test/analysis.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +testname=concert +num=$2 +vimdiff $testname-$1-$num.result $testname-$1-$[num+1].result $testname-$1-$[num+2].result $testname-$1-$[num+3].result diff --git a/sbin/claims-test/auto-gtestfor.sh b/sbin/claims-test/auto-gtestfor.sh new file mode 100755 index 000000000..631375fc0 --- /dev/null +++ b/sbin/claims-test/auto-gtestfor.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +source /home/claims/.bashrc + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +cd sbin/claims-test/ + rm testresult/* -rf + starttime=$(date '+%Y-%m-%d %H:%M:%S') + ./claimstestnr.sh 1 1 gtestfor >> auto-gtestfor.log + echo "start time:$starttime" + echo "end time:$(date '+%Y-%m-%d %H:%M:%S')" +cd ../../ diff --git a/sbin/claims-test/claimsforgz.sh b/sbin/claims-test/claimsforgz.sh new file mode 100755 index 000000000..43b0548d5 --- /dev/null +++ b/sbin/claims-test/claimsforgz.sh @@ -0,0 +1,17 @@ +#!/bin/sh +set -e +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR + +cd ../2-claims-conf/ +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +# for debug begin ##### +cd install +ulimit -c unlimited +cd ../ +# for debug end ####### +./install/claimsserver -c ./sbin/2-claims-conf/config-$master > /dev/null 2>&1 & +echo "[$(date '+%Y-%m-%d %H:%M:%S')] claimsserver=$!" 
>> guizhoutest.log diff --git a/sbin/claims-test/claimsserver.sh b/sbin/claims-test/claimsserver.sh new file mode 100755 index 000000000..a7c2ede14 --- /dev/null +++ b/sbin/claims-test/claimsserver.sh @@ -0,0 +1,18 @@ +#!/bin/sh +set -e +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR + +cd ../2-claims-conf/ +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +# for debug begin ##### +cd install +ulimit -c unlimited +cd ../ +# for debug end ####### +serverpath=`pwd` +echo $serverpath +$serverpath/install/claimsserver -c $serverpath/sbin/2-claims-conf/config-$master diff --git a/sbin/claims-test/claimstest.sh b/sbin/claims-test/claimstest.sh index 6120bb9e8..6faef407b 100755 --- a/sbin/claims-test/claimstest.sh +++ b/sbin/claims-test/claimstest.sh @@ -1,14 +1,23 @@ #!/bin/sh + +if [ ! -f "${0##*/}" ]; then + echo "please run script in sbin/claims-test/ directory!" + exit 1 +fi + set -e -cd $CLAIMS_HOME/sbin/2-claims-conf/ +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME -################## -# start test # -################## - cd $CLAIMS_HOME - cd install - ulimit -c unlimited +# for debug begin ##### +cd install +ulimit -c unlimited +cd ../ +# for debug end ####### if [ "$1" = "" ]; then runloops=1 @@ -26,52 +35,29 @@ if [ "$3" = "" ]; then echo "please input test case as the third param." 
exit 1 fi +echo "loops=[$runloops];""concurrency=[$concurrency_count]" -echo "runloops=[$runloops]" -echo "concurrency_count=[$concurrency_count]" - -thislog=$logfilepath/client.$(date +%Y-%m-%d).log - -testmode="userresult" - -if [ "$testmode" = "userresult" ]; then -cd $CLAIMS_HOME/sbin/claims-test -for((loop=1;loop<=runloops;loop++)) -do -{ - for((cur=1;cur<=concurrency_count;cur++)) - do - { - datestr=`date '+%Y-%m-%d %H:%M:%S'` - thisstartstr="========run test:[$3] [$loop-$cur] time: $datestr========" - echo -e "\033[33m$thisstartstr\033[0m" -# echo $thisstartstr >> $thislog - $CLAIMS_HOME/install/client $master $client_listener_port < $CLAIMS_HOME/sbin/claims-test/testcase/$3.test > $CLAIMS_HOME/sbin/claims-test/testresult/$3-$cur.result - sleep 1 +thislog=$logpath/client.$(date +%Y-%m-%d).log +cd sbin/claims-test +if [ ! -d "testresult" ]; then + mkdir -p "testresult" +fi +cd ../../ - }& - done - wait -} -done -else for((loop=1;loop<=runloops;loop++)) do { for((cur=1;cur<=concurrency_count;cur++)) do { - datestr=`date '+%Y-%m-%d %H:%M:%S'` - thisstartstr="========run client [$loop-$cur] time: $datestr========" + thisstartstr="========run test:[$3] [$loop-$cur] time:[$datestr]========" echo -e "\033[33m$thisstartstr\033[0m" - echo $thisstartstr >> $thislog - ./client 127.0.0.1 11000 < /home/claims/jenkins-scripts/claims-test/testcase/decimal.test >> $thislog + ./install/client $master $client_listener_port < ./sbin/claims-test/testcase/$3.test > ./sbin/claims-test/testresult/$3-$loop-$cur-$(date '+%Y-%m-%d-%H%M%S').result sleep 1 - }& done wait + echo -e "\033[36mtest loop:[$loop] end time:[`date '+%Y-%m-%d %H:%M:%S'`]\033[0m" } done -fi diff --git a/sbin/claims-test/claimstestnr.sh b/sbin/claims-test/claimstestnr.sh new file mode 100755 index 000000000..c180cec8a --- /dev/null +++ b/sbin/claims-test/claimstestnr.sh @@ -0,0 +1,63 @@ +#!/bin/sh + +if [ ! -f "${0##*/}" ]; then + echo "please run script in sbin/claims-test/ directory!" 
+ exit 1 +fi + +set -e +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +# for debug begin ##### +cd install +ulimit -c unlimited +cd ../ +# for debug end ####### + +if [ "$1" = "" ]; then + runloops=1 +else + runloops=$1 +fi + +if [ "$2" = "" ]; then + concurrency_count=1 +else + concurrency_count=$2 +fi + +if [ "$3" = "" ]; then + echo "please input test case as the third param." + exit 1 +fi +echo "loops=[$runloops];""concurrency=[$concurrency_count]" + +thislog=$logpath/client.$(date +%Y-%m-%d).log +cd sbin/claims-test +if [ ! -d "testresult" ]; then + mkdir -p "testresult" +fi +cd ../../ + +for((loop=1;loop<=runloops;loop++)) +do +{ + for((cur=1;cur<=concurrency_count;cur++)) + do + { + datestr=`date '+%Y-%m-%d %H:%M:%S'` + thisstartstr="========run test:[$3] [$loop-$cur] time:[$datestr]========" + echo -e "\033[33m$thisstartstr\033[0m" + ./install/client $master $client_listener_port < ./sbin/claims-test/testcase/$3.test > /dev/null 2>&1 + sleep 1 + }& + done + wait + echo -e "\033[36mtest loop:[$loop] end time:[`date '+%Y-%m-%d %H:%M:%S'`]\033[0m" +} +done diff --git a/sbin/claims-test/client.sh b/sbin/claims-test/client.sh new file mode 100755 index 000000000..f06611cf9 --- /dev/null +++ b/sbin/claims-test/client.sh @@ -0,0 +1,17 @@ +#!/bin/sh +set -e +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR + +cd ../2-claims-conf/ +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +# for debug begin ##### +cd install +ulimit -c unlimited +cd ../ +# for debug end ####### + +./install/client $master $client_listener_port diff --git a/sbin/claims-test/monitor-gtest.sh b/sbin/claims-test/monitor-gtest.sh deleted file mode 100755 index 66d2e68e4..000000000 --- a/sbin/claims-test/monitor-gtest.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh - -cd $CLAIMS_HOME/sbin/2-claims-conf -source ./load-config.sh - -while [ 1 ] -do - procid=`pgrep 
claimsserver` - if [ "$procid" = "" ]; then - echo "claimsserver is aborted. Try to restart..." - cd $CLAIMS_HOME/sbin - ./4-stop-all.sh - if [ -d "$CLAIMS_HOME/install" ]; then - cd $CLAIMS_HOME/install - if [ ! -f "claimsserver" ]; then - cd $CLAIMS_HOME/sbin - ./1-compile.sh - fi - else - cd $CLAIMS_HOME/sbin - ./1-compile.sh - fi - cd $CLAIMS_HOME/sbin - ./5-start-all.sh - sleep 3 - else - echo "claimsserver is running..." - cd $CLAIMS_HOME/install - ./test --ip $master --port $client_listener_port - fi -done diff --git a/sbin/claims-test/monitor-test.sh b/sbin/claims-test/monitor-test.sh deleted file mode 100755 index 194128dae..000000000 --- a/sbin/claims-test/monitor-test.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -cd $CLAIMS_HOME/sbin/2-claims-conf -source ./load-config.sh - -while [ 1 ] -do - procid=`pgrep claimsserver` - if [ "$procid" = "" ]; then - echo "claimsserver is aborted. Try to restart..." - cd $CLAIMS_HOME/sbin - ./4-stop-all.sh - if [ "$local_disk_mode" = "1" ]; then - rm $data* - fi - if [ -d "$CLAIMS_HOME/install" ]; then - cd $CLAIMS_HOME/install - if [ ! -f "claimsserver" ]; then - cd $CLAIMS_HOME/sbin - ./1-compile.sh - fi - else - cd $CLAIMS_HOME/sbin - ./1-compile.sh - fi - cd $CLAIMS_HOME/sbin - ./5-start-all.sh - sleep 3 - else - echo "claimsserver is running..." - cd $CLAIMS_HOME/sbin/claims-test/ - ./claimstest.sh 1 3 decimal - fi -done diff --git a/sbin/claims-test/monitor/monitor-concert.sh b/sbin/claims-test/monitor/monitor-concert.sh new file mode 100755 index 000000000..05334b630 --- /dev/null +++ b/sbin/claims-test/monitor/monitor-concert.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +while [ 1 ] +do + procid=`ps x | grep -w ./install/claimsserver | grep -v grep | awk '{print $1}'` + if [ "$procid" = "" ]; then + echo "claimsserver is aborted. Try to restart..." 
+ ./sbin/stop-all.sh + if [ "$local_disk_mode" = "1" ]; then + rm $data* + fi + if [ -d "install" ]; then + if [ ! -f "install/claimsserver" ]; then + ./sbin/1-compile.sh + fi + else + ./sbin/1-compile.sh + fi + ./sbin/3-deploy.sh + ./sbin/start-all.sh + sleep 3 + else + echo "claimsserver is running..." + cd sbin/claims-test/ + read -p "Hit the ENTER |__>" tempuseless + echo $tempuseless + #./claimstest.sh 1 1 concertdroptable + #./claimstest.sh 1 1 concertcreatetable + #./claimstest.sh 1 1 concertload50wdata + #./claimstest.sh 1 100 concert + ./claimstest.sh 100 1 cancel + cd ../../ + # now in CLAIMS_HOME + fi +done diff --git a/sbin/claims-test/monitor/monitor-gtest.sh b/sbin/claims-test/monitor/monitor-gtest.sh new file mode 100755 index 000000000..b0668114b --- /dev/null +++ b/sbin/claims-test/monitor/monitor-gtest.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +while [ 1 ] +do + procid=`ps x | grep -w ./install/claimsserver | grep -v grep | awk '{print $1}'` + if [ "$procid" = "" ]; then + echo "claimsserver is aborted. Try to restart..." + ./sbin/stop-all.sh + if [ "$local_disk_mode" = "1" ]; then + rm $data* + fi + if [ -d "install" ]; then + if [ ! -f "install/claimsserver" ]; then + ./sbin/1-compile.sh + fi + else + ./sbin/1-compile.sh + fi + ./sbin/3-deploy.sh + ./sbin/start-all.sh + sleep 3 + cd sbin/claims-test + ./claimstest.sh 1 1 load_tpch_sf1_1p + cd ../../ + else + echo "claimsserver is running..." 
+ ./install/test --ip $master --port $client_listener_port + fi +done diff --git a/sbin/claims-test/monitor/monitor-guizhou-90s.sh b/sbin/claims-test/monitor/monitor-guizhou-90s.sh new file mode 100755 index 000000000..5b4deacef --- /dev/null +++ b/sbin/claims-test/monitor/monitor-guizhou-90s.sh @@ -0,0 +1,33 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME +while [ 1 ] +do + procid=`ps x | grep -w ./install/claimsserver | grep -v grep | awk '{print $1}'` + if [ "$procid" = "" ]; then + ((loops++)) + echo "claimsserver is aborted [$(date '+%Y-%m-%d %H:%M:%S')] [$loops] times. Try to restart..." >> ./sbin/claims-test/gtestfor.loops + echo "claimsserver is aborted [$(date '+%Y-%m-%d %H:%M:%S')] [$loops] times. Try to restart..." >> ./sbin/claims-test/gtestfor.log +# ./sbin/claims-test/claimsforgz.sh > /dev/null 2>&1 & + ./sbin/stop-all.sh + sleep 3 + ./sbin/start-all.sh + sleep 3 + ttimes=0 + else + ((ttimes++)) + echo "[$ttimes][$(date '+%Y-%m-%d %H:%M:%S')] claimsserver is running..." + echo "[$ttimes][$(date '+%Y-%m-%d %H:%M:%S')] claimsserver is running..." 
>> ./sbin/claims-test/gtestfor.loops + cd sbin/claims-test/ +# read -p "Hit the ENTER |__>" tempuseless +# echo $tempuseless + ./claimstestnr.sh 1 20 gtestfor >> gtestfor.log + cd ../../ + # now in CLAIMS_HOME + fi +done diff --git a/sbin/claims-test/monitor/monitor-guizhou.sh b/sbin/claims-test/monitor/monitor-guizhou.sh new file mode 100755 index 000000000..35a08874d --- /dev/null +++ b/sbin/claims-test/monitor/monitor-guizhou.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME +while [ 1 ] +do + procid=`ps x | grep -w ./install/claimsserver | grep -v grep | awk '{print $1}'` + if [ "$procid" = "" ]; then + ((loops++)) + echo "claimsserver is aborted [$(date '+%Y-%m-%d %H:%M:%S')] [$loops] times. Try to restart..." >> guizhoutest.log + ./sbin/claims-test/claimsforgz.sh > /dev/null 2>&1 & + sleep 3 + else + echo "claimsserver is running..." + cd sbin/claims-test/ +# read -p "Hit the ENTER |__>" tempuseless +# echo $tempuseless + ./claimstest.sh 1 1 huishuiQ2 + cd ../../ + # now in CLAIMS_HOME + fi +done diff --git a/sbin/claims-test/monitor/monitor_loader.py b/sbin/claims-test/monitor/monitor_loader.py new file mode 100755 index 000000000..42c4cfaba --- /dev/null +++ b/sbin/claims-test/monitor/monitor_loader.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# Filename : monitor_loader.py +# +# This script must be running on master + +usage = u''' +this scripts main role is to load data into +different data path, eg: + /home/claims/data/tpc-h/sf1/1partition + /home/claims/data/tpc-h/sf1/4partition + /home/claims/data/tpc-h/sf1/8partition + /home/claims/data/tpc-h/sf10/1partition + /home/claims/data/tpc-h/sf10/4partition + /home/claims/data/tpc-h/sf10/8partition + /home/claims/data/tpc-h/sf100/1partition + /home/claims/data/tpc-h/sf100/4partition + /home/claims/data/tpc-h/sf100/8partition +and different disk mode, eg: + local disk + hdfs 
+while we load data into these places, we should +first modify the config file of claims: + sbin/2-claims-conf/cluster.config +then deploy it, call script: + sbin/3-deploy.sh +then restart claimsserver: + sbin/start-all.sh +then start client and load the data, this step will call script: + sbin/claims-test/claimstest.sh 1 1 +here we do all these automatically in this script. +and you can also add your own load path and ddl into this script later. +''' +import os +import sys +import ConfigParser +import subprocess +import time + +''' + local_disk_mode + 0 : hdfs mode + 1 : local disk mode + data + the path for claims to store data + case + first: loop times + second: concurrency + third: test case in sbin/claims-test/testcase/ + the case tuple can contain numerous object +''' +''' +mode_data_case=( + (0, "/home/claims/data/tpc-h/sf1/1partition/", ((1, 1, "ddl_tpch_sf1_1partition"),)) + ,(0, "/home/claims/data/tpc-h/sf1/4partition/", ((1, 1, "ddl_tpch_sf1_4partition"),)) + ,(0, "/home/claims/data/tpc-h/sf1/8partition/", ((1, 1, "ddl_tpch_sf1_8partition"),)) + ,(0, "/home/claims/data/tpc-h/sf10/1partition/", ((1, 1, "ddl_tpch_sf10_1partition"),)) + ,(0, "/home/claims/data/tpc-h/sf10/4partition/", ((1, 1, "ddl_tpch_sf10_4partition"),)) + ,(0, "/home/claims/data/tpc-h/sf10/8partition/", ((1, 1, "ddl_tpch_sf10_8partition"),)) + ,(0, "/home/claims/data/tpc-h/sf100/1partition/", ((1, 1, "ddl_tpch_sf100_1partition"),)) + ,(0, "/home/claims/data/tpc-h/sf100/4partition/", ((1, 1, "ddl_tpch_sf100_4partition"),)) + ,(0, "/home/claims/data/tpc-h/sf100/8partition/", ((1, 1, "ddl_tpch_sf100_8partition"),)) + ,(1, "/home/claims/data/tpc-h/sf1/1partition/", ((1, 1, "ddl_tpch_sf1_1partition"),)) + ,(1, "/home/claims/data/tpc-h/sf1/4partition/", ((1, 1, "ddl_tpch_sf1_4partition"),)) + ,(1, "/home/claims/data/tpc-h/sf1/8partition/", ((1, 1, "ddl_tpch_sf1_8partition"),)) + ,(1, "/home/claims/data/tpc-h/sf10/1partition/", ((1, 1, "ddl_tpch_sf10_1partition"),)) + ,(1, 
"/home/claims/data/tpc-h/sf10/4partition/", ((1, 1, "ddl_tpch_sf10_4partition"),)) + ,(1, "/home/claims/data/tpc-h/sf10/8partition/", ((1, 1, "ddl_tpch_sf10_8partition"),)) + ,(1, "/home/claims/data/tpc-h/sf100/1partition/", ((1, 1, "ddl_tpch_sf100_1partition"),)) + ,(1, "/home/claims/data/tpc-h/sf100/4partition/", ((1, 1, "ddl_tpch_sf100_4partition"),)) + ,(1, "/home/claims/data/tpc-h/sf100/8partition/", ((1, 1, "ddl_tpch_sf100_8partition"),)) + ) +''' + +mode_data_case=( + + (1, "/home/claims/data/tpc-h/sf1/1partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf1/4partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf1/8partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf10/1partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf10/4partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf10/8partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf100/1partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, 
"tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf100/4partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ,(1, "/home/claims/data/tpc-h/sf100/8partition/", ((1, 1, "tpc_sql_1"), (1, 1, "tpc_sql_3"), (1, 1, "tpc_sql_5"), (1, 1, "tpc_sql_6"), (1, 1, "tpc_sql_10"), (1, 1, "tpc_sql_12"), (1, 1, "tpc_sql_13"), (1, 1, "tpc_sql_17"),)) + ) + + +claimshome=os.getenv("CLAIMS_HOME") +cf = ConfigParser.ConfigParser() +cf.read("%s/sbin/2-claims-conf/cluster.config"%(claimshome)) +hostname = cf.get("cluster", "master") +user = cf.get("cluster", "user") +config_disk_mode="ssh -f -n -l %s %s \"cd $CLAIMS_HOME/sbin/2-claims-conf; sed -i 's:^local_disk_mode = .*$:local_disk_mode = %s:g' ./cluster.config; exit;\";sleep 1;" +config_data="ssh -f -n -l %s %s \"cd $CLAIMS_HOME/sbin/2-claims-conf; sed -i 's:^data = .*$:data = %s:g' ./cluster.config; exit;\";sleep 1;" +deploy="ssh -f -n -l %s %s \"$CLAIMS_HOME/sbin/3-deploy.sh config; exit;\";" +startall="%s/sbin/start-all.sh;" +stopall="%s/sbin/stop-all.sh;" +runtest="cd %s/sbin/claims-test; ./claimstest.sh %d %d %s;" + +def autotest(): + for i in mode_data_case: + command1=config_disk_mode%(user, hostname, i[0])+config_data%(user, hostname, i[1])+deploy%(user, hostname) + print command1 + os.system(command1) + time.sleep(5) + for j in i[2]: + os.system(stopall%claimshome) + time.sleep(25) + os.system(startall%claimshome) + time.sleep(30) + print "test:[%s],loop:[%d],currencuy:[%d]" % (j[2], j[0], j[1]) + os.system(runtest % (claimshome, j[0], j[1], j[2])) + time.sleep(5) + os.system(stopall%claimshome) + time.sleep(5) + +def main(): + if len(sys.argv)>1: + if sys.argv[1]=="--help": + print usage + else: + print "unrecognized option '%s'" % sys.argv[1] + print "use the --help option for usage information" + else: + #print "time sleep 3600s" + 
#time.sleep(3600) + #print "start auto test" + autotest() + +if __name__=="__main__": + main() + diff --git a/sbin/claims-test/mserver.sh b/sbin/claims-test/mserver.sh new file mode 100755 index 000000000..cc099095a --- /dev/null +++ b/sbin/claims-test/mserver.sh @@ -0,0 +1,34 @@ +#!/bin/sh + + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +while [ 1 ] +do + procid=`ps x | grep -w ./install/claimsserver | grep -v grep | awk '{print $1}'` + if [ "$procid" = "" ]; then + echo "claimsserver is aborted. Try to restart..." + ./sbin/stop-all.sh +# if [ "$local_disk_mode" = "1" ]; then +# rm $data* +# fi + if [ -d "install" ]; then + if [ ! -f "install/claimsserver" ]; then + ./sbin/1-compile.sh + fi + else + ./sbin/1-compile.sh + fi + ./sbin/3-deploy.sh + ./sbin/start-all.sh + sleep 3 + else + echo "claimsserver is running..." + sleep 180 + fi +done diff --git a/sbin/claims-test/performance.py b/sbin/claims-test/performance.py new file mode 100755 index 000000000..0ab42baeb --- /dev/null +++ b/sbin/claims-test/performance.py @@ -0,0 +1,26 @@ +#!/usr/bin/python +import os,sys +import glob + +SQLNAME = [ "tpc_sql_1", "tpc_sql_3", "tpc_sql_5", "tpc_sql_6", "tpc_sql_10", "tpc_sql_12", "tpc_sql_13", "tpc_sql_17", ] + +def listresult(): + with open('./testresult/output.csv', 'wt') as handle: + for sql in SQLNAME: + print sql + handle.writelines(sql + ',\n') + filelst = glob.glob('./testresult/'+sql+'-*') + filelst.sort() + for resfile in filelst: + print resfile + fobj = open(resfile, 'r') + line = fobj.readline() + while line: + if not line.find('tuples') == -1: + print line + handle.writelines(','+line) + line = fobj.readline() + fobj.close() + +if __name__=="__main__": + listresult() diff --git a/sbin/claims-test/process_claims_result.sh b/sbin/claims-test/process_claims_result.sh new file mode 100755 index 000000000..48ef32f7b --- /dev/null +++ 
b/sbin/claims-test/process_claims_result.sh @@ -0,0 +1,15 @@ +result=$1 +file=`find ./testresult/${result} -maxdepth 1 -name "*.result"` +for i in $file +do +sed -i '1,10d' $i +sed -i '$d' $i +sed -i '$d' $i +sed -i '$d' $i +sed -i '$d' $i +sed -i "/[>]/d " $i +sed -i "1,4d" $i +sed -i 's/ //g' $i +sed -i "s/\.[0-9]*//g" $i +sed -i "s/|//g" $i +done diff --git a/sbin/claims-test/process_hawq_result.sh b/sbin/claims-test/process_hawq_result.sh new file mode 100755 index 000000000..a3ac4c685 --- /dev/null +++ b/sbin/claims-test/process_hawq_result.sh @@ -0,0 +1,10 @@ +file=`find ./hawq_r -maxdepth 1 -name "*.result"` +for i in $file +do +sed -i '1,2d' $i +sed -i '$d' $i +sed -i '$d' $i +sed -i 's/ //g' $i +sed -i "s/\.[0-9]*//g" $i +sed -i "s/|//g" $i +done diff --git a/sbin/claims-test/pstree.sh b/sbin/claims-test/pstree.sh new file mode 100755 index 000000000..bef5301fb --- /dev/null +++ b/sbin/claims-test/pstree.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +outexe=`pidof outexe` +echo $outexe +while [ 1 ] +do + pstree -p $outexe + sleep 1 +done diff --git a/sbin/claims-test/reoom.sh b/sbin/claims-test/reoom.sh new file mode 100755 index 000000000..24ed2bdb2 --- /dev/null +++ b/sbin/claims-test/reoom.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf/ +source ./load-config.sh +source ./generate-config.sh +cd ../../ +# now in CLAIMS_HOME + +#allcluster="90s190 90s191 90s192 90s193 90s194 90s195 90s196 90s197 90s198 90s199" + +for node in $master $slaves #$allcluster +do +{ + ssh -f -n -l $user $node "pgrep -f ./install/claimsserver | while read PID;do sudo echo -17 > /proc/\$PID/oom_adj;done" + echo -e "$node claimsserver echo -17 to oom_adj [\033[32mOK\033[0m]" +} +done diff --git a/sbin/claims-test/run-concert.sh b/sbin/claims-test/run-concert.sh new file mode 100755 index 000000000..df5d5e81d --- /dev/null +++ b/sbin/claims-test/run-concert.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname 
"${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +cd sbin/claims-test/ +while [ 1 ] +do + read -p "Hit the ENTER |__>" tempuseless + echo $tempuseless + starttime=$(date '+%Y-%m-%d %H:%M:%S') +# ./claimstest.sh 1 1 concertdroptable +# ./claimstest.sh 1 1 concertcreatetable +# ./claimstest.sh 1 1 concertload50wdata + ./claimstest.sh 1000 20 AggregationLargeGroups + echo "start time:$starttime" + echo "end time:$(date '+%Y-%m-%d %H:%M:%S')" +done +cd ../../ diff --git a/sbin/claims-test/run-gtest.sh b/sbin/claims-test/run-gtest.sh new file mode 100755 index 000000000..646fc7580 --- /dev/null +++ b/sbin/claims-test/run-gtest.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +starttime=$(date '+%Y-%m-%d %H:%M:%S') +while [ 1 ] +do + echo "claimsserver is running..." + ((loop++)) + echo -e "\033[33m=======[$loop]=time:[`date '+%Y-%m-%d %H:%M:%S'`]========\033[0m" + ./install/test --ip $master --port $client_listener_port + + if [ $? -ne 0 ]; then + echo "test error." 
+ break + fi +done + +echo "loops:$loop" +echo "start time:$starttime" +echo "end time:$(date '+%Y-%m-%d %H:%M:%S')" diff --git a/sbin/claims-test/run-gtestfor.sh b/sbin/claims-test/run-gtestfor.sh new file mode 100755 index 000000000..0aad4df87 --- /dev/null +++ b/sbin/claims-test/run-gtestfor.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +cd sbin/claims-test/ +while [ 1 ] +do + read -p "Hit the ENTER |__>" tempuseless + echo $tempuseless + starttime=$(date '+%Y-%m-%d %H:%M:%S') +# ./claimstest.sh 1 1 concertdroptable +# ./claimstest.sh 1 1 concertcreatetable +# ./claimstest.sh 1 1 concertload50wdata + ./claimstest.sh 20000 20 gtestfor + echo "start time:$starttime" + echo "end time:$(date '+%Y-%m-%d %H:%M:%S')" +done +cd ../../ diff --git a/sbin/claims-test/run-gtestnr.sh b/sbin/claims-test/run-gtestnr.sh new file mode 100755 index 000000000..b3e3bf37a --- /dev/null +++ b/sbin/claims-test/run-gtestnr.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +cd sbin/claims-test/ +while [ 1 ] +do + read -p "Hit the ENTER |__>" tempuseless + echo $tempuseless + starttime=$(date '+%Y-%m-%d %H:%M:%S') +# ./claimstest.sh 1 1 concertdroptable +# ./claimstest.sh 1 1 concertcreatetable +# ./claimstest.sh 1 1 concertload50wdata + ./claimstestnr.sh 10000 1 gtestfor + echo "start time:$starttime" + echo "end time:$(date '+%Y-%m-%d %H:%M:%S')" +done +cd ../../ diff --git a/sbin/claims-test/run-gtestsingle.sh b/sbin/claims-test/run-gtestsingle.sh new file mode 100755 index 000000000..e70b75766 --- /dev/null +++ b/sbin/claims-test/run-gtestsingle.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd ../2-claims-conf +source ./load-config.sh +cd ../../ +# 
now in CLAIMS_HOME + +cd sbin/claims-test/ +while [ 1 ] +do + read -p "Hit the ENTER |__>" tempuseless + echo $tempuseless + starttime=$(date '+%Y-%m-%d %H:%M:%S') +# ./claimstest.sh 1 1 concertdroptable +# ./claimstest.sh 1 1 concertcreatetable +# ./claimstest.sh 1 1 concertload50wdata + ./claimstestnr.sh 30000 30 AggregationLargeGroups + echo "start time:$starttime" + echo "end time:$(date '+%Y-%m-%d %H:%M:%S')" +done +cd ../../ diff --git a/sbin/claims-test/statustracker.sh b/sbin/claims-test/statustracker.sh new file mode 100755 index 000000000..53e6e71d8 --- /dev/null +++ b/sbin/claims-test/statustracker.sh @@ -0,0 +1,35 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR + +cd ../2-claims-conf/ +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +timestr=$(date +%Y-%m-%d) +thisstatus=claimsserver.$timestr.status + +while [ 1 ] +do + + if [ -f "$runclaimsprocid" ]; then + claimspids=`sed '/^claimsserver=/!d;s/.*=//' $runclaimsprocid` + if [ "$claimspids" != "" ]; then + echo "track claimsserver pid : [$claimspids]" + echo "========time:[$(date '+%Y-%m-%d %H:%M:%S')]========" >> $thisstatus + cat /proc/$claimspids/status >> $thisstatus + if [ $? -ne 0 ] ; then + echo "claimsserver is aborted abnormally." + break + fi + fi + sleep 60 + else + echo "claimsserver pid file does not exist." 
+ break + fi + +done + diff --git a/sbin/claims-test/testcase/AggregationLargeGroups.test b/sbin/claims-test/testcase/AggregationLargeGroups.test new file mode 100644 index 000000000..7c8ce426a --- /dev/null +++ b/sbin/claims-test/testcase/AggregationLargeGroups.test @@ -0,0 +1,4 @@ +select row_id from LINEITEM group by row_id; +select L_PARTKEY,count(*) from LINEITEM group by L_PARTKEY; +select L_RETURNFLAG from LINEITEM group by L_RETURNFLAG; +exit; diff --git a/sbin/claims-test/testcase/cancel.test b/sbin/claims-test/testcase/cancel.test new file mode 100644 index 000000000..defc5edd3 --- /dev/null +++ b/sbin/claims-test/testcase/cancel.test @@ -0,0 +1,5 @@ +select count(*) from PART,REGION where REGION.row_id>3; +select count(*) as a from LINEITEM where row_id < 3000000 order by a ; +select count(*),sum(L_QUANTITY) from LINEITEM; +select count(*) from (select row_id from REGION where row_id>33) as a, (select row_id from NATION where row_id>40) as b; +exit; \ No newline at end of file diff --git a/sbin/claims-test/testcase/concert.test b/sbin/claims-test/testcase/concert.test new file mode 100644 index 000000000..79ffc18cf --- /dev/null +++ b/sbin/claims-test/testcase/concert.test @@ -0,0 +1,39 @@ +insert into t1 values("张三", "123456789012345678", 100, "2012-11-05 08:15:29", "2013-03-05 00:00:00", 0); +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID 
= "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +insert into t2 values("张三", "123456789012345678", 100, "2012-11-05 08:15:29", "2013-03-05 00:00:00", 1); +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +exit; + diff --git a/sbin/claims-test/testcase/concert2.test b/sbin/claims-test/testcase/concert2.test new file mode 100644 index 000000000..03b8c658b --- /dev/null +++ b/sbin/claims-test/testcase/concert2.test @@ -0,0 +1,37 @@ +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; 
+select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where ID = "123456789012345678"; +select * from t1 where ID = "678938285229572993"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where ID = "123456789012345678"; +select * from t2 where ID = "678938285229572993"; +select count(*) from t2; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +exit; + diff --git a/sbin/claims-test/testcase/concert3.test b/sbin/claims-test/testcase/concert3.test new file mode 100644 index 000000000..161be8b77 --- /dev/null +++ b/sbin/claims-test/testcase/concert3.test @@ -0,0 +1,26 @@ 
+select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where 
t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +exit; diff --git a/sbin/claims-test/testcase/concert4.test b/sbin/claims-test/testcase/concert4.test new file mode 100644 index 000000000..90c7dcfbf --- /dev/null +++ b/sbin/claims-test/testcase/concert4.test @@ -0,0 +1,26 @@ +select * from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select * from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select * from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select * from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select * from t1 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t1; +select * from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select * from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select * from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select * from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select * from t2 where BUY_TICKET_TIME < "2015-12-31 00:00:00" and SHOW_TIME > "2015-12-31 00:00:00"; +select count(*) from t2; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +select count(*) from t1, t2 where t1.ID=t2.ID; +exit; diff --git a/sbin/claims-test/testcase/concertbak.test b/sbin/claims-test/testcase/concertbak.test new file mode 100644 index 000000000..1228131c0 --- /dev/null +++ 
b/sbin/claims-test/testcase/concertbak.test @@ -0,0 +1,23 @@ +insert into t1 values("张三", "123456789012345678", 100, "2012-11-05 08:15:29", "2013-03-05 00:00:00", 0); +select * from t1 where ID = "123456789012345678"; +select count(*) from t1; +select * from t1 where ID = "123456789012345678"; +select count(*) from t1; +select * from t1 where ID = "123456789012345678"; +select count(*) from t1; +select * from t1 where ID = "123456789012345678"; +select count(*) from t1; +select * from t1 where ID = "123456789012345678"; +select count(*) from t1; +insert into t2 values("张三", "123456789012345678", 100, "2012-11-05 08:15:29", "2013-03-05 00:00:00", 1); +select * from t2 where ID = "123456789012345678"; +select count(*) from t1; +select * from t2 where ID = "123456789012345678"; +select count(*) from t1; +select * from t2 where ID = "123456789012345678"; +select count(*) from t1; +select * from t2 where ID = "123456789012345678"; +select count(*) from t1; +select * from t2 where ID = "123456789012345678"; +select count(*) from t1; +exit; diff --git a/sbin/claims-test/testcase/concertcreatetable.test b/sbin/claims-test/testcase/concertcreatetable.test new file mode 100644 index 000000000..2c56a1136 --- /dev/null +++ b/sbin/claims-test/testcase/concertcreatetable.test @@ -0,0 +1,5 @@ +create table t1(name VARCHAR(10), ID VARCHAR(20), SEAT_ID int, BUY_TICKET_TIME DATETIME, SHOW_TIME DATETIME, CHECKED int); +create projection on t1(name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED) number = 4 partitioned on ID; +create table t2(name VARCHAR(10), ID VARCHAR(20), SEAT_ID int, BUY_TICKET_TIME DATETIME, SHOW_TIME DATETIME, CHECKED int); +create projection on t2(name, ID, SEAT_ID, BUY_TICKET_TIME, SHOW_TIME, CHECKED) number = 4 partitioned on ID; +exit; diff --git a/sbin/claims-test/testcase/concertdroptable.test b/sbin/claims-test/testcase/concertdroptable.test new file mode 100644 index 000000000..0865f28db --- /dev/null +++ 
b/sbin/claims-test/testcase/concertdroptable.test @@ -0,0 +1,3 @@ +drop table t1; +drop table t2; +exit; diff --git a/sbin/claims-test/testcase/concertload50wdata.test b/sbin/claims-test/testcase/concertload50wdata.test new file mode 100644 index 000000000..5c43d6979 --- /dev/null +++ b/sbin/claims-test/testcase/concertload50wdata.test @@ -0,0 +1,3 @@ +load table t1 from "/home/imdb/rawData/concert/concert.txt" with ',','\n'; +load table t2 from "/home/imdb/rawData/concert/concert.txt" with ',','\n'; +exit; diff --git a/sbin/claims-test/testcase/create_and_drop_table.test b/sbin/claims-test/testcase/create_and_drop_table.test new file mode 100644 index 000000000..1e3dc0561 --- /dev/null +++ b/sbin/claims-test/testcase/create_and_drop_table.test @@ -0,0 +1,112 @@ + +drop table NATION; + +create table NATION( +N_NATIONKEY bigint unsigned, +N_NAME varchar(25), +N_REGIONKEY bigint unsigned, +N_COMMENT varchar(152) +); + +create projection on NATION( +N_NATIONKEY, +N_NAME, +N_REGIONKEY, +N_COMMENT +) number = 4 partitioned on N_NATIONKEY; + +drop table NATION; + +drop table PART; + +create table PART( +P_PARTKEY bigint unsigned, +P_NAME varchar(55), +P_MFGR varchar(25), +P_BRAND varchar(10), +P_TYPE varchar(25), +P_SIZE int, +P_CONTAINER varchar(10), +P_RETAILPRICE decimal(20,4), +P_COMMENT varchar(23) +); + + +drop table PART; + +create table PART( +P_PARTKEY bigint unsigned, +P_NAME varchar(55), +P_MFGR varchar(25), +P_BRAND varchar(10), +P_TYPE varchar(25), +P_SIZE int, +P_CONTAINER varchar(10), +P_RETAILPRICE decimal(20,4), +P_COMMENT varchar(23) +); + +create projection on PART( +P_PARTKEY, +P_NAME, +P_MFGR, +P_BRAND, +P_TYPE, +P_SIZE, +P_CONTAINER, +P_RETAILPRICE, +P_COMMENT +) number = 1 partitioned on P_PARTKEY; + +drop table PART; + +drop table NATION; + +create table NATION( +N_NATIONKEY bigint unsigned, +N_NAME varchar(25), +N_REGIONKEY bigint unsigned, +N_COMMENT varchar(152) +); + +create projection on NATION( +N_NATIONKEY, +N_NAME, +N_REGIONKEY, +N_COMMENT 
+) number = 4 partitioned on N_NATIONKEY; + + +load table NATION from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl" with '|','\n'; + +select * from NATION; + + +drop table PART; + +create table PART( +P_PARTKEY bigint unsigned, +P_NAME varchar(55), +P_MFGR varchar(25), +P_BRAND varchar(10), +P_TYPE varchar(25), +P_SIZE int, +P_CONTAINER varchar(10), +P_RETAILPRICE decimal(20,4), +P_COMMENT varchar(23) +); + +create projection on PART( +P_PARTKEY, +P_NAME, +P_MFGR, +P_BRAND, +P_TYPE, +P_SIZE, +P_CONTAINER, +P_RETAILPRICE, +P_COMMENT +) number = 1 partitioned on P_PARTKEY; + +drop table PART; + diff --git a/sbin/claims-test/testcase/ddl_tpch_sf1_4partition.test b/sbin/claims-test/testcase/ddl_tpch_sf1_4partition.test index 729b7b7a9..032268983 100644 --- a/sbin/claims-test/testcase/ddl_tpch_sf1_4partition.test +++ b/sbin/claims-test/testcase/ddl_tpch_sf1_4partition.test @@ -58,7 +58,7 @@ PS_SUPPKEY, PS_AVAILQTY, PS_SUPPLYCOST, PS_COMMENT -) number = 1 partitioned on PS_PARTKEY; +) number = 4 partitioned on PS_PARTKEY; load table PARTSUPP from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl" with '|','\n'; create table CUSTOMER( diff --git a/sbin/claims-test/testcase/delete.test b/sbin/claims-test/testcase/delete.test new file mode 100644 index 000000000..7089cfe51 --- /dev/null +++ b/sbin/claims-test/testcase/delete.test @@ -0,0 +1,35 @@ +drop table if exists t1; +create table t1(a int, b int, c int); +create projection on t1(a, b, c) number = 1 partitioned on row_id; +show tables; +desc t1; +insert into t1 values(1, 1, 1); +insert into t1 values(1, 2, 1); +insert into t1 values(1, 3, 1); +insert into t1 values(1, 4, 1); +insert into t1 values(1, 5, 1); +insert into t1 values(1, 6, 1); +insert into t1 values(1, 7, 1); +insert into t1 values(1, 8, 1); +insert into t1 values(1, 9, 1); + +select * from t1; + +delete from t1 where b = 2; +select * from t1; +delete from t1 where b = 3; +select * from t1; +delete from t1 where b = 4; +select * from t1; 
+delete from t1 where b = 5; +select * from t1; +delete from t1 where b = 6; +select * from t1; +delete from t1 where b = 7; +select * from t1; +delete from t1 where b = 8; +select * from t1; +delete from t1 where b = 9; +select * from t1; + +exit; diff --git a/sbin/claims-test/testcase/equaljoin.test b/sbin/claims-test/testcase/equaljoin.test new file mode 100644 index 000000000..750aca6e9 --- /dev/null +++ b/sbin/claims-test/testcase/equaljoin.test @@ -0,0 +1,2 @@ +select count(*) from LINEITEM,PART where PART.row_id=LINEITEM.row_id; +exit; diff --git a/sbin/claims-test/testcase/full_join.test b/sbin/claims-test/testcase/full_join.test new file mode 100644 index 000000000..82629ef84 --- /dev/null +++ b/sbin/claims-test/testcase/full_join.test @@ -0,0 +1,50 @@ +select count(*) from A full join B on A.id = B.id; +select count(*) from A full join B on A.num = B.name; +select count(*) from A full join B on A.id = B.id AND A.num = B.name; +select count(*) from A full join B on A.strdate = B.strdate; + +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join REGION on REGION.R_COMMENT = LINEITEM.L_COMMENT; + +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = 
LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; +select count(*) from REGION FULL join LINEITEM on REGION.R_COMMENT = LINEITEM.L_COMMENT; + +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from LINEITEM FULL join ORDERS on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; + +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = 
LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; +select count(*) from ORDERS FULL join LINEITEM on ORDERS.O_COMMENT = LINEITEM.L_COMMENT; + +exit; diff --git a/sbin/claims-test/testcase/gtestfor.test b/sbin/claims-test/testcase/gtestfor.test new file mode 100644 index 000000000..5cba470f4 --- /dev/null +++ b/sbin/claims-test/testcase/gtestfor.test @@ -0,0 +1,16 @@ +select count(*) from LINEITEM; +select count(*) from LINEITEM where row_id < 6000000; +select count(*) from LINEITEM where row_id < 3000000; +select count(*) from LINEITEM where row_id < 300; +select count(*),sum(L_QUANTITY) from LINEITEM; +select row_id from LINEITEM group by row_id; +select L_PARTKEY,count(*) from LINEITEM group by L_PARTKEY; +select L_RETURNFLAG from LINEITEM group by L_RETURNFLAG; +select count(*) from PART,LINEITEM where PART.row_id=LINEITEM.row_id; +select count(*) from PART,REGION; +select count(*) from (select row_id from NATION where row_id<3) as a,(select row_id from REGION where row_id=2) as b; +select count(*) from (select row_id from PART) as a, (select row_id from REGION where row_id=222) as b; +select count(*) from (select row_id from REGION where row_id>33) as a,(select row_id from PART) as b; +select count(*) from (select row_id from REGION where row_id>33) as a,(select row_id from NATION where row_id>40) as b; +select count(*) from PART,LINEITEM where PART.row_id%10=1 and LINEITEM.row_id % 10 =1 and PART.row_id = LINEITEM.row_id; +exit; diff --git 
a/sbin/claims-test/testcase/guizhouDDL.test b/sbin/claims-test/testcase/guizhouDDL.test new file mode 100644 index 000000000..4a5b7d0d1 --- /dev/null +++ b/sbin/claims-test/testcase/guizhouDDL.test @@ -0,0 +1,1090 @@ + +DROP TABLE IF EXISTS `family`; +CREATE TABLE `family` ( +`FamilyID` int(10), +`GroupID` int(10), +`IsPoorFamily` int(10), + `IsImmigrant` int(10), +`MasterName` varchar(255), +`RID` varchar(255), +`HeadCount` int(10), +`Age` int(10), +`Phone` varchar(255) , +`Property` varchar(255) , +`IdentificationStandard` varchar(255), +`PovertyCauses` varchar(255), +`OtherPovertyCauses` varchar(255), +`Income_RMB` decimal(10,2), +`Bank` varchar(255), +`BankAccount` varchar(255), +`PovertyScore` int(10), + `PlanGetRidOfPovertyTime` varchar(32), +`ActualGetRidOfPovertyTime` datetime , +`FourLineStatus` varchar(255), +`HelpCategory` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime, + `Outcoming` varchar(255), + `Location` varchar(255) +); +CREATE PROJECTION ON `family` ( +`FamilyID`, +`GroupID`, +`IsPoorFamily`, + `IsImmigrant`, +`MasterName`, +`RID`, +`HeadCount`, +`Age`, +`Phone`, +`Property`, +`IdentificationStandard`, +`PovertyCauses`, +`OtherPovertyCauses`, +`Income_RMB`, +`Bank`, +`BankAccount`, +`PovertyScore`, +`PlanGetRidOfPovertyTime`, +`ActualGetRidOfPovertyTime`, +`FourLineStatus`, +`HelpCategory`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime`, +`Outcoming`, + `Location` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `familygroup`; +CREATE TABLE `familygroup` ( +`GroupID` int(10), +`Name` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime, +`VillageID` int(10) +); +CREATE PROJECTION ON `familygroup` ( +`GroupID`, +`Name`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime`, +`VillageID` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `familyimmigration`; +CREATE TABLE `familyimmigration` ( 
+`FamilyImmigrationID` int(10), +`FamilyID` int(10), +`MoveType` varchar(255), +`IsInPlan` varchar(255), +`WillToMove` varchar(255), + `MoveYear` int(11), +`MoveMode` varchar(255), +`SettlementPlace` varchar(255), +`MoveProblem` varchar(255), +`SettlementMode` varchar(255), +`SettlementArea` decimal(10,2), +`HousingFund` decimal(10,2), +`HousingSubsidy` decimal(10,2), +`DismantleBonus` decimal(10,2), +`SelfFinancing` decimal(10,2), +`PlanMoveTime` datetime, + `ActualMoveTime` varchar(32), +`PlanDismantleTime` datetime, + `ActualDismantleTime` varchar(32), + `PostSupportPlan` varchar(8192), + `SettlementType` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `familyimmigration` ( +`FamilyImmigrationID`, +`FamilyID`, +`MoveType`, +`IsInPlan`, +`WillToMove`, + `MoveYear`, +`MoveMode`, +`SettlementPlace`, +`MoveProblem`, +`SettlementMode`, +`SettlementArea`, +`HousingFund`, +`HousingSubsidy`, +`DismantleBonus`, +`SelfFinancing`, +`PlanMoveTime`, + `ActualMoveTime`, +`PlanDismantleTime`, + `ActualDismantleTime`, + `PostSupportPlan`, + `SettlementType`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `familyincome`; +CREATE TABLE `familyincome` ( +`FamilyIncomeID` int(10), +`FamilyID` int(10), +`A32OutstandingLoan` decimal(10,2), +`N02Annualncome` decimal(10,2), +`A34WageIncome` decimal(10,2), +`N03PropertyIncome` decimal(10,2), +`A35ProductiveIncome` decimal(10,2), +`N01AnnualNetIncome` decimal(10,2), +`A36Subsidy` decimal(10,2), +`N04ProductiveCost` decimal(10,2), +`A36aFamilyPlanningAwards` decimal(10,2), +`A36bMinGovSubsidy` decimal(10,2), +`A36cPensionInsurance` decimal(10,2), +`A36dNCMSMedicalFee` decimal(10,2), +`A36eMedicalAssistanceFund` decimal(10,2), +`A36fEcoCompensation` decimal(10,2), +`EducationalDebt` decimal(10,2), +`IncomeOfEconomicCrop` decimal(10,2), +`OwnedFoodstuff` decimal(10,2), 
+`FamilyFarmingIncome` decimal(10,2), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), + `UpdateTime` datetime, + `TransferIncome` decimal(10,2) +); +CREATE PROJECTION ON `familyincome` ( +`FamilyIncomeID`, +`FamilyID`, +`A32OutstandingLoan`, +`N02Annualncome`, +`A34WageIncome`, +`N03PropertyIncome`, +`A35ProductiveIncome`, +`N01AnnualNetIncome`, +`A36Subsidy`, +`N04ProductiveCost`, +`A36aFamilyPlanningAwards`, +`A36bMinGovSubsidy` , +`A36cPensionInsurance`, +`A36dNCMSMedicalFee`, +`A36eMedicalAssistanceFund`, +`A36fEcoCompensation`, +`EducationalDebt`, +`IncomeOfEconomicCrop`, +`OwnedFoodstuff`, +`FamilyFarmingIncome`, +`Creater`, +`CreateTime`, +`Reviser`, + `UpdateTime`, + `TransferIncome` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `familyliving`; +CREATE TABLE `familyliving` ( +`FamilyLivingID` int(10), +`FamilyID` int(10), +`A21HasWater` varchar(255), +`A22IsSafeWater` varchar(255), +`WaterSituation` varchar(255), +`A23HasElectric` varchar(255), +`A24HasRadioAndTV` varchar(255), +`A25DistanceToMainRoad` varchar(255), +`A26RoadType` varchar(255), +`DurableConsumerGood` varchar(255), +`A27HousingArea` decimal(10,2), +`A28IsDangerousBuilding` varchar(255), +`A29HasToilet` varchar(255), +`A30MainFuelType` varchar(255), +`A31HasJoinedFC` varchar(255), +`BulidingStructure` varchar(255), +`RelocationStatus` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), + `UpdateTime` datetime , + `HouseBaseArea` decimal(10,2) , + `ProductionRoomArea` decimal(10,2) , + `A17CultivatedArea` decimal(10,2) , + `A17aEffectiveIrrigationArea` decimal(10,2) , + `A17bField` decimal(10,2) , + `A17cField` decimal(10,2) , + `A18ForestArea` decimal(10,2) , + `A18aReforestationCultivatedArea` decimal(10,2) , + `A18bFruitTreeArea` decimal(10,2) , + `A19PastureArea` decimal(10,2) , + `A20WaterArea` decimal(10,2) +); +CREATE PROJECTION ON `familyliving` ( +`FamilyLivingID`, +`FamilyID`, +`A21HasWater`, 
+`A22IsSafeWater`, +`WaterSituation`, +`A23HasElectric`, +`A24HasRadioAndTV`, +`A25DistanceToMainRoad`, +`A26RoadType`, +`DurableConsumerGood`, +`A27HousingArea`, +`A28IsDangerousBuilding`, +`A29HasToilet`, +`A30MainFuelType`, +`A31HasJoinedFC`, +`BulidingStructure`, +`RelocationStatus`, +`Creater`, +`CreateTime`, +`Reviser`, + `UpdateTime`, + `HouseBaseArea`, + `ProductionRoomArea`, + `A17CultivatedArea`, + `A17aEffectiveIrrigationArea`, + `A17bField`, + `A17cField`, + `A18ForestArea`, + `A18aReforestationCultivatedArea`, + `A18bFruitTreeArea`, + `A19PastureArea`, + `A20WaterArea` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `familyperson`; +CREATE TABLE `familyperson` ( +`FamilyPersonID` int(10), +`FamilyID` int(10), +`Name` varchar(255), +`Sex` varchar(255), +`RID` varchar(255), +`Relationship` varchar(255), +`EthnicGroup` varchar(255), +`Education` varchar(255), +`StudentStatus` varchar(255), +`Health` varchar(255), +`WorkCapacity` varchar(255), +`StateOfWork` varchar(255), +`TimeOfWork` varchar(255), +`NCMS` varchar(255), +`ResidentPension` varchar(255), +`AddReason` varchar(255), +`MilitaryService` varchar(255), +`EndownmentInsurance` varchar(255), + `Skill` varchar(255) , +`SkillNumber` int(11), +`IsTrained` varchar(255), +`IncomeOfWork` decimal(10,2), +`TransferJob` varchar(255), + `ChildrenSchooling` varchar(255) , +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), + `UpdateTime` datetime , + `Other` varchar(255) , + `Job` varchar(255) , + `PlanJob` varchar(255) , + `Age` int(10) , + `Phone` varchar(255) +); +CREATE PROJECTION ON `familyperson` ( +`FamilyPersonID`, +`FamilyID`, +`Name`, +`Sex`, +`RID`, +`Relationship`, +`EthnicGroup`, +`Education` , +`StudentStatus`, +`Health`, +`WorkCapacity`, +`StateOfWork`, +`TimeOfWork`, +`NCMS`, +`ResidentPension`, +`AddReason`, +`MilitaryService`, +`EndownmentInsurance`, + `Skill`, +`SkillNumber`, +`IsTrained`, +`IncomeOfWork`, +`TransferJob`, + `ChildrenSchooling`, +`Creater`, 
+`CreateTime`, +`Reviser`, + `UpdateTime`, + `Other`, + `Job`, + `PlanJob`, + `Age`, + `Phone` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `familypicture`; +CREATE TABLE `familypicture` ( +`PictureID` int(10), +`FamilyID` int(10), +`ArchivesNO` varchar(255) , +`Name` varchar(255), +`Path` varchar(255), +`Type` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `familypicture` ( +`PictureID`, +`FamilyID`, +`ArchivesNO`, +`Name`, +`Path`, +`Type`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `family_project`; +CREATE TABLE `family_project` ( + `FamilyProjectID` int(10), + `FamilyID` int(10) , + `VillageProjectID` int(10) , + `Creater` varchar(255) , + `CreateTime` datetime , + `Reviser` varchar(255) , + `UpdateTime` datetime +); +CREATE PROJECTION ON `family_project` ( + `FamilyProjectID`, + `FamilyID`, + `VillageProjectID`, + `Creater`, + `CreateTime`, + `Reviser`, + `UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `grouppicture`; +CREATE TABLE `grouppicture` ( + `PictureID` int(10), + `GroupID` int(10) , + `Name` varchar(255) , + `Path` varchar(255) , + `Creater` varchar(255) , + `CreateTime` datetime , + `Reviser` varchar(255) , + `UpdateTime` datetime +); +CREATE PROJECTION ON `grouppicture` ( + `PictureID`, + `GroupID`, + `Name`, + `Path`, + `Creater`, + `CreateTime`, + `Reviser`, + `UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `helper`; +CREATE TABLE `helper` ( +`HelperID` int(10), +`HelpOrgID` int(10), +`HelperName` varchar(255), +`HelperPhone` varchar(255), +`HelperTitle` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `helper` ( +`HelperID`, +`HelpOrgID`, +`HelperName`, +`HelperPhone`, +`HelperTitle`, +`Creater`, +`CreateTime`, +`Reviser`, 
+`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `helperorg_village`; +CREATE TABLE `helperorg_village` ( + `HelperOrgVilliageID` int(10), + `VillageID` int(10) , + `HelpOrgID` int(10) , + `Creater` varchar(255) , + `CreateTime` datetime , + `Reviser` varchar(255) , + `UpdateTime` datetime +); +CREATE PROJECTION ON `helperorg_village` ( + `HelperOrgVilliageID`, + `VillageID`, + `HelpOrgID`, + `Creater`, + `CreateTime`, + `Reviser`, + `UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `helper_family`; +CREATE TABLE `helper_family` ( +`HelperFamilyID` int(10), +`FamilyID` int(10), +`HelperID` int(10), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `helper_family` ( +`HelperFamilyID`, +`FamilyID`, +`HelperID`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + + +DROP TABLE IF EXISTS `helporg`; +CREATE TABLE `helporg` ( +`HelpOrgID` int(10), +`HelpOrgName` varchar(255), +`OrgBelongto` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `helporg` ( +`HelpOrgID`, +`HelpOrgName`, +`OrgBelongto`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `povertystatus`; +CREATE TABLE `povertystatus` ( +`PovertyStatusID` int(10), +`HelperFamilyID` int(10), +`PovertyStatusNote` varchar(255) +); +CREATE PROJECTION ON `povertystatus` ( +`PovertyStatusID`, +`HelperFamilyID`, +`PovertyStatusNote` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `projectstatus`; +CREATE TABLE `projectstatus` ( +`ProjectStatusID` int(10), +`FamilyProjectID` int(10), +`ProjectStatusNote` varchar(255), +`PicturePath` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `projectstatus` ( 
+`ProjectStatusID`, +`FamilyProjectID`, +`ProjectStatusNote`, +`PicturePath`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `projectstatuspicture`; +CREATE TABLE `projectstatuspicture` ( +`ProjectStatusPictureID` int(10), +`VillageProjectID` int(10), +`Path` varchar(255) +); +CREATE PROJECTION ON `projectstatuspicture` ( +`ProjectStatusPictureID`, +`VillageProjectID`, +`Path` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `town`; +CREATE TABLE `town` ( +`TownID` int(10), +`Name` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `town` ( +`TownID`, +`Name`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `village`; +CREATE TABLE `village` ( +`VillageID` int(10), +`Name` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime, +`TownID` int(10) +); +CREATE PROJECTION ON `village` ( +`VillageID`, +`Name`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime`, +`TownID` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `villageplan`; +CREATE TABLE `villageplan` ( +`VililagePlanID` int(10), +`VillageID` int(10), +`ProjectCategory` varchar(255), +`Year` int(10), +`ProjectName` varchar(255), +`ProjectContent` varchar(255), +`Unit` varchar(255), +`ProjectVolume` decimal(10,2), +`TotalAmount` decimal(10,2), +`FinanceAmount` decimal(10,2), +`DepartmentAmount` decimal(10,2), +`SocietyAmount` decimal(10,2), +`CreditAmount` decimal(10,2), +`HelpOrgAmount` decimal(10,2), +`SelfCollectedAmount` decimal(10,2), +`OtherAmount` decimal(10,2), +`CoveredFamily` int(10), +`CoveredPerson` int(10) , +`ProjectStatus` varchar(255) , +`ProjectSituation` varchar(255) , +`Creater` varchar(255) , +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON 
`villageplan` ( +`VililagePlanID`, +`VillageID`, +`ProjectCategory`, +`Year`, +`ProjectName`, +`ProjectContent`, +`Unit`, +`ProjectVolume`, +`TotalAmount`, +`FinanceAmount`, +`DepartmentAmount`, +`SocietyAmount`, +`CreditAmount`, +`HelpOrgAmount`, +`SelfCollectedAmount`, +`OtherAmount`, +`CoveredFamily`, +`CoveredPerson`, +`ProjectStatus`, +`ProjectSituation`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `villageproject`; +CREATE TABLE `villageproject` ( +`VillageProjectID` int(10), +`VillageID` int(10), +`ProjectCategory` varchar(255), +`Year` int(10), +`ProjectName` varchar(255) , +`ProjectContent` varchar(255), +`Unit` varchar(255), +`ProjectVolume` decimal(10,2), +`TotalAmount` decimal(10,2), +`FinanceAmount` decimal(10,2), +`DepartmentAmount` decimal(10,2), +`SocietyAmount` decimal(10,2), +`CreditAmount` decimal(10,2), +`HelpOrgAmount` decimal(10,2), +`SelfCollectedAmount` decimal(10,2), +`OtherAmount` decimal(10,2), +`CoveredFamily` int(10), +`CoveredPerson` int(10), +`ProjectStatus` varchar(255), +`ProjectSituation` varchar(255) , +`ProjectOwner` varchar(255), +`Creater` varchar(255), +`CreateTime` datetime, +`Reviser` varchar(255), +`UpdateTime` datetime +); +CREATE PROJECTION ON `villageproject` ( +`VillageProjectID`, +`VillageID`, +`ProjectCategory`, +`Year`, +`ProjectName`, +`ProjectContent`, +`Unit`, +`ProjectVolume`, +`TotalAmount`, +`FinanceAmount`, +`DepartmentAmount`, +`SocietyAmount`, +`CreditAmount`, +`HelpOrgAmount`, +`SelfCollectedAmount`, +`OtherAmount`, +`CoveredFamily`, +`CoveredPerson`, +`ProjectStatus`, +`ProjectSituation`, +`ProjectOwner`, +`Creater`, +`CreateTime`, +`Reviser`, +`UpdateTime` +) number = 1 partitioned on row_id; + +DROP TABLE IF EXISTS `家庭成员信息`; +CREATE TABLE `家庭成员信息` ( +`市` varchar(255), +`县` varchar(255), +`乡` varchar(255), +`村` varchar(255), +`姓名` varchar(255), +`性别` varchar(255), +`证件号码` varchar(255), +`关系` varchar(255), +`民族` varchar(255), +`文化程度` 
varchar(255), +`在校生状况` varchar(255), +`健康状况` varchar(255), +`劳动能力` varchar(255), +`务工状况` varchar(255), +`务工时间` varchar(255), +`新农合` varchar(255), +`居民养老` varchar(255), +`添加原因` varchar(255), +`是否现役军人` varchar(255), +`是否参加城镇职工基本养老保险` varchar(255), +`在读学校名称` varchar(255) +); +CREATE PROJECTION ON `家庭成员信息` ( +`市`, +`县`, +`乡`, +`村`, +`姓名`, +`性别`, +`证件号码`, +`关系`, +`民族`, +`文化程度`, +`在校生状况`, +`健康状况`, +`劳动能力`, +`务工状况`, +`务工时间`, +`新农合`, +`居民养老`, +`添加原因`, +`是否现役军人`, +`是否参加城镇职工基本养老保险`, +`在读学校名称` +) number = 1 partitioned on row_id; +load table `家庭成员信息` from "/home/imdb/rawData/guizhou/家庭成员信息.tbl" with "|","\n"; + +DROP TABLE IF EXISTS `帮扶人信息`; +CREATE TABLE `帮扶人信息` ( +`市` varchar(255), +`县` varchar(255), +`乡` varchar(255), +`村` varchar(255), +`组` varchar(255), +`户主姓名` varchar(255), +`户主身份证` varchar(255), +`识别标准` varchar(255), +`贫困户属性` varchar(255), +`帮扶人姓名` varchar(255), +`帮扶单位名称` varchar(255), +`单位隶属关系` varchar(255), +`联系电话` varchar(255) , +`职务` varchar(255) +); +CREATE PROJECTION ON `帮扶人信息` ( +`市`, +`县`, +`乡`, +`村`, +`组`, +`户主姓名`, +`户主身份证`, +`识别标准`, +`贫困户属性`, +`帮扶人姓名`, +`帮扶单位名称`, +`单位隶属关系`, +`联系电话`, +`职务` +) number = 1 partitioned on row_id; +load table `帮扶人信息` from "/home/imdb/rawData/guizhou/贫困户帮扶信息.tbl" with "|","\n"; + +DROP TABLE IF EXISTS `移民搬迁信息采集`; +CREATE TABLE `移民搬迁信息采集` ( +`市` varchar(255), +`县` varchar(255), +`乡镇` varchar(255), +`村` varchar(255), +`组` varchar(255), +`识别标准` varchar(255), +`户主姓名` varchar(255), +`户主身份证号码` varchar(255), +`家庭人口数` varchar(255), +`搬迁类型` varchar(255), +`是否列入规划` varchar(255), +`搬迁意愿` varchar(255), +`搬迁方式` varchar(255), +`安置方式` varchar(255), +`安置地点` varchar(255), +`搬迁可能存在的困难` varchar(255) +); +CREATE PROJECTION ON `移民搬迁信息采集` ( +`市`, +`县`, +`乡镇`, +`村`, +`组`, +`识别标准`, +`户主姓名`, +`户主身份证号码`, +`家庭人口数`, +`搬迁类型`, +`是否列入规划`, +`搬迁意愿`, +`搬迁方式`, +`安置方式`, +`安置地点`, +`搬迁可能存在的困难` +) number = 1 partitioned on row_id; +load table `移民搬迁信息采集` from "/home/imdb/rawData/guizhou/移民搬迁信息采集.tbl" with "|","\n"; + +DROP TABLE IF EXISTS `贫困户信息`; +CREATE TABLE `贫困户信息` ( 
+`户主姓名` varchar(255), +`身份证` varchar(255), +`人数` varchar(255), +`年龄` varchar(255), +`联系方式` varchar(255), +`农户属性` varchar(255), +`识别标准` varchar(255), +`致贫原因` varchar(255), +`市` varchar(255), +`县` varchar(255), +`乡` varchar(255), +`村` varchar(255), +`组` varchar(255), +`人均收入(RMB)` varchar(255), +`开户银行` varchar(255), +`银行帐号` varchar(255), +`A17耕地面积(亩)` varchar(255), +`A17a有效灌溉面积(亩)` varchar(255), +`A17b田(亩)` varchar(255), +`A17c土(亩)` varchar(255), +`A18林地面积(亩)` varchar(255), +`A18a退耕还林面积(亩)` varchar(255), +`A18b林果面积(亩)` varchar(255), +`A19牧草地面积(亩)` varchar(255), +`A20水面面积(亩)` varchar(255), +`A21饮水是否困难` varchar(255), +`A22饮水是否安全` varchar(255), +`饮水情况` varchar(255), +`A23是否通生活用电` varchar(255), +`A24是否通广播电视` varchar(255), +`A25距离村主干路(公里)` varchar(255), +`A26入户路类型` varchar(255), +`拥有耐用消费品情况` varchar(255), +`A27住房面积(平方米)` varchar(255), +`A28主要住房是否危房` varchar(255), +`A29有无卫生厕所` varchar(255), +`A30主要燃料类型` varchar(255), +`建房时间(年份)` varchar(255), +`房屋主要结构` varchar(255), +`易地扶贫搬迁情况` varchar(255), +`A31是否加入农民合作社` varchar(255), +`A32未偿还借(贷)款(元)` varchar(255), +`A33家庭年人均纯收入(元)` varchar(255), +`N02家庭年收入(元)` varchar(255), +`A34全家工资性收入(元)` varchar(255), +`N03财产性收入(元)` varchar(255), +`A35全家生产经营性收入(元)` varchar(255), +`N01家庭年纯收入(元)` varchar(255), +`A36各类补贴(元)` varchar(255), +`N04家庭生产性支出(元)` varchar(255), +`A36a领取计划生育金(含"少生快富"和"奖励扶助")(元)` varchar(255), +`A36b领取低保金(元)` varchar(255), +`A36c领取养老保险金(元)` varchar(255), +`A36d新农合报销医疗费(元)` varchar(255), +`A36e医疗救助金(元)` varchar(255), +`A36f生态补偿金(含退耕还林,草原生态奖补等)(元)` varchar(255), +`审核状态` varchar(255), +`录入人` varchar(255), +`录入时间` varchar(255), +`修改人` varchar(255), +`修改时间` varchar(255), +`商品用经济作物面积(亩)` varchar(255), +`生产用房面积(平方米)` varchar(255), +`帮扶分类` varchar(255), +`是否有家庭贫困辍学学生` varchar(255), +`经度` varchar(255), +`纬度` varchar(255), +`高度` varchar(255), +`离最近的乡镇集市距离(公里)` varchar(255), +`是否军烈属` varchar(255) +); +CREATE PROJECTION ON `贫困户信息` ( +`户主姓名`, +`身份证`, +`人数`, +`年龄`, +`联系方式`, +`农户属性`, +`识别标准`, +`致贫原因`, +`市`, +`县`, +`乡`, +`村`, +`组`, +`人均收入(RMB)`, 
+`开户银行`, +`银行帐号`, +`A17耕地面积(亩)`, +`A17a有效灌溉面积(亩)`, +`A17b田(亩)`, +`A17c土(亩)`, +`A18林地面积(亩)`, +`A18a退耕还林面积(亩)`, +`A18b林果面积(亩)`, +`A19牧草地面积(亩)`, +`A20水面面积(亩)`, +`A21饮水是否困难`, +`A22饮水是否安全`, +`饮水情况`, +`A23是否通生活用电`, +`A24是否通广播电视`, +`A25距离村主干路(公里)`, +`A26入户路类型`, +`拥有耐用消费品情况`, +`A27住房面积(平方米)`, +`A28主要住房是否危房`, +`A29有无卫生厕所`, +`A30主要燃料类型`, +`建房时间(年份)`, +`房屋主要结构`, +`易地扶贫搬迁情况`, +`A31是否加入农民合作社`, +`A32未偿还借(贷)款(元)`, +`A33家庭年人均纯收入(元)`, +`N02家庭年收入(元)`, +`A34全家工资性收入(元)`, +`N03财产性收入(元)`, +`A35全家生产经营性收入(元)`, +`N01家庭年纯收入(元)`, +`A36各类补贴(元)`, +`N04家庭生产性支出(元)`, +`A36a领取计划生育金(含"少生快富"和"奖励扶助")(元)`, +`A36b领取低保金(元)`, +`A36c领取养老保险金(元)`, +`A36d新农合报销医疗费(元)`, +`A36e医疗救助金(元)`, +`A36f生态补偿金(含退耕还林,草原生态奖补等)(元)`, +`审核状态`, +`录入人`, +`录入时间`, +`修改人`, +`修改时间`, +`商品用经济作物面积(亩)`, +`生产用房面积(平方米)`, +`帮扶分类`, +`是否有家庭贫困辍学学生`, +`经度`, +`纬度`, +`高度`, +`离最近的乡镇集市距离(公里)`, +`是否军烈属` +) number = 1 partitioned on row_id; +load table `贫困户信息` from "/home/imdb/rawData/guizhou/贫困户信息.tbl" with "|","\n"; + +DROP TABLE IF EXISTS `贫困户帮扶计划信息`; +CREATE TABLE `贫困户帮扶计划信息` ( +`市` varchar(255), +`县` varchar(255), +`乡镇` varchar(255), +`村` varchar(255), +`项目类别` varchar(255), +`年度` varchar(255), +`项目名称` varchar(255), +`项目内容` varchar(255), +`计量单位` varchar(255), +`数量` varchar(255), +`计划资金总额(万元)` varchar(255), +`财政资金(万元)` varchar(255), +`行业部门资金(万元)` varchar(255), +`社会帮扶资金(万元)` varchar(255), +`信贷资金(万元)` varchar(255), +`帮扶单位资金(万元)` varchar(255), +`群众自筹资金(万元)` varchar(255), +`其他资金(万元)` varchar(255), +`覆盖户数` varchar(255), +`覆盖人数` varchar(255), +`创建人` varchar(255), +`操作时间` varchar(255) +); +CREATE PROJECTION ON `贫困户帮扶计划信息` ( +`市`, +`县`, +`乡镇`, +`村`, +`项目类别`, +`年度`, +`项目名称`, +`项目内容`, +`计量单位`, +`数量`, +`计划资金总额(万元)`, +`财政资金(万元)`, +`行业部门资金(万元)`, +`社会帮扶资金(万元)`, +`信贷资金(万元)`, +`帮扶单位资金(万元)`, +`群众自筹资金(万元)`, +`其他资金(万元)`, +`覆盖户数`, +`覆盖人数`, +`创建人`, +`操作时间` +) number = 1 partitioned on row_id; +load table `贫困户帮扶计划信息` from "/home/imdb/rawData/guizhou/贫困户帮扶计划信息.tbl" with "|","\n"; + +DROP TABLE IF EXISTS `贫困户项目实施信息`; +CREATE TABLE `贫困户项目实施信息` ( +`市` varchar(255), +`县` 
varchar(255), +`乡镇` varchar(255), +`村` varchar(255), +`项目类别` varchar(255), +`年度` varchar(255), +`项目名称` varchar(255), +`项目内容` varchar(255), +`计量单位` varchar(255), +`数量` varchar(255), +`资金总额(万元)` varchar(255), +`财政资金(万元)` varchar(255), +`行业部门资金(万元)` varchar(255), +`社会帮扶资金(万元)` varchar(255), +`信贷资金(万元)` varchar(255), +`帮扶单位资金(万元)` varchar(255), +`群众自筹资金(万元)` varchar(255), +`其他资金(万元)` varchar(255), +`覆盖户数` varchar(255), +`覆盖人数` varchar(255), +`创建人` varchar(255), +`操作时间` varchar(255) +); +CREATE PROJECTION ON `贫困户项目实施信息` ( +`市`, +`县`, +`乡镇`, +`村`, +`项目类别`, +`年度`, +`项目名称`, +`项目内容`, +`计量单位`, +`数量`, +`资金总额(万元)`, +`财政资金(万元)`, +`行业部门资金(万元)`, +`社会帮扶资金(万元)`, +`信贷资金(万元)`, +`帮扶单位资金(万元)`, +`群众自筹资金(万元)`, +`其他资金(万元)`, +`覆盖户数`, +`覆盖人数`, +`创建人`, +`操作时间` +) number = 1 partitioned on row_id; +load table `贫困户项目实施信息` from "/home/imdb/rawData/guizhou/贫困户项目实施信息.tbl" with "|","\n"; + +DROP TABLE IF EXISTS `贫困户项目实施详细信息`; +CREATE TABLE `贫困户项目实施详细信息` ( +`市` varchar(255), +`县` varchar(255), +`乡镇` varchar(255), +`村` varchar(255), +`项目类别` varchar(255), +`年度` varchar(255), +`项目名称` varchar(255), +`项目内容` varchar(255), +`计量单位` varchar(255), +`数量` varchar(255), +`姓名` varchar(255), +`身份证` varchar(255), +`组` varchar(255), +`农户属性` varchar(255) +); +CREATE PROJECTION ON `贫困户项目实施详细信息` ( +`市`, +`县`, +`乡镇`, +`村`, +`项目类别`, +`年度`, +`项目名称`, +`项目内容`, +`计量单位`, +`数量`, +`姓名`, +`身份证`, +`组`, +`农户属性` +) number = 1 partitioned on row_id; +load table `贫困户项目实施详细信息` from "/home/imdb/rawData/guizhou/贫困户项目实施详细信息.tbl" with "|","\n"; + +exit; diff --git a/sbin/claims-test/testcase/huishuiQ1.test b/sbin/claims-test/testcase/huishuiQ1.test new file mode 100644 index 000000000..25c048b79 --- /dev/null +++ b/sbin/claims-test/testcase/huishuiQ1.test @@ -0,0 +1,170 @@ + +SELECT * FROM town; + +SELECT * FROM town WHERE TownID = 1 LIMIT 1; + +select village.*, + town.Name +from village, town +where village.TownID = town.TownID + and village.TownID = 1; + +select village.*, + town.Name +from village, town +where village.TownID = 
town.TownID + and village.VillageID = 1 limit 1; + +select familygroup.GroupID, + familygroup.Name, + village.VillageID, + village.Name, + town.TownID, + town.Name, + familygroup.Creater, + familygroup.CreateTime, + familygroup.Reviser, + familygroup.UpdateTime, + grouppicture.PictureID, + grouppicture.Name, + grouppicture.Path +from familygroup, grouppicture, village, town +where familygroup.GroupID = grouppicture.GroupID + and familygroup.VillageID = village.VillageID + and village.TownID = town.TownID + and familygroup.VillageID = 1; + +select familygroup.GroupID, + familygroup.Name, + village.VillageID, + village.Name, + town.TownID, + town.Name, + familygroup.Creater, + familygroup.CreateTime, + familygroup.Reviser, + familygroup.UpdateTime, + grouppicture.PictureID, + grouppicture.Name, + grouppicture.Path +FROM familygroup, grouppicture, village, town +where familygroup.GroupID = grouppicture.GroupID + and familygroup.VillageID = village.VillageID + and village.TownID = town.TownID + and grouppicture.GroupID = 1 limit 1; + +SELECT family.FamilyID, + familygroup.GroupID, + familygroup.Name , + village.VillageID, + village.Name , + town.TownID, + town.Name , + family.IsPoorFamily, + family.MasterName, + family.RID, + family.HeadCount, + familyimmigration.PlanMoveTime, + familyimmigration.ActualMoveTime, + familyimmigration.PlanDismantleTime, + familyimmigration.ActualDismantleTime +FROM family, familygroup, village, town, familyimmigration +where family.GroupID = familygroup.GroupID + and familygroup.VillageID = village.VillageID + and village.TownID = town.TownID + and family.FamilyID = familyimmigration.FamilyID + and family.IsImmigrant = 1 + and family.GroupID = 1; + +SELECT family.FamilyID, + familygroup.GroupID, + familygroup.Name , + village.VillageID, + village.Name , + town.TownID, + town.Name, + family.IsPoorFamily, + family.MasterName, + family.RID, + family.HeadCount, + familyimmigration.PlanMoveTime, + familyimmigration.ActualMoveTime, + 
familyimmigration.PlanDismantleTime, + familyimmigration.ActualDismantleTime +FROM family ,familygroup, village, town, familyimmigration +where family.GroupID = familygroup.GroupID + and familygroup.VillageID = village.VillageID + and village.TownID = town.TownID + and family.FamilyID = familyimmigration.FamilyID + and family.IsImmigrant = 1 + and familyimmigration.ActualMoveTime = 1; + +SELECT family.*, + familygroup.GroupID, + familygroup.Name , + village.VillageID, + village.Name , + town.TownID, + town.Name +FROM family, familygroup, village, town +where family.GroupID = familygroup.GroupID + and familygroup.VillageID = village.VillageID + and village.TownID = town.TownID + and family.FamilyID = 1 LIMIT 1; + +SELECT family.*, + familygroup.GroupID, + familygroup.Name , + village.VillageID, + village.Name , + town.TownID, + town.Name +FROM family, familygroup, village, town +where family.GroupID = familygroup.GroupID + and familygroup.VillageID = village.VillageID + and village.TownID = town.TownID + and family.RID = 1 LIMIT 1; + +SELECT family.*, + familygroup.GroupID, + familygroup.Name , + village.VillageID, + village.Name , + town.TownID, + town.Name +FROM family, familygroup, village, town +where family.GroupID = familygroup.GroupID +and familygroup.VillageID = village.VillageID +and village.TownID = town.TownID +and family.IsImmigrant=1 +and family.RID = 1 LIMIT 1; + +SELECT family.*, + familygroup.GroupID, + familygroup.Name , + village.VillageID, + village.Name , + town.TownID, + town.Name +FROM family, familygroup, village, town +where family.GroupID = familygroup.GroupID +and familygroup.VillageID = village.VillageID +and village.TownID = town.TownID +and IsImmigrant = 1 +and family.GroupID = 1; + +SELECT family.*, + familygroup.GroupID, + familygroup.Name , + village.VillageID, + village.Name , + town.TownID, + town.Name +FROM family, familygroup, village, town +where family.GroupID = familygroup.GroupID + and familygroup.VillageID = village.VillageID 
+ and village.TownID = town.TownID + and IsImmigrant = 1 + and family.GroupID = 1; + +exit; diff --git a/sbin/claims-test/testcase/huishuiQ2.test b/sbin/claims-test/testcase/huishuiQ2.test new file mode 100644 index 000000000..0d305d1aa --- /dev/null +++ b/sbin/claims-test/testcase/huishuiQ2.test @@ -0,0 +1,110 @@ + +SELECT * FROM `town`; + +SELECT * FROM `town` WHERE `TownID` = 1 LIMIT 1; + +SELECT village.*, + t.`Name` TownName +FROM village +LEFT JOIN `town` t ON village.`TownID` = t.`TownID` +WHERE village.TownID = 1; + +SELECT village.*, + t.Name TownName +FROM village +LEFT JOIN `town` t ON village.TownID = t.TownID +WHERE VillageID = 1 LIMIT 1; + +SELECT g.`GroupID`, g.`Name`, + v.`VillageID`, v.`Name` VillageName, + t.`TownID`, t.`Name` TownName, + g.`Creater`, g.`CreateTime`, + g.`Reviser`, g.`UpdateTime`, + p.`PictureID`, p.`Name` PictureName, p.`Path` PicturePath +FROM `familygroup` g +LEFT JOIN `grouppicture` p +ON g.`GroupID` = p.`GroupID` +LEFT JOIN `village` v ON g.`VillageID` = v.`VillageID` +LEFT JOIN `town` t ON v.`TownID` = t.`TownID` +WHERE g.`VillageID` = 1; + +SELECT g.`GroupID`, g.`Name`, + v.`VillageID`, v.`Name` VillageName, + t.`TownID`, t.`Name` TownName, + g.`Creater`, g.`CreateTime`, + g.`Reviser`, g.`UpdateTime`, + p.`PictureID`, p.`Name` PictureName, p.`Path` PicturePath +FROM `familygroup` g +LEFT JOIN `grouppicture` p ON g.`GroupID` = p.`GroupID` +LEFT JOIN `village` v ON g.`VillageID` = v.`VillageID` +LEFT JOIN `town` t ON v.`TownID` = t.`TownID` +WHERE g.`GroupID` = 1 +LIMIT 1; + +SELECT family.*,g.`GroupID`, g.`Name` GroupName, + v.`VillageID`, v.`Name` VillageName, t.`TownID`, + t.`Name` TownName +FROM `family` +LEFT JOIN `familygroup` g ON family.`GroupID` = g.`GroupID` +LEFT JOIN `village` v ON g.`VillageID` = v.`VillageID` +LEFT JOIN `town` t ON v.`TownID` = t.`TownID` +WHERE `FamilyID` = 1 LIMIT 1; + +SELECT family.*,g.`GroupID`, g.`Name` GroupName, + v.`VillageID`, v.`Name` VillageName, t.`TownID`, + t.`Name` TownName +FROM 
`family` +LEFT JOIN `familygroup` g ON family.`GroupID` = g.`GroupID` +LEFT JOIN `village` v ON g.`VillageID` = v.`VillageID` +LEFT JOIN `town` t ON v.`TownID` = t.`TownID` +WHERE family.`RID` =1 +LIMIT 1; + +SELECT family.*,g.`GroupID`, g.`Name` GroupName, + v.`VillageID`, v.`Name` VillageName, t.`TownID`, + t.`Name` TownName +FROM `family` +LEFT JOIN `familygroup` g ON family.`GroupID` = g.`GroupID` +LEFT JOIN `village` v ON g.`VillageID` = v.`VillageID` +LEFT JOIN `town` t ON v.`TownID` = t.`TownID` +WHERE family.`IsImmigrant`=1 AND family.`RID` = 1 +LIMIT 1; + +SELECT family.*,g.`GroupID`, g.`Name` GroupName, + v.`VillageID`, v.`Name` VillageName, t.`TownID`, + t.`Name` TownName +FROM `family` +LEFT JOIN `familygroup` g ON family.`GroupID` = g.`GroupID` +LEFT JOIN `village` v ON g.`VillageID` = v.`VillageID` +LEFT JOIN `town` t ON v.`TownID` = t.`TownID` +WHERE `IsImmigrant` = 1 AND family.`GroupID` = 1; + +SELECT h.`HelperID`, h.`HelperName`, h.`HelperPhone`, h.`HelperTitle`, + ho.`HelpOrgID`, ho.`HelpOrgName`, + f.`FamilyID`, f.`RID`, f.`MasterName`, + h.`Creater`, h.`CreateTime`, h.`Reviser`, h.`UpdateTime` +FROM `family` f +LEFT JOIN `helper_family` hf ON f.`FamilyID` = hf.`FamilyID` +LEFT JOIN `helper` h ON hf.`HelperID` = h.`HelperID` +LEFT JOIN `helporg` ho ON h.`HelpOrgID` = ho.`HelpOrgID` +WHERE f.`FamilyID`= 1; + +SELECT * FROM `familyimmigration` WHERE `FamilyID` = 1 LIMIT 1; + +DELETE FROM familyimmigration WHERE FamilyID =1; + +SELECT * FROM `familyincome` WHERE `FamilyID` = 1 LIMIT 1; + +DELETE FROM familyincome WHERE FamilyID =1; + +SELECT * FROM `familyliving` WHERE `FamilyID` = 1 LIMIT 1; + +delete from familyliving where FamilyID = 1; + +SELECT * FROM `familyperson` WHERE `FamilyID` = 1; + +DELETE FROM familyperson WHERE RID = 1; + +delete from familypicture where PictureID = 1; + +exit; \ No newline at end of file diff --git a/sbin/claims-test/testcase/outer_join.test b/sbin/claims-test/testcase/outer_join.test new file mode 100644 index 
000000000..b58e03de9 --- /dev/null +++ b/sbin/claims-test/testcase/outer_join.test @@ -0,0 +1,26 @@ +select * from A right join B on A.id = B.id; +select * from B left join A on A.id = B.id; +select * from A left join B on A.id = B.id; +select * from B right join A on A.id = B.id; + +select * from A left join B on A.num = B.name; +select * from B right join A on A.num = B.name; +select * from A right join B on A.num = B.name; +select * from B left join A on A.num = B.name; + +select * from A right join B on A.id = B.id AND A.num = B.name; +select * from B left join A on A.id = B.id AND A.num = B.name; +select * from A right join B on A.id = B.id AND A.strdate = B.strdate; +select * from B left join A on A.id = B.id AND A.strdate = B.strdate; + +select * from A right join B on A.id = B.id AND A.num = 111; +select * from A left join B on A.id = B.id AND A.num = 111; +select * from B right join A on A.id = B.id AND B.name = 212; +select * from B left join A on A.id = B.id AND B.name = 212; +select * from A right join B on A.id = B.id where B.name = 212; +select * from A left join B on A.id = B.id where B.name = 212; +select * from A right join B on A.id = B.id where A.num = 112; +select * from A left join B on A.id = B.id where A.num = 112; + + +exit; diff --git a/sbin/claims-test/testcase/outer_join_ddl.test b/sbin/claims-test/testcase/outer_join_ddl.test new file mode 100644 index 000000000..cd22cbfe3 --- /dev/null +++ b/sbin/claims-test/testcase/outer_join_ddl.test @@ -0,0 +1,128 @@ + +drop table A; +drop table B; + +create table A(id int unsigned, num int, strdate varchar(10) ); +create projection on A(id,num,strdate) number = 1 partitioned on id; +create table B(id int unsigned, name int, strdate varchar(10) ); +create projection on B(id,name,strdate) number = 1 partitioned on id; + +show tables; +desc A; +desc B; + +insert into A values(1, 111 ,"20160401"); +insert into A values(2, 112 ,"20160401"); +insert into A values(3, 113 ,"20160401"); +insert into A 
values(4, 114 ,"20160331"); +insert into A values(5, 115 ,"20160331"); +insert into A values(6, 116 ,"20160331"); +insert into A values(21, 111 ,"20160401"); +insert into A values(22, 112 ,"20160401"); +insert into A values(23, 113 ,"20160401"); +insert into A values(24, 114 ,"20160331"); +insert into A values(25, 115 ,"20160331"); +insert into A values(26, 116 ,"20160331"); +insert into A values(31, 111 ,"20160401"); +insert into A values(32, 112 ,"20160401"); +insert into A values(33, 113 ,"20160401"); +insert into A values(34, 114 ,"20160331"); +insert into A values(35, 115 ,"20160331"); +insert into A values(36, 116 ,"20160331"); +insert into A values(41, 111 ,"20160401"); +insert into A values(42, 112 ,"20160401"); +insert into A values(43, 113 ,"20160401"); +insert into A values(44, 114 ,"20160331"); +insert into A values(45, 115 ,"20160331"); +insert into A values(46, 116 ,"20160331"); +insert into A values(101, 111 ,"20160401"); +insert into A values(102, 112 ,"20160401"); +insert into A values(103, 113 ,"20160401"); +insert into A values(104, 114 ,"20160331"); +insert into A values(105, 115 ,"20160331"); +insert into A values(106, 116 ,"20160331"); +insert into A values(121, 111 ,"20160401"); +insert into A values(122, 112 ,"20160401"); +insert into A values(123, 113 ,"20160401"); +insert into A values(124, 114 ,"20160331"); +insert into A values(125, 115 ,"20160331"); +insert into A values(126, 116 ,"20160331"); +insert into A values(131, 111 ,"20160401"); +insert into A values(132, 112 ,"20160401"); +insert into A values(133, 113 ,"20160401"); +insert into A values(134, 114 ,"20160331"); +insert into A values(135, 115 ,"20160331"); +insert into A values(136, 116 ,"20160331"); +insert into A values(141, 111 ,"20160401"); +insert into A values(142, 112 ,"20160401"); +insert into A values(143, 113 ,"20160401"); +insert into A values(144, 114 ,"20160331"); +insert into A values(145, 115 ,"20160331"); +insert into A values(146, 116 ,"20160331"); + + +select * 
from A; + +insert into B values(1, 111 ,"20160401"); +insert into B values(2, 212 ,"20160401"); +insert into B values(3, 213 ,"20160401"); +insert into B values(4, 214 ,"20160331"); +insert into B values(8, 215 ,"20160331"); +insert into B values(9, 216 ,"20160331"); +insert into B values(10, 217 ,"20160331"); +insert into B values(11, 218 ,"20160331"); +insert into B values(12, 212 ,"20160401"); +insert into B values(13, 213 ,"20160401"); +insert into B values(14, 214 ,"20160331"); +insert into B values(18, 215 ,"20160331"); +insert into B values(19, 216 ,"20160331"); +insert into B values(20, 217 ,"20160331"); +insert into B values(21, 218 ,"20160331"); +insert into B values(22, 212 ,"20160401"); +insert into B values(23, 213 ,"20160401"); +insert into B values(24, 214 ,"20160331"); +insert into B values(28, 215 ,"20160331"); +insert into B values(29, 216 ,"20160331"); +insert into B values(30, 217 ,"20160331"); +insert into B values(31, 218 ,"20160331"); +insert into B values(32, 212 ,"20160401"); +insert into B values(33, 213 ,"20160401"); +insert into B values(34, 214 ,"20160331"); +insert into B values(38, 215 ,"20160331"); +insert into B values(39, 216 ,"20160331"); +insert into B values(40, 217 ,"20160331"); +insert into B values(41, 218 ,"20160331"); +insert into B values(101, 111 ,"20160401"); +insert into B values(102, 212 ,"20160401"); +insert into B values(103, 213 ,"20160401"); +insert into B values(104, 214 ,"20160331"); +insert into B values(108, 215 ,"20160331"); +insert into B values(109, 216 ,"20160331"); +insert into B values(110, 217 ,"20160331"); +insert into B values(111, 218 ,"20160331"); +insert into B values(112, 212 ,"20160401"); +insert into B values(113, 213 ,"20160401"); +insert into B values(114, 214 ,"20160331"); +insert into B values(118, 215 ,"20160331"); +insert into B values(119, 216 ,"20160331"); +insert into B values(120, 217 ,"20160331"); +insert into B values(121, 218 ,"20160331"); +insert into B values(122, 212 ,"20160401"); 
+insert into B values(123, 213 ,"20160401"); +insert into B values(124, 214 ,"20160331"); +insert into B values(128, 215 ,"20160331"); +insert into B values(129, 216 ,"20160331"); +insert into B values(130, 217 ,"20160331"); +insert into B values(131, 218 ,"20160331"); +insert into B values(132, 212 ,"20160401"); +insert into B values(133, 213 ,"20160401"); +insert into B values(134, 214 ,"20160331"); +insert into B values(138, 215 ,"20160331"); +insert into B values(139, 216 ,"20160331"); +insert into B values(140, 217 ,"20160331"); +insert into B values(141, 218 ,"20160331"); + +select * from B; + + +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_1.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_1.test new file mode 100644 index 000000000..c673d459a --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_1.test @@ -0,0 +1,28 @@ +SELECT +L_RETURNFLAG, +L_LINESTATUS, +SUM(L_QUANTITY) AS SUM_QTY, +SUM(L_EXTENDEDPRICE) AS SUM_BASE_PRICE, +SUM(L_EXTENDEDPRICE * (1 - L_DISCOUNT)) AS SUM_DISC_PRICE, +SUM(L_EXTENDEDPRICE * (1 - L_DISCOUNT) * (1 + L_TAX)) AS SUM_CHARGE, +AVG(L_QUANTITY) AS AVG_QTY, +AVG(L_EXTENDEDPRICE) AS AVG_PRICE, +AVG(L_DISCOUNT) AS AVG_DISC, +COUNT(*) AS COUNT_ORDER +FROM +LINEITEM +GROUP BY +L_RETURNFLAG, +L_LINESTATUS +ORDER BY +L_RETURNFLAG, +L_LINESTATUS, +SUM_QTY, +SUM_BASE_PRICE, +SUM_DISC_PRICE, +SUM_CHARGE, +AVG_QTY, +AVG_PRICE, +AVG_DISC, +COUNT_ORDER; +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_10.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_10.test new file mode 100644 index 000000000..b74e262f4 --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_10.test @@ -0,0 +1,39 @@ +SELECT + C_CUSTKEY, + C_NAME, + SUM(L_EXTENDEDPRICE * (1 - L_DISCOUNT)) AS REVENUE, + C_ACCTBAL, + N_NAME, + C_ADDRESS, + C_PHONE, + C_COMMENT +FROM + LINEITEM, + ORDERS, + CUSTOMER, + NATION +WHERE + C_CUSTKEY = O_CUSTKEY + AND L_ORDERKEY = O_ORDERKEY + AND O_ORDERDATE >= '1994-09-01' + AND O_ORDERDATE < '1994-12-01' + 
AND L_RETURNFLAG = 'R' + AND C_NATIONKEY = N_NATIONKEY +GROUP BY + C_CUSTKEY, + C_NAME, + C_ACCTBAL, + C_PHONE, + N_NAME, + C_ADDRESS, + C_COMMENT +ORDER BY + C_CUSTKEY, + C_NAME, + REVENUE, + C_ACCTBAL, + N_NAME, + C_ADDRESS, + C_PHONE, + C_COMMENT; +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_12.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_12.test new file mode 100644 index 000000000..af5e28f27 --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_12.test @@ -0,0 +1,31 @@ +SELECT + L_SHIPMODE, + SUM(CASE + WHEN O_ORDERPRIORITY = '1-URGENT' + OR O_ORDERPRIORITY = '2-HIGH' + THEN 1 + ELSE 0 + END) AS HIGH_LINE_COUNT, + SUM(CASE + WHEN O_ORDERPRIORITY <> '1-URGENT' + AND O_ORDERPRIORITY <> '2-HIGH' + THEN 1 + ELSE 0 + END) AS LOW_LINE_COUNT +FROM + ORDERS, + LINEITEM +WHERE + O_ORDERKEY = L_ORDERKEY + AND L_SHIPMODE IN ('RAIL', 'MAIL') + AND L_COMMITDATE < L_RECEIPTDATE + AND L_SHIPDATE < L_COMMITDATE + AND L_RECEIPTDATE >= '1997-01-01' + AND L_RECEIPTDATE <'1998-01-01' +GROUP BY + L_SHIPMODE +ORDER BY + L_SHIPMODE, + HIGH_LINE_COUNT, + LOW_LINE_COUNT; +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_13.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_13.test new file mode 100644 index 000000000..4439139be --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_13.test @@ -0,0 +1,21 @@ +SELECT + C_COUNT, + COUNT(*) AS CUSTDIST +FROM + ( + SELECT + C_CUSTKEY, + COUNT(O_ORDERKEY) AS C_COUNT + FROM + CUSTOMER LEFT OUTER JOIN ORDERS ON + C_CUSTKEY = O_CUSTKEY + AND O_COMMENT NOT LIKE '%UNUSUAL%DEPOSITS%' + GROUP BY + C_CUSTKEY + ) AS C_ORDERS +GROUP BY + C_COUNT +ORDER BY + CUSTDIST DESC, + C_COUNT DESC; +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_17.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_17.test new file mode 100644 index 000000000..5f1a6d047 --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_17.test @@ -0,0 +1,15 @@ +SELECT + SUM(L_EXTENDEDPRICE) / 7.0 AS AVG_YEARLY +FROM + 
LINEITEM, + PART, + (SELECT L_PARTKEY AS AGG_PARTKEY, 0.2 * AVG(L_QUANTITY) AS AVG_QUANTITY FROM LINEITEM GROUP BY L_PARTKEY) PART_AGG +WHERE + P_PARTKEY = L_PARTKEY + AND AGG_PARTKEY = L_PARTKEY + AND P_BRAND = 'BRAND#52' + AND P_CONTAINER = 'JUMBO JAR' + AND L_QUANTITY < AVG_QUANTITY +ORDER BY + AVG_YEARLY; +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_3.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_3.test new file mode 100644 index 000000000..fb0a8f377 --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_3.test @@ -0,0 +1,25 @@ +SELECT + L_ORDERKEY, + SUM(L_EXTENDEDPRICE * (1 - L_DISCOUNT)) AS REVENUE, + O_ORDERDATE, + O_SHIPPRIORITY +FROM + LINEITEM, + ORDERS, + CUSTOMER +WHERE + C_MKTSEGMENT = 'HOUSEHOLD' + AND C_CUSTKEY = O_CUSTKEY + AND L_ORDERKEY = O_ORDERKEY + AND O_ORDERDATE < '1995-03-04' + AND L_SHIPDATE > '1995-03-04' +GROUP BY + L_ORDERKEY, + O_ORDERDATE, + O_SHIPPRIORITY +ORDER BY + L_ORDERKEY, + REVENUE DESC, + O_ORDERDATE, + O_SHIPPRIORITY; +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_5.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_5.test new file mode 100644 index 000000000..dfe2f85e7 --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_5.test @@ -0,0 +1,26 @@ +SELECT + N_NAME, + SUM(L_EXTENDEDPRICE * (1 - L_DISCOUNT)) AS REVENUE +FROM + CUSTOMER, + ORDERS, + LINEITEM, + SUPPLIER, + NATION, + REGION +WHERE + C_CUSTKEY = O_CUSTKEY + AND L_ORDERKEY = O_ORDERKEY + AND L_SUPPKEY = S_SUPPKEY + AND C_NATIONKEY = S_NATIONKEY + AND S_NATIONKEY = N_NATIONKEY + AND N_REGIONKEY = R_REGIONKEY + AND R_NAME = 'AMERICA' + AND O_ORDERDATE >= '1993-01-01' + AND O_ORDERDATE <='1994-12-31' +GROUP BY + N_NAME +ORDER BY + N_NAME, + REVENUE DESC; +exit; diff --git a/sbin/claims-test/testcase/tpc_sql/tpc_sql_6.test b/sbin/claims-test/testcase/tpc_sql/tpc_sql_6.test new file mode 100644 index 000000000..81540d0a8 --- /dev/null +++ b/sbin/claims-test/testcase/tpc_sql/tpc_sql_6.test @@ -0,0 +1,12 @@ +SELECT 
+SUM(L_EXTENDEDPRICE * L_DISCOUNT) AS REVENUE +FROM +LINEITEM +WHERE +L_SHIPDATE >= '1993-01-01' +AND L_SHIPDATE < '1994-12-31' +AND L_DISCOUNT BETWEEN 0.04 - 0.01 AND 0.04 + 0.01 +AND L_QUANTITY < 25 +ORDER BY +REVENUE; +exit; diff --git a/sbin/claims-test/testcase/update.test b/sbin/claims-test/testcase/update.test new file mode 100644 index 000000000..04775ee95 --- /dev/null +++ b/sbin/claims-test/testcase/update.test @@ -0,0 +1,35 @@ +drop table if exists t1; +create table t1(a int, b int, c int); +create projection on t1(a, b, c) number = 1 partitioned on row_id; +show tables; +desc t1; +insert into t1 values(1, 1, 1); +insert into t1 values(1, 2, 1); +insert into t1 values(1, 3, 1); +insert into t1 values(1, 4, 1); +insert into t1 values(1, 5, 1); +insert into t1 values(1, 6, 1); +insert into t1 values(1, 7, 1); +insert into t1 values(1, 8, 1); +insert into t1 values(1, 9, 1); + +select * from t1; + +update t1 set a = 2, c = 2 where b = 2; +select * from t1; +update t1 set a = 3, c = 3 where b = 3; +select * from t1; +update t1 set a = 4, c = 4 where b = 4; +select * from t1; +update t1 set a = 5, c = 5 where b = 5; +select * from t1; +update t1 set a = 6, c = 6 where b = 6; +select * from t1; +update t1 set a = 7, c = 7 where b = 7; +select * from t1; +update t1 set a = 8, c = 8 where b = 8; +select * from t1; +update t1 set a = 9, c = 9 where b = 9; +select * from t1; + +exit; diff --git a/sbin/claims-test/testresult/ddl_tpch_sf10_1partition-1.result b/sbin/claims-test/testresult/ddl_tpch_sf10_1partition-1.result deleted file mode 100644 index 91fbc8451..000000000 --- a/sbin/claims-test/testresult/ddl_tpch_sf10_1partition-1.result +++ /dev/null @@ -1,659 +0,0 @@ - ______ __ ___ __ .___ ___. _______. - / || | / \ | | | \/ | / | - | ,----'| | / ^ \ | | | \ / | | (----` - | | | | / /_\ \ | | | |\/| | \ \ - | `----.| `----. 
/ _____ \ | | | | | | .----) | - \______||_______|/__/ \__\ |__| |__| |__| |_______/ - - -----------CLuster-Aware In-Memory Sql query engine---------- - - -[?1034hCLAIMS>create table PART( - > P_PARTKEY bigint unsigned, - > P_NAME varchar(55), - > P_MFGR varchar(25), - > P_BRAND varchar(10), - > P_TYPE varchar(25), - > P_SIZE int, - > P_CONTAINER varchar(10), - > P_RETAILPRICE decimal(20, 4), - > P_COMMENT varchar(23) - > ); -create table successfully -CLAIMS>create projection on PART( - > P_PARTKEY, - > P_NAME, - > P_MFGR, - > P_BRAND, - > P_TYPE, - > P_SIZE, - > P_CONTAINER, - > P_RETAILPRICE, - > P_COMMENT - > ) number = 1 partitioned on P_PARTKEY; -create projection successfully -CLAIMS>load table PART from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl " with '|','\n'; -load data successfully (34.6878 sec) - -WARNINGS: -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 19 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 11 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 34 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 43 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 58 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 67 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 35 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 82 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 91 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 59 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 106 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 115 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 130 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 139 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 83 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 107 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 131 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 154 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 155 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 163 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 178 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 202 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 187 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 179 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 226 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 211 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 203 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 250 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 235 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 227 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 274 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 259 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 251 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 283 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 298 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 275 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 307 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 322 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 299 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 346 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 323 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 331 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 370 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 355 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 347 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 394 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 379 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 371 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 403 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 395 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 418 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 442 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 427 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 419 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 466 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 451 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 490 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 443 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 475 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 514 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 538 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 499 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table SUPPLIER( - > S_SUPPKEY bigint unsigned, - > S_NAME varchar(25), - > S_ADDRESS varchar(40), - > S_NATIONKEY bigint unsigned, - > S_PHONE varchar(15), - > S_ACCTBAL 
decimal(20,4), - > S_COMMENT varchar(101) - > ); -create table successfully -CLAIMS>create projection on SUPPLIER( - > S_SUPPKEY, - > S_NAME, - > S_ADDRESS, - > S_NATIONKEY, - > S_PHONE, - > S_ACCTBAL, - > S_COMMENT - > ) number = 1 partitioned on S_SUPPKEY; -create projection successfully -CLAIMS>load table SUPPLIER from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/supp lier.tbl" with '|','\n'; -load data successfully (2.03393 sec) - -WARNINGS: -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 38 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 62 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 49 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 86 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 73 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 110 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 97 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 121 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than 
there were input columns -Line: 134 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 145 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 158 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 169 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 182 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 193 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 206 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 217 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 230 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 241 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 254 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 265 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 278 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 289 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it 
contained more data than there were input columns -Line: 302 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 313 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 326 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 350 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 337 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 29 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 53 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 77 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 101 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 125 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 149 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 173 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 197 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was 
truncated; it contained more data than there were input columns -Line: 221 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 245 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 269 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 293 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 317 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 341 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 27 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 51 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 75 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 99 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 123 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 147 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 171 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 195 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 219 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 243 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 267 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 291 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 315 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 339 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 52 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table PARTSUPP( - > PS_PARTKEY bigint unsigned, - > PS_SUPPKEY bigint unsigned, - > PS_AVAILQTY int, - > PS_SUPPLYCOST decimal(20,2), - > PS_COMMENT varchar(199) - > ); -create table successfully -CLAIMS> - > - > - > create projection on PARTSUPP( - > PS_PARTKEY, - > PS_SUPPKEY, - > PS_AVAILQTY, - > PS_SUPPLYCOST, - > PS_COMMENT - > ) number = 1 partitioned on PS_PARTKEY; -create projection successfully 
-CLAIMS>load table PARTSUPP from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/part supp.tbl" with '|','\n'; -load data successfully (78.7187 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 16 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 40 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 64 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 88 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 112 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 39 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 63 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 87 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there 
were input columns -Line: 111 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 135 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 136 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 159 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 160 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 183 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 207 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 184 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 208 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more 
data than there were input columns -Line: 231 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 232 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 255 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 256 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 279 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 280 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 303 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 304 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 327 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 328 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; 
it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 351 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 352 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 375 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 376 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 399 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 400 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 423 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 424 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 447 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 448 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 471 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 472 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 495 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 496 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table CUSTOMER( - > C_CUSTKEY bigint unsigned, - > C_NAME varchar(25), - > C_ADDRESS varchar(40), - > C_NATIONKEY bigint unsigned, - > C_PHONE varchar(15), - > C_ACCTBAL decimal(20,4), - > C_MKTSEGMENT varchar(10), - > C_COMMENT varchar(117) - > ); -create table successfully -CLAIMS>create projection on CUSTOMER( - > C_CUSTKEY, - > C_NAME, - > C_ADDRESS, - > C_NATIONKEY, - > C_PHONE, - > C_ACCTBAL, - > C_MKTSEGMENT, - > C_COMMENT - > ) number = 1 partitioned on C_CUSTKEY; -create projection successfully -CLAIMS>load table CUSTOMER from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/cust omer.tbl" with '|','\n'; -load data successfully (36.0025 sec) - -WARNINGS: -Line: 4 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 52 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 76 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 100 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 26 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 50 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 74 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 98 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 122 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 124 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 146 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 148 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 170 
in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 172 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 194 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 218 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 196 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 242 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 220 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 266 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 244 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 290 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 268 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 292 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 30 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input 
columns -Line: 54 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 78 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 102 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 126 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 150 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 174 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 198 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 222 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 246 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 270 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 294 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 49 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there 
were input columns -Line: 73 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 97 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 121 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 145 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 169 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 193 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 217 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 241 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 265 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 289 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 29 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 53 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 77 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data 
than there were input columns -Line: 101 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 125 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 149 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 173 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 197 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 221 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 245 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table ORDERS( - > O_ORDERKEY bigint unsigned, - > O_CUSTKEY bigint unsigned, - > O_ORDERSTATUS varchar(1), - > O_TOTALPRICE decimal(20,4), - > O_ORDERDATE date, - > O_ORDERPRIORITY varchar(15), - > O_CLERK varchar(15), - > O_SHIPPRIORITY int, - > O_COMMENT varchar(79) - > ); -create table successfully -CLAIMS>create projection on ORDERS( - > O_ORDERKEY, - > O_CUSTKEY, - > O_ORDERSTATUS, - > O_TOTALPRICE, - > O_ORDERDATE, - > O_ORDERPRIORITY, - > O_CLERK, - > O_SHIPPRIORITY, - > O_COMMENT - > ) number = 1 partitioned on O_ORDERKEY; -create projection successfully -CLAIMS>load table ORDERS from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders .tbl" with '|','\n'; -load data successfully (176.183 sec) - -WARNINGS: -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 29 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 53 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 77 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 101 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 125 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 27 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 51 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 75 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 99 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 123 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 149 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 147 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 173 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 197 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 171 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 221 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 245 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 195 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 269 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 219 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 293 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 317 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 243 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 267 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 341 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 291 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 365 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 315 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 389 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 413 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 339 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 437 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 363 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 387 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 411 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 435 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 461 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 39 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 485 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 63 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 459 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 509 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 87 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 483 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 111 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 533 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 135 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 557 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 581 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 159 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 507 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 605 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 183 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 531 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 207 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 555 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 231 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 255 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 579 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 629 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table LINEITEM( - > L_ORDERKEY bigint unsigned, - > L_PARTKEY bigint unsigned, - > L_SUPPKEY bigint unsigned, - > L_LINENUMBER int, - > L_QUANTITY decimal(20,4), - > L_EXTENDEDPRICE decimal(20,4), - > L_DISCOUNT decimal(20,4), - > L_TAX decimal(20,4), - > L_RETURNFLAG varchar(1), - > L_LINESTATUS varchar(1), - > L_SHIPDATE date, - > L_COMMITDATE date, - > L_RECEIPTDATE date, - > L_SHIPINSTRUCT varchar(25), - > L_SHIPMODE varchar(10), - > L_COMMENT varchar(44) - > ); -create table successfully -CLAIMS> - > create projection on LINEITEM( - > L_ORDERKEY, - > L_PARTKEY, - > L_SUPPKEY, - > L_LINENUMBER, - > L_QUANTITY, - > L_EXTENDEDPRICE, - > L_DISCOUNT, - > L_TAX, - > L_RETURNFLAG, - > L_LINESTATUS, - > L_SHIPDATE, - > L_COMMITDATE, - > L_RECEIPTDATE, - > L_SHIPINSTRUCT, - > L_SHIPMODE, - > L_COMMENT - > ) number = 1 partitioned on L_ORDERKEY; -create projection successfully -CLAIMS> - > load table LINEITEM from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/line item.tbl" with '|','\n'; -load data successfully (1027.7 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it 
contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 18 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 42 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 66 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 90 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 114 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 34 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 58 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 82 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 106 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was 
truncated; it contained more data than there were input columns -Line: 130 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 31 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 55 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 79 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 103 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 127 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 138 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 151 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 162 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 175 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 154 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 186 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 199 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 210 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 223 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 178 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 234 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 247 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 258 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 202 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns 
-Line: 282 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 271 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 226 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 306 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 295 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 330 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 250 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 319 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 354 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 343 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there 
were input columns -Line: 378 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 274 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 367 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 391 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 298 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 322 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table NATION( - > N_NATIONKEY bigint unsigned, - > N_NAME varchar(25), - > N_REGIONKEY bigint unsigned, - > N_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on NATION( - > N_NATIONKEY, - > N_NAME, - > N_REGIONKEY, - > N_COMMENT - > ) number = 1 partitioned on N_NATIONKEY; -create projection successfully -CLAIMS> - > load table NATION from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/nation .tbl" with '|','\n'; -load data successfully (0.326701 sec) - -WARNINGS: -Line: 13 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 22 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 19 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 18 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 9 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 11 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 12 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 16 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 17 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 21 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 20 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 8 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 23 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > - > - > create table REGION( - > R_REGIONKEY bigint unsigned, - > R_NAME varchar(25), - > R_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on REGION( - > R_REGIONKEY, - > R_NAME, - > R_COMMENT - > ) number = 1 partitioned on R_REGIONKEY; -create projection successfully -CLAIMS>load table REGION from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/region .tbl" with '|','\n'; -load data successfully (0.211902 sec) - -WARNINGS: -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were 
input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > exit; diff --git a/sbin/claims-test/testresult/ddl_tpch_sf10_4partition-1.result b/sbin/claims-test/testresult/ddl_tpch_sf10_4partition-1.result deleted file mode 100644 index 5c66973d8..000000000 --- a/sbin/claims-test/testresult/ddl_tpch_sf10_4partition-1.result +++ /dev/null @@ -1,659 +0,0 @@ - ______ __ ___ __ .___ ___. _______. - / || | / \ | | | \/ | / | - | ,----'| | / ^ \ | | | \ / | | (----` - | | | | / /_\ \ | | | |\/| | \ \ - | `----.| `----. / _____ \ | | | | | | .----) | - \______||_______|/__/ \__\ |__| |__| |__| |_______/ - - -----------CLuster-Aware In-Memory Sql query engine---------- - - -[?1034hCLAIMS>create table PART( - > P_PARTKEY bigint unsigned, - > P_NAME varchar(55), - > P_MFGR varchar(25), - > P_BRAND varchar(10), - > P_TYPE varchar(25), - > P_SIZE int, - > P_CONTAINER varchar(10), - > P_RETAILPRICE decimal(20,4), - > P_COMMENT varchar(23) - > ); -create table successfully -CLAIMS>create projection on PART( - > P_PARTKEY, - > P_NAME, - > P_MFGR, - > P_BRAND, - > P_TYPE, - > P_SIZE, - > P_CONTAINER, - > P_RETAILPRICE, - > P_COMMENT - > ) number = 4 partitioned on P_PARTKEY; -create projection successfully -CLAIMS>load table PART from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl " with '|','\n'; -load data successfully (24.4233 sec) - -WARNINGS: -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 52 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data 
than there were input columns -Line: 76 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 100 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 124 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 148 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 172 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 196 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 220 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 244 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 268 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 292 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 316 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 340 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 364 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 388 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 412 
in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 436 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 460 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 484 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 508 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 532 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 556 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 580 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 604 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 628 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 652 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 676 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 700 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 724 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 748 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 772 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 796 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 820 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 844 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 868 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 892 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 916 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 940 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 964 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 988 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1012 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1036 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1060 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1084 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1108 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1132 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1156 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1180 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1204 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1228 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1252 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1276 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1300 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1324 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1348 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1372 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1396 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1420 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1444 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1468 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1492 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table SUPPLIER( - > S_SUPPKEY bigint unsigned, - > S_NAME varchar(25), - > S_ADDRESS varchar(40), - > S_NATIONKEY bigint unsigned, - > S_PHONE varchar(15), - > S_ACCTBAL decimal(20, 4), - > S_COMMENT varchar(101) - > ); -create table successfully -CLAIMS>create projection on SUPPLIER( - > S_SUPPKEY, - > S_NAME, - > S_ADDRESS, - > S_NATIONKEY, - > S_PHONE, - > S_ACCTBAL, - > S_COMMENT - > ) number = 4 partitioned on S_SUPPKEY; -create projection successfully -CLAIMS>load table SUPPLIER from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/supp lier.tbl" with '|','\n'; -load data successfully (2.2888 sec) - -WARNINGS: -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 31 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 55 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 79 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 103 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 127 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it 
contained more data than there were input columns -Line: 151 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 175 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 199 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 223 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 247 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 271 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 295 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 319 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 343 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 367 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 391 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 415 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 439 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 463 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl 
was truncated; it contained more data than there were input columns -Line: 487 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 511 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 535 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 559 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 583 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 607 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 631 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 655 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 679 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 703 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 727 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 751 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 775 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 799 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 823 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 847 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 871 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 895 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 919 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 943 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 967 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 991 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1015 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1039 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1063 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1087 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1111 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input 
columns -Line: 1135 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1159 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1183 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1207 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1231 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1255 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1279 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1303 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1327 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1351 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1375 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1399 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1423 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1447 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained 
more data than there were input columns -Line: 1471 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1495 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table PARTSUPP( - > PS_PARTKEY bigint unsigned, - > PS_SUPPKEY bigint unsigned, - > PS_AVAILQTY int, - > PS_SUPPLYCOST decimal(20,2), - > PS_COMMENT varchar(199) - > ); -create table successfully -CLAIMS> - > - > - > create projection on PARTSUPP( - > PS_PARTKEY, - > PS_SUPPKEY, - > PS_AVAILQTY, - > PS_SUPPLYCOST, - > PS_COMMENT - > ) number = 4 partitioned on PS_PARTKEY; -create projection successfully -CLAIMS>load table PARTSUPP from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/part supp.tbl" with '|','\n'; -load data successfully (105.701 sec) - -WARNINGS: -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 26 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 50 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 74 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 98 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 122 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 146 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 170 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl 
was truncated; it contained more data than there were input columns -Line: 194 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 218 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 242 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 266 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 290 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 314 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 338 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 362 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 386 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 410 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 434 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 458 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 482 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 506 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 530 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 554 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 578 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 602 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 626 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 650 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 674 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 698 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 722 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 746 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 770 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 794 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 818 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns 
-Line: 842 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 866 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 890 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 914 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 938 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 962 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 986 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1010 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1034 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1058 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1082 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1106 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1130 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1154 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than 
there were input columns -Line: 1178 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1202 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1226 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1250 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1274 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1298 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1322 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1346 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1370 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1394 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1418 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1442 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1466 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1490 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was 
truncated; it contained more data than there were input columns - - -CLAIMS> - > create table CUSTOMER( - > C_CUSTKEY bigint unsigned, - > C_NAME varchar(25), - > C_ADDRESS varchar(40), - > C_NATIONKEY bigint unsigned, - > C_PHONE varchar(15), - > C_ACCTBAL decimal(20,4), - > C_MKTSEGMENT varchar(10), - > C_COMMENT varchar(117) - > ); -create table successfully -CLAIMS>create projection on CUSTOMER( - > C_CUSTKEY, - > C_NAME, - > C_ADDRESS, - > C_NATIONKEY, - > C_PHONE, - > C_ACCTBAL, - > C_MKTSEGMENT, - > C_COMMENT - > ) number = 4 partitioned on C_CUSTKEY; -create projection successfully -CLAIMS>load table CUSTOMER from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/cust omer.tbl" with '|','\n'; -load data successfully (36.9049 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it 
contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl 
was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input 
columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table ORDERS( - > O_ORDERKEY bigint unsigned, - > O_CUSTKEY bigint unsigned, - 
> O_ORDERSTATUS varchar(1), - > O_TOTALPRICE decimal(20,4), - > O_ORDERDATE date, - > O_ORDERPRIORITY varchar(15), - > O_CLERK varchar(15), - > O_SHIPPRIORITY int, - > O_COMMENT varchar(79) - > ); -create table successfully -CLAIMS>create projection on ORDERS( - > O_ORDERKEY, - > O_CUSTKEY, - > O_ORDERSTATUS, - > O_TOTALPRICE, - > O_ORDERDATE, - > O_ORDERPRIORITY, - > O_CLERK, - > O_SHIPPRIORITY, - > O_COMMENT - > ) number = 4 partitioned on O_ORDERKEY; -create projection successfully -CLAIMS>load table ORDERS from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders .tbl" with '|','\n'; -load data successfully (103.319 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was 
truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; 
it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it 
contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it 
contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table LINEITEM( - > L_ORDERKEY bigint unsigned, - > L_PARTKEY bigint unsigned, - > L_SUPPKEY bigint unsigned, - > L_LINENUMBER int, - > L_QUANTITY decimal(20,4), - > L_EXTENDEDPRICE decimal(20,4), - > L_DISCOUNT decimal(20,4), - > L_TAX decimal(20,4), - > L_RETURNFLAG varchar(1), - > L_LINESTATUS varchar(1), - > L_SHIPDATE date, - > L_COMMITDATE 
date, - > L_RECEIPTDATE date, - > L_SHIPINSTRUCT varchar(25), - > L_SHIPMODE varchar(10), - > L_COMMENT varchar(44) - > ); -create table successfully -CLAIMS> - > create projection on LINEITEM( - > L_ORDERKEY, - > L_PARTKEY, - > L_SUPPKEY, - > L_LINENUMBER, - > L_QUANTITY, - > L_EXTENDEDPRICE, - > L_DISCOUNT, - > L_TAX, - > L_RETURNFLAG, - > L_LINESTATUS, - > L_SHIPDATE, - > L_COMMITDATE, - > L_RECEIPTDATE, - > L_SHIPINSTRUCT, - > L_SHIPMODE, - > L_COMMENT - > ) number = 4 partitioned on L_ORDERKEY; -create projection successfully -CLAIMS> - > load table LINEITEM from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/line item.tbl" with '|','\n'; -load data successfully (899.308 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 
240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were 
input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data 
than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was 
truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table NATION( - > N_NATIONKEY bigint unsigned, - > N_NAME varchar(25), - > N_REGIONKEY bigint unsigned, - > N_COMMENT varchar(152) - > ); -create table successfully 
-CLAIMS> - > create projection on NATION( - > N_NATIONKEY, - > N_NAME, - > N_REGIONKEY, - > N_COMMENT - > ) number = 4 partitioned on N_NATIONKEY; -create projection successfully -CLAIMS> - > load table NATION from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/nation .tbl" with '|','\n'; -load data successfully (0.188071 sec) - -WARNINGS: -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 12 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 16 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 20 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 22 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 23 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data 
than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 11 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 13 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 17 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 18 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 8 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 21 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 9 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 19 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > - > - > create table REGION( - > R_REGIONKEY bigint unsigned, - > R_NAME varchar(25), - > R_COMMENT varchar(152) - > ); 
-create table successfully -CLAIMS> - > create projection on REGION( - > R_REGIONKEY, - > R_NAME, - > R_COMMENT - > ) number = 4 partitioned on R_REGIONKEY; -create projection successfully -CLAIMS>load table REGION from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/region .tbl" with '|','\n'; -load data successfully (0.181775 sec) - -WARNINGS: -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > exit; diff --git a/sbin/claims-test/testresult/ddl_tpch_sf10_8partition-1.result b/sbin/claims-test/testresult/ddl_tpch_sf10_8partition-1.result deleted file mode 100644 index d71f90f90..000000000 --- a/sbin/claims-test/testresult/ddl_tpch_sf10_8partition-1.result +++ /dev/null @@ -1,659 +0,0 @@ - ______ __ ___ __ .___ ___. _______. - / || | / \ | | | \/ | / | - | ,----'| | / ^ \ | | | \ / | | (----` - | | | | / /_\ \ | | | |\/| | \ \ - | `----.| `----. 
/ _____ \ | | | | | | .----) | - \______||_______|/__/ \__\ |__| |__| |__| |_______/ - - -----------CLuster-Aware In-Memory Sql query engine---------- - - -[?1034hCLAIMS>create table PART( - > P_PARTKEY bigint unsigned, - > P_NAME varchar(55), - > P_MFGR varchar(25), - > P_BRAND varchar(10), - > P_TYPE varchar(25), - > P_SIZE int, - > P_CONTAINER varchar(10), - > P_RETAILPRICE decimal(20,4), - > P_COMMENT varchar(23) - > ); -create table successfully -CLAIMS>create projection on PART( - > P_PARTKEY, - > P_NAME, - > P_MFGR, - > P_BRAND, - > P_TYPE, - > P_SIZE, - > P_CONTAINER, - > P_RETAILPRICE, - > P_COMMENT - > ) number = 8 partitioned on P_PARTKEY; -create projection successfully -CLAIMS>load table PART from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl " with '|','\n'; -load data successfully (7.80764 sec) - -WARNINGS: -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 29 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 53 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 77 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 101 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 125 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 149 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 173 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 197 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 221 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 245 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 269 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 293 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 317 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 341 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 365 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 389 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 413 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 437 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 461 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 485 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 509 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 533 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 557 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 581 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 605 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 629 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 653 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 677 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 701 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 725 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 749 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 773 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 797 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 821 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 845 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 869 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 893 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 917 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 941 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 965 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 989 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1013 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1037 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1061 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1085 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1109 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1133 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1157 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1181 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1205 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1229 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1253 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1277 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1301 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1325 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1349 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1373 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1397 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1421 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1445 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1469 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns -Line: 1493 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/part.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table SUPPLIER( - > S_SUPPKEY bigint unsigned, - > S_NAME varchar(25), - > S_ADDRESS varchar(40), - > S_NATIONKEY bigint unsigned, - > S_PHONE varchar(15), - > 
S_ACCTBAL decimal(20,4), - > S_COMMENT varchar(101) - > ); -create table successfully -CLAIMS>create projection on SUPPLIER( - > S_SUPPKEY, - > S_NAME, - > S_ADDRESS, - > S_NATIONKEY, - > S_PHONE, - > S_ACCTBAL, - > S_COMMENT - > ) number = 8 partitioned on S_SUPPKEY; -create projection successfully -CLAIMS>load table SUPPLIER from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/supp lier.tbl" with '|','\n'; -load data successfully (2.583 sec) - -WARNINGS: -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 27 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 51 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 75 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 99 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 123 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 147 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 171 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 195 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 219 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 243 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more 
data than there were input columns -Line: 267 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 291 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 315 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 339 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 363 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 387 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 411 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 435 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 459 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 483 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 507 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 531 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 555 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 579 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; 
it contained more data than there were input columns -Line: 603 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 627 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 651 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 675 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 699 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 723 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 747 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 771 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 795 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 819 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 843 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 867 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 891 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 915 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 939 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 963 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 987 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1011 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1035 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1059 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1083 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1107 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1131 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1155 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1179 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1203 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1227 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input 
columns -Line: 1251 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1275 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1299 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1323 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1347 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1371 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1395 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1419 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1443 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1467 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1491 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/supplier.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table PARTSUPP( - > PS_PARTKEY bigint unsigned, - > PS_SUPPKEY bigint unsigned, - > PS_AVAILQTY int, - > PS_SUPPLYCOST decimal(20,2), - > PS_COMMENT varchar(199) - > ); -create table successfully -CLAIMS> - > - > - > create projection on PARTSUPP( - > PS_PARTKEY, - > PS_SUPPKEY, - > PS_AVAILQTY, - > PS_SUPPLYCOST, - > PS_COMMENT - > ) number = 8 partitioned on 
PS_PARTKEY; -create projection successfully -CLAIMS>load table PARTSUPP from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/part supp.tbl" with '|','\n'; -load data successfully (76.2355 sec) - -WARNINGS: -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 49 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 73 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 97 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 121 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 145 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 169 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 193 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 217 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 241 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 265 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 289 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl 
was truncated; it contained more data than there were input columns -Line: 313 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 337 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 361 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 385 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 409 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 433 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 457 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 481 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 505 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 529 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 553 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 577 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 601 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 625 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 649 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 673 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 697 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 721 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 745 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 769 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 793 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 817 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 841 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 865 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 889 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 913 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 937 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns 
-Line: 961 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 985 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1009 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1033 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1057 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1081 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1105 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1129 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1153 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1177 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1201 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1225 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1249 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1273 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data 
than there were input columns -Line: 1297 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1321 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1345 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1369 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1393 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1417 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1441 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1465 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1489 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/partsupp.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table CUSTOMER( - > C_CUSTKEY bigint unsigned, - > C_NAME varchar(25), - > C_ADDRESS varchar(40), - > C_NATIONKEY bigint unsigned, - > C_PHONE varchar(15), - > C_ACCTBAL decimal(20,4), - > C_MKTSEGMENT varchar(10), - > C_COMMENT varchar(117) - > ); -create table successfully -CLAIMS>create projection on CUSTOMER( - > C_CUSTKEY, - > C_NAME, - > C_ADDRESS, - > C_NATIONKEY, - > C_PHONE, - > C_ACCTBAL, - > C_MKTSEGMENT, - > C_COMMENT - > ) number = 8 partitioned on C_CUSTKEY; -create projection successfully -CLAIMS>load table CUSTOMER from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/cust omer.tbl" with '|','\n'; -load data successfully 
(22.6057 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 38 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 62 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 86 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 110 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 134 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 158 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more 
data than there were input columns -Line: 182 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 206 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 230 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 254 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 278 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 302 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 326 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; 
it contained more data than there were input columns -Line: 350 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 374 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 398 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 422 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 446 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 470 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 494 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 518 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 542 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 566 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 590 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 614 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 638 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 662 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns 
-Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 686 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 710 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 734 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns -Line: 758 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/customer.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table ORDERS( - > O_ORDERKEY bigint unsigned, - > O_CUSTKEY bigint unsigned, - > O_ORDERSTATUS varchar(1), - > O_TOTALPRICE decimal(20,4), - > O_ORDERDATE date, - > O_ORDERPRIORITY varchar(15), - > O_CLERK varchar(15), - > O_SHIPPRIORITY int, - > O_COMMENT varchar(79) - > ); -create table successfully -CLAIMS>create projection on ORDERS( - > O_ORDERKEY, - > O_CUSTKEY, - > O_ORDERSTATUS, - > O_TOTALPRICE, - > O_ORDERDATE, - > O_ORDERPRIORITY, - > O_CLERK, - > O_SHIPPRIORITY, - > O_COMMENT - > ) number = 8 partitioned on O_ORDERKEY; -create projection successfully -CLAIMS>load table ORDERS from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/orders .tbl" with '|','\n'; -load data successfully (77.1838 sec) - -WARNINGS: -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it 
contained more data than there were input columns -Line: 26 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 50 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 74 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 98 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 122 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 146 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 170 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 194 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 218 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 242 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 266 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 290 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 314 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 338 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more 
data than there were input columns -Line: 362 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 386 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 410 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 434 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 458 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 482 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 506 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 530 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 554 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 578 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 602 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 626 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 650 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 674 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than 
there were input columns -Line: 698 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 722 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 746 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 770 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 794 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 818 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 842 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 866 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 890 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 914 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 938 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 962 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 986 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1010 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were 
input columns -Line: 1034 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1058 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1082 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1106 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1130 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1154 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1178 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1202 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1226 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1250 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1274 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1298 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1322 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1346 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there 
were input columns -Line: 1370 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1394 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1418 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1442 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1466 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns -Line: 1490 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/orders.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table LINEITEM( - > L_ORDERKEY bigint unsigned, - > L_PARTKEY bigint unsigned, - > L_SUPPKEY bigint unsigned, - > L_LINENUMBER int, - > L_QUANTITY decimal(20,4), - > L_EXTENDEDPRICE decimal(20,4), - > L_DISCOUNT decimal(20,4), - > L_TAX decimal(20,4), - > L_RETURNFLAG varchar(1), - > L_LINESTATUS varchar(1), - > L_SHIPDATE date, - > L_COMMITDATE date, - > L_RECEIPTDATE date, - > L_SHIPINSTRUCT varchar(25), - > L_SHIPMODE varchar(10), - > L_COMMENT varchar(44) - > ); -create table successfully -CLAIMS> - > create projection on LINEITEM( - > L_ORDERKEY, - > L_PARTKEY, - > L_SUPPKEY, - > L_LINENUMBER, - > L_QUANTITY, - > L_EXTENDEDPRICE, - > L_DISCOUNT, - > L_TAX, - > L_RETURNFLAG, - > L_LINESTATUS, - > L_SHIPDATE, - > L_COMMITDATE, - > L_RECEIPTDATE, - > L_SHIPINSTRUCT, - > L_SHIPMODE, - > L_COMMENT - > ) number = 8 partitioned on L_ORDERKEY; -create projection successfully -CLAIMS> - > load table LINEITEM from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/line item.tbl" with '|','\n'; -load data successfully (942.684 sec) - -WARNINGS: -Line: 24 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 
360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were 
input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more 
data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl 
was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/lineitem.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table NATION( - > N_NATIONKEY bigint unsigned, - > N_NAME varchar(25), - > N_REGIONKEY bigint unsigned, - > N_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on NATION( - > N_NATIONKEY, - > N_NAME, - > N_REGIONKEY, - > N_COMMENT - > ) number = 8 partitioned on N_NATIONKEY; -create projection successfully -CLAIMS> - > load table NATION from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/nation .tbl" with '|','\n'; -load data successfully (0.299075 sec) - -WARNINGS: -Line: 16 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained 
more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 12 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 9 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 22 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 8 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 23 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 17 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 18 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input 
columns -Line: 19 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 20 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 21 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 11 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 13 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/nation.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > - > - > create table REGION( - > R_REGIONKEY bigint unsigned, - > R_NAME varchar(25), - > R_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on REGION( - > R_REGIONKEY, - > R_NAME, - > R_COMMENT - > ) number = 8 partitioned on R_REGIONKEY; -create projection successfully -CLAIMS>load table REGION from "/home/imdb/rawData/tpch-raw-data/tpch_sf10/region .tbl" with '|','\n'; -load data successfully (0.34907 sec) - -WARNINGS: -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained 
more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf10/region.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > exit; diff --git a/sbin/claims-test/testresult/ddl_tpch_sf1_1partition-1.result b/sbin/claims-test/testresult/ddl_tpch_sf1_1partition-1.result deleted file mode 100644 index b80d0c858..000000000 --- a/sbin/claims-test/testresult/ddl_tpch_sf1_1partition-1.result +++ /dev/null @@ -1,659 +0,0 @@ - ______ __ ___ __ .___ ___. _______. - / || | / \ | | | \/ | / | - | ,----'| | / ^ \ | | | \ / | | (----` - | | | | / /_\ \ | | | |\/| | \ \ - | `----.| `----. / _____ \ | | | | | | .----) | - \______||_______|/__/ \__\ |__| |__| |__| |_______/ - - -----------CLuster-Aware In-Memory Sql query engine---------- - - -[?1034hCLAIMS>create table PART( - > P_PARTKEY bigint unsigned, - > P_NAME varchar(55), - > P_MFGR varchar(25), - > P_BRAND varchar(10), - > P_TYPE varchar(25), - > P_SIZE int, - > P_CONTAINER varchar(10), - > P_RETAILPRICE decimal(20,4), - > P_COMMENT varchar(23) - > ); -create table successfully -CLAIMS>create projection on PART( - > P_PARTKEY, - > P_NAME, - > P_MFGR, - > P_BRAND, - > P_TYPE, - > P_SIZE, - > P_CONTAINER, - > P_RETAILPRICE, - > P_COMMENT - > ) number = 1 partitioned on P_PARTKEY; -create projection successfully -CLAIMS>load table PART from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl" with '|','\n'; -load data successfully (6.44511 sec) - -WARNINGS: -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 52 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it 
contained more data than there were input columns -Line: 76 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 100 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 124 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 148 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 172 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 196 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 220 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 244 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 268 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 292 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 316 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 340 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 364 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 388 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns 
-Line: 412 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 436 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 460 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 484 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 508 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 532 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 556 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 580 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 604 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 628 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 652 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 676 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 700 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 724 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 748 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 772 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 796 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 820 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 844 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 868 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 892 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 916 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 940 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 964 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 988 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1012 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1036 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1060 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1084 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1108 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1132 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1156 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1180 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1204 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1228 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1252 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1276 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1300 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1324 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1348 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1372 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1396 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1420 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1444 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1468 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1492 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table SUPPLIER( - > S_SUPPKEY bigint unsigned, - > S_NAME varchar(25), - > S_ADDRESS varchar(40), - > S_NATIONKEY bigint unsigned, - > S_PHONE varchar(15), - > S_ACCTBAL decimal(20,4), - > S_COMMENT varchar(101) - > ); -create table successfully -CLAIMS>create projection on SUPPLIER( - > S_SUPPKEY, - > S_NAME, - > S_ADDRESS, - > S_NATIONKEY, - > S_PHONE, - > S_ACCTBAL, - > S_COMMENT - > ) number = 1 partitioned on S_SUPPKEY; -create projection successfully -CLAIMS>load table SUPPLIER from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/suppl ier.tbl" with '|','\n'; -load data successfully (0.300669 sec) - -WARNINGS: -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 52 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 76 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 100 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 124 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more 
data than there were input columns -Line: 148 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 172 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 196 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 220 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 244 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 268 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 292 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 316 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 340 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 364 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 388 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 412 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 436 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 460 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained 
more data than there were input columns -Line: 484 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 508 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 532 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 556 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 580 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 604 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 628 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 652 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 676 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 700 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 724 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 748 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 772 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 796 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it 
contained more data than there were input columns -Line: 820 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 844 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 868 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 892 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 916 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 940 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 964 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 988 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1012 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1036 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1060 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1084 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1108 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1132 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was 
truncated; it contained more data than there were input columns -Line: 1156 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1180 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1204 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1228 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1252 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1276 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1300 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1324 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1348 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1372 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1396 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1420 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1444 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1468 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1492 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table PARTSUPP( - > PS_PARTKEY bigint unsigned, - > PS_SUPPKEY bigint unsigned, - > PS_AVAILQTY int, - > PS_SUPPLYCOST decimal(20,2), - > PS_COMMENT varchar(199) - > ); -create table successfully -CLAIMS> - > - > - > create projection on PARTSUPP( - > PS_PARTKEY, - > PS_SUPPKEY, - > PS_AVAILQTY, - > PS_SUPPLYCOST, - > PS_COMMENT - > ) number = 1 partitioned on PS_PARTKEY; -create projection successfully -CLAIMS>load table PARTSUPP from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/parts upp.tbl" with '|','\n'; -load data successfully (6.39342 sec) - -WARNINGS: -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 52 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 76 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 100 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 124 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 148 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 172 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns 
-Line: 196 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 220 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 244 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 268 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 292 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 316 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 340 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 364 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 388 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 412 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 436 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 460 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 484 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 508 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input 
columns -Line: 532 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 556 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 580 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 604 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 628 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 652 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 676 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 700 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 724 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 748 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 772 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 796 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 820 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 844 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were 
input columns -Line: 868 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 892 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 916 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 940 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 964 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 988 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1012 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1036 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1060 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1084 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1108 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1132 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1156 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1180 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than 
there were input columns -Line: 1204 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1228 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1252 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1276 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1300 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1324 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1348 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1372 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1396 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1420 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1444 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1468 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1492 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table CUSTOMER( - > C_CUSTKEY bigint unsigned, - > C_NAME varchar(25), - > 
C_ADDRESS varchar(40), - > C_NATIONKEY bigint unsigned, - > C_PHONE varchar(15), - > C_ACCTBAL decimal(20,4), - > C_MKTSEGMENT varchar(10), - > C_COMMENT varchar(117) - > ); -create table successfully -CLAIMS>create projection on CUSTOMER( - > C_CUSTKEY, - > C_NAME, - > C_ADDRESS, - > C_NATIONKEY, - > C_PHONE, - > C_ACCTBAL, - > C_MKTSEGMENT, - > C_COMMENT - > ) number = 1 partitioned on C_CUSTKEY; -create projection successfully -CLAIMS>load table CUSTOMER from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/custo mer.tbl" with '|','\n'; -load data successfully (5.82933 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there 
were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than 
there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data 
than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it 
contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table ORDERS( - > O_ORDERKEY bigint unsigned, - > O_CUSTKEY bigint unsigned, - > O_ORDERSTATUS varchar(1), - > O_TOTALPRICE decimal(20,4), - > O_ORDERDATE date, - > O_ORDERPRIORITY varchar(15), - > O_CLERK varchar(15), - > O_SHIPPRIORITY int, - > O_COMMENT varchar(79) - > ); -create table successfully -CLAIMS>create projection on 
ORDERS( - > O_ORDERKEY, - > O_CUSTKEY, - > O_ORDERSTATUS, - > O_TOTALPRICE, - > O_ORDERDATE, - > O_ORDERPRIORITY, - > O_CLERK, - > O_SHIPPRIORITY, - > O_COMMENT - > ) number = 1 partitioned on O_ORDERKEY; -create projection successfully -CLAIMS>load table ORDERS from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders. tbl" with '|','\n'; -load data successfully (3.99672 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table LINEITEM( - > L_ORDERKEY bigint unsigned, - > L_PARTKEY bigint unsigned, - > L_SUPPKEY bigint unsigned, - > L_LINENUMBER int, - > L_QUANTITY decimal(20,4), - > L_EXTENDEDPRICE decimal(20,4), - > L_DISCOUNT decimal(20,4), - > L_TAX decimal(20,4), - > L_RETURNFLAG varchar(1), - > L_LINESTATUS varchar(1), - > L_SHIPDATE date, - > L_COMMITDATE date, - > L_RECEIPTDATE date, - > L_SHIPINSTRUCT varchar(25), - > L_SHIPMODE varchar(10), - > L_COMMENT varchar(44) - > ); -create table successfully -CLAIMS> - > create projection on LINEITEM( - > L_ORDERKEY, - > L_PARTKEY, 
- > L_SUPPKEY, - > L_LINENUMBER, - > L_QUANTITY, - > L_EXTENDEDPRICE, - > L_DISCOUNT, - > L_TAX, - > L_RETURNFLAG, - > L_LINESTATUS, - > L_SHIPDATE, - > L_COMMITDATE, - > L_RECEIPTDATE, - > L_SHIPINSTRUCT, - > L_SHIPMODE, - > L_COMMENT - > ) number = 1 partitioned on L_ORDERKEY; -create projection successfully -CLAIMS> - > load table LINEITEM from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/linei tem.tbl" with '|','\n'; -load data successfully (105.917 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it 
contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; 
it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was 
truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table NATION( - > N_NATIONKEY bigint unsigned, - > N_NAME varchar(25), - > N_REGIONKEY bigint unsigned, - > N_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on NATION( - > N_NATIONKEY, - > N_NAME, - > N_REGIONKEY, - > N_COMMENT - > ) number = 1 partitioned on N_NATIONKEY; -create projection successfully -CLAIMS> - > load table NATION from 
"/home/imdb/rawData/tpch-raw-data/tpch_sf1/nation. tbl" with '|','\n'; -load data successfully (0.203755 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 9 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 16 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 8 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 21 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 20 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 19 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 17 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 23 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 11 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 18 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 12 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 13 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 22 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > - > - > create table REGION( - > R_REGIONKEY bigint unsigned, - > R_NAME varchar(25), - > R_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on REGION( - > R_REGIONKEY, - > R_NAME, - > R_COMMENT - > ) number = 1 partitioned on R_REGIONKEY; -create projection successfully 
-CLAIMS>load table REGION from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/region. tbl" with '|','\n'; -load data successfully (0.06646 sec) - -WARNINGS: -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > exit; diff --git a/sbin/claims-test/testresult/ddl_tpch_sf1_4partition-1.result b/sbin/claims-test/testresult/ddl_tpch_sf1_4partition-1.result deleted file mode 100644 index b82e068e3..000000000 --- a/sbin/claims-test/testresult/ddl_tpch_sf1_4partition-1.result +++ /dev/null @@ -1,659 +0,0 @@ - ______ __ ___ __ .___ ___. _______. - / || | / \ | | | \/ | / | - | ,----'| | / ^ \ | | | \ / | | (----` - | | | | / /_\ \ | | | |\/| | \ \ - | `----.| `----. 
/ _____ \ | | | | | | .----) | - \______||_______|/__/ \__\ |__| |__| |__| |_______/ - - -----------CLuster-Aware In-Memory Sql query engine---------- - - -[?1034hCLAIMS>create table PART( - > P_PARTKEY bigint unsigned, - > P_NAME varchar(55), - > P_MFGR varchar(25), - > P_BRAND varchar(10), - > P_TYPE varchar(25), - > P_SIZE int, - > P_CONTAINER varchar(10), - > P_RETAILPRICE decimal(20, 4), - > P_COMMENT varchar(23) - > ); -create table successfully -CLAIMS>create projection on PART( - > P_PARTKEY, - > P_NAME, - > P_MFGR, - > P_BRAND, - > P_TYPE, - > P_SIZE, - > P_CONTAINER, - > P_RETAILPRICE, - > P_COMMENT - > ) number = 4 partitioned on P_PARTKEY; -create projection successfully -CLAIMS>load table PART from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl" with '|','\n'; -load data successfully (5.12668 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl 
was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were 
input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table SUPPLIER( - > S_SUPPKEY bigint unsigned, - > S_NAME varchar(25), - > S_ADDRESS varchar(40), - > S_NATIONKEY bigint unsigned, - > S_PHONE varchar(15), - > S_ACCTBAL decimal(20,4), - > S_COMMENT varchar(101) - > ); -create table successfully -CLAIMS>create projection on SUPPLIER( - > S_SUPPKEY, - > S_NAME, - > 
S_ADDRESS, - > S_NATIONKEY, - > S_PHONE, - > S_ACCTBAL, - > S_COMMENT - > ) number = 4 partitioned on S_SUPPKEY; -create projection successfully -CLAIMS>load table SUPPLIER from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/suppl ier.tbl" with '|','\n'; -load data successfully (0.356682 sec) - -WARNINGS: -Line: 12 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 36 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 60 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 84 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 108 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 132 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 156 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 180 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 204 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 228 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 252 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 276 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there 
were input columns -Line: 300 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 324 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 348 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 372 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 396 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 420 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 444 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 468 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 492 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 516 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 540 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 564 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 588 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 612 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than 
there were input columns -Line: 636 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 660 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 684 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 708 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 732 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 756 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 780 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 804 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 828 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 852 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 876 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 900 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 924 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 948 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data 
than there were input columns -Line: 972 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 996 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1020 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1044 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1068 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1092 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1116 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1140 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1164 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1188 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1212 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1236 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1260 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1284 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it 
contained more data than there were input columns -Line: 1308 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1332 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1356 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1380 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1404 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1428 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1452 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1476 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1500 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table PARTSUPP( - > PS_PARTKEY bigint unsigned, - > PS_SUPPKEY bigint unsigned, - > PS_AVAILQTY int, - > PS_SUPPLYCOST decimal(20,2), - > PS_COMMENT varchar(199) - > ); -create table successfully -CLAIMS> - > - > - > create projection on PARTSUPP( - > PS_PARTKEY, - > PS_SUPPKEY, - > PS_AVAILQTY, - > PS_SUPPLYCOST, - > PS_COMMENT - > ) number = 1 partitioned on PS_PARTKEY; -create projection successfully -CLAIMS>load table PARTSUPP from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/parts upp.tbl" with '|','\n'; -load data successfully (19.1024 sec) - -WARNINGS: -Line: 24 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 49 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 73 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 97 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 121 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 145 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 169 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 193 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 217 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 241 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 265 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 289 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 313 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 337 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 361 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 385 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 409 in 
file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 433 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 457 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 481 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 505 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 529 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 553 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 577 in 
file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 601 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 625 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 30 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 649 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 54 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 673 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 78 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 102 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 697 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 721 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 126 in 
file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 745 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 769 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 150 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 793 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table CUSTOMER( - > C_CUSTKEY bigint unsigned, - > C_NAME varchar(25), - > C_ADDRESS varchar(40), - > C_NATIONKEY bigint unsigned, - > C_PHONE varchar(15), - > C_ACCTBAL decimal(20, 4), - > C_MKTSEGMENT varchar(10), - > C_COMMENT varchar(117) - > ); -create table successfully -CLAIMS>create projection on CUSTOMER( - > C_CUSTKEY, - > C_NAME, - > C_ADDRESS, - > C_NATIONKEY, - > C_PHONE, - > C_ACCTBAL, - > C_MKTSEGMENT, - > C_COMMENT - > ) number = 4 partitioned on C_CUSTKEY; -create projection successfully -CLAIMS>load table CUSTOMER from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/custo mer.tbl" with '|','\n'; -load data successfully (4.95498 sec) - -WARNINGS: -Line: 13 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 37 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 61 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 85 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 109 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 133 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 157 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 181 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 205 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 229 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 253 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 277 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 301 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 325 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 349 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 373 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 397 in 
file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 421 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 445 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 469 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 493 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 517 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 541 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 565 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 589 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 613 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 637 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 661 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 685 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 709 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 733 
in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 757 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 781 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 805 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 829 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 853 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 877 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 901 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 925 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 949 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 973 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 997 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1021 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1045 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 
1069 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1093 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1117 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1141 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1165 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1189 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1213 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1237 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1261 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1285 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1309 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1333 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1357 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1381 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were 
input columns -Line: 1405 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1429 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1453 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1477 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1501 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table ORDERS( - > O_ORDERKEY bigint unsigned, - > O_CUSTKEY bigint unsigned, - > O_ORDERSTATUS varchar(1), - > O_TOTALPRICE decimal(20, 4), - > O_ORDERDATE date, - > O_ORDERPRIORITY varchar(15), - > O_CLERK varchar(15), - > O_SHIPPRIORITY int, - > O_COMMENT varchar(79) - > ); -create table successfully -CLAIMS>create projection on ORDERS( - > O_ORDERKEY, - > O_CUSTKEY, - > O_ORDERSTATUS, - > O_TOTALPRICE, - > O_ORDERDATE, - > O_ORDERPRIORITY, - > O_CLERK, - > O_SHIPPRIORITY, - > O_COMMENT - > ) number = 4 partitioned on O_ORDERKEY; -create projection successfully -CLAIMS>load table ORDERS from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders. 
tbl" with '|','\n'; -load data successfully (26.9864 sec) - -WARNINGS: -Line: 21 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 45 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 69 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 93 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 117 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 141 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 165 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 189 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 213 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 237 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 261 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 285 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 309 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 333 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it 
contained more data than there were input columns -Line: 357 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 381 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 405 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 429 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 453 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 477 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 501 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 525 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 549 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 573 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 597 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 621 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 645 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 669 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than 
there were input columns -Line: 693 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 717 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 741 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 765 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 789 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 813 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 837 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 861 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 885 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 909 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 933 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 957 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 981 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1005 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns 
-Line: 1029 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1053 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1077 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1101 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1125 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1149 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1173 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1197 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1221 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1245 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1269 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1293 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1317 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1341 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1365 
in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1389 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1413 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1437 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1461 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1485 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1509 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table LINEITEM( - > L_ORDERKEY bigint unsigned, - > L_PARTKEY bigint unsigned, - > L_SUPPKEY bigint unsigned, - > L_LINENUMBER int, - > L_QUANTITY decimal(20, 4), - > L_EXTENDEDPRICE decimal(20,4), - > L_DISCOUNT decimal(20, 4), - > L_TAX decimal(20, 4), - > L_RETURNFLAG varchar(1), - > L_LINESTATUS varchar(1), - > L_SHIPDATE date, - > L_COMMITDATE date, - > L_RECEIPTDATE date, - > L_SHIPINSTRUCT varchar(25), - > L_SHIPMODE varchar(10), - > L_COMMENT varchar(44) - > ); -create table successfully -CLAIMS> - > create projection on LINEITEM( - > L_ORDERKEY, - > L_PARTKEY, - > L_SUPPKEY, - > L_LINENUMBER, - > L_QUANTITY, - > L_EXTENDEDPRICE, - > L_DISCOUNT, - > L_TAX, - > L_RETURNFLAG, - > L_LINESTATUS, - > L_SHIPDATE, - > L_COMMITDATE, - > L_RECEIPTDATE, - > L_SHIPINSTRUCT, - > L_SHIPMODE, - > L_COMMENT - > ) number = 4 partitioned on L_ORDERKEY; -create projection successfully -CLAIMS> - > load table LINEITEM from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/linei tem.tbl" with 
'|','\n'; -load data successfully (90.4732 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was 
truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl 
was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns 
-Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table NATION( - > N_NATIONKEY bigint unsigned, - > N_NAME varchar(25), - > N_REGIONKEY bigint unsigned, - > N_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on NATION( - > N_NATIONKEY, - > N_NAME, - > N_REGIONKEY, - > N_COMMENT - > ) number = 4 partitioned on N_NATIONKEY; -create projection successfully -CLAIMS> - > load table NATION from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/nation. 
tbl" with '|','\n'; -load data successfully (0.187574 sec) - -WARNINGS: -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 19 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 9 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 8 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 11 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 12 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 17 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 16 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 20 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data 
than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 18 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 21 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 22 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 23 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 13 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > - > - > create table REGION( - > R_REGIONKEY bigint unsigned, - > R_NAME varchar(25), - > R_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on REGION( - > R_REGIONKEY, - > R_NAME, - > R_COMMENT - > ) number = 4 partitioned on R_REGIONKEY; -create projection successfully -CLAIMS>load table REGION from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/region. 
tbl" with '|','\n'; -load data successfully (0.355984 sec) - -WARNINGS: -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > exit; diff --git a/sbin/claims-test/testresult/ddl_tpch_sf1_8partition-1.result b/sbin/claims-test/testresult/ddl_tpch_sf1_8partition-1.result deleted file mode 100644 index 036ede0ef..000000000 --- a/sbin/claims-test/testresult/ddl_tpch_sf1_8partition-1.result +++ /dev/null @@ -1,657 +0,0 @@ - ______ __ ___ __ .___ ___. _______. - / || | / \ | | | \/ | / | - | ,----'| | / ^ \ | | | \ / | | (----` - | | | | / /_\ \ | | | |\/| | \ \ - | `----.| `----. 
/ _____ \ | | | | | | .----) | - \______||_______|/__/ \__\ |__| |__| |__| |_______/ - - -----------CLuster-Aware In-Memory Sql query engine---------- - - -[?1034hCLAIMS>create table PART( - > P_PARTKEY bigint unsigned, - > P_NAME varchar(55), - > P_MFGR varchar(25), - > P_BRAND varchar(10), - > P_TYPE varchar(25), - > P_SIZE int, - > P_CONTAINER varchar(10), - > P_RETAILPRICE decimal(20,4), - > P_COMMENT varchar(23) - > ); -create table successfully -CLAIMS>create projection on PART( - > P_PARTKEY, - > P_NAME, - > P_MFGR, - > P_BRAND, - > P_TYPE, - > P_SIZE, - > P_CONTAINER, - > P_RETAILPRICE, - > P_COMMENT - > ) number = 8 partitioned on P_PARTKEY; -create projection successfully -CLAIMS>load table PART from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl" with '|','\n'; -load data successfully (5.74215 sec) - -WARNINGS: -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 52 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 76 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 100 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 124 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 148 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 172 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 196 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 220 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 244 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 268 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 292 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 316 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 340 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 364 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 388 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 412 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 436 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 460 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 484 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 508 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 532 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl 
was truncated; it contained more data than there were input columns -Line: 556 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 580 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 604 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 628 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 652 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 676 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 700 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 724 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 748 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 772 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 796 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 820 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 844 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 868 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were 
input columns -Line: 892 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 916 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 940 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 964 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 988 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1012 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1036 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1060 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1084 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1108 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1132 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1156 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1180 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1204 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1228 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1252 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1276 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1300 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1324 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1348 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1372 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1396 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1420 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1444 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1468 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns -Line: 1492 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/part.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table SUPPLIER( - > S_SUPPKEY bigint unsigned, - > S_NAME varchar(25), - > S_ADDRESS varchar(40), - > S_NATIONKEY bigint unsigned, - > S_PHONE varchar(15), - > S_ACCTBAL decimal(20,4), - > S_COMMENT varchar(101) - > ); -create table successfully -CLAIMS>create projection on SUPPLIER( - > S_SUPPKEY, - > S_NAME, - > 
S_ADDRESS, - > S_NATIONKEY, - > S_PHONE, - > S_ACCTBAL, - > S_COMMENT - > ) number = 8 partitioned on S_SUPPKEY; -create projection successfully -CLAIMS>load table SUPPLIER from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/suppl ier.tbl" with '|','\n'; -load data successfully (0.449802 sec) - -WARNINGS: -Line: 17 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 41 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 65 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 89 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 113 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 137 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 161 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 185 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 209 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 233 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 257 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 281 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there 
were input columns -Line: 305 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 329 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 353 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 377 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 401 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 425 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 449 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 473 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 497 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 521 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 545 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 569 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 593 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 617 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than 
there were input columns -Line: 641 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 665 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 689 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 713 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 737 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 761 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 785 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 809 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 833 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 857 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 881 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 905 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 929 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 953 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data 
than there were input columns -Line: 977 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1001 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1025 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1049 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1073 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1097 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1121 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1145 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1169 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1193 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1217 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1241 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1265 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1289 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it 
contained more data than there were input columns -Line: 1313 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1337 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1361 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1385 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1409 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1433 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1457 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1481 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns -Line: 1505 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/supplier.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table PARTSUPP( - > PS_PARTKEY bigint unsigned, - > PS_SUPPKEY bigint unsigned, - > PS_AVAILQTY int, - > PS_SUPPLYCOST decimal(20, 2), - > PS_COMMENT varchar(199) - > ); -create table successfully -CLAIMS> - > create projection on PARTSUPP( - > PS_PARTKEY, - > PS_SUPPKEY, - > PS_AVAILQTY, - > PS_SUPPLYCOST, - > PS_COMMENT - > ) number = 8 partitioned on PS_PARTKEY; -create projection successfully -CLAIMS>load table PARTSUPP from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/parts upp.tbl" with '|','\n'; -load data successfully (17.8721 sec) - -WARNINGS: -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl 
was truncated; it contained more data than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 49 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 73 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 97 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 121 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 145 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 169 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 193 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 217 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 241 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 265 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 289 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 313 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 337 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl 
was truncated; it contained more data than there were input columns -Line: 361 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 385 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 409 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 433 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 457 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 481 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 505 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 529 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 553 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 577 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 601 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 625 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 649 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 673 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 697 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 721 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 745 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 769 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 793 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 817 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 841 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 865 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 889 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 913 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 937 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 961 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 985 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1009 in 
file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1033 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1057 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1081 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1105 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1129 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1153 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1177 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1201 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1225 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1249 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1273 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1297 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1321 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input 
columns -Line: 1345 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1369 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1393 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1417 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1441 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1465 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns -Line: 1489 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/partsupp.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > create table CUSTOMER( - > C_CUSTKEY bigint unsigned, - > C_NAME varchar(25), - > C_ADDRESS varchar(40), - > C_NATIONKEY bigint unsigned, - > C_PHONE varchar(15), - > C_ACCTBAL decimal(20, 4), - > C_MKTSEGMENT varchar(10), - > C_COMMENT varchar(117) - > ); -create table successfully -CLAIMS>create projection on CUSTOMER( - > C_CUSTKEY, - > C_NAME, - > C_ADDRESS, - > C_NATIONKEY, - > C_PHONE, - > C_ACCTBAL, - > C_MKTSEGMENT, - > C_COMMENT - > ) number = 8 partitioned on C_CUSTKEY; -create projection successfully -CLAIMS>load table CUSTOMER from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/custo mer.tbl" with '|','\n'; -load data successfully (2.76142 sec) - -WARNINGS: -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 28 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 52 
in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 76 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 100 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 124 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 148 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 172 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 196 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 220 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 244 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 268 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 292 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 316 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 340 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 364 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 
388 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 412 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 436 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 460 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 484 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 508 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 532 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 556 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 580 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 604 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 628 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 652 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 676 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 700 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns 
-Line: 724 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 748 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 772 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 796 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 820 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 844 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 868 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 892 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 916 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 940 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 964 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 988 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1012 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1036 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input 
columns -Line: 1060 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1084 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1108 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1132 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1156 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1180 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1204 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1228 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1252 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1276 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1300 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1324 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1348 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1372 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than 
there were input columns -Line: 1396 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1420 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1444 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1468 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns -Line: 1492 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/customer.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table ORDERS( - > O_ORDERKEY bigint unsigned, - > O_CUSTKEY bigint unsigned, - > O_ORDERSTATUS varchar(1), - > O_TOTALPRICE decimal(20, 4), - > O_ORDERDATE date, - > O_ORDERPRIORITY varchar(15), - > O_CLERK varchar(15), - > O_SHIPPRIORITY int, - > O_COMMENT varchar(79) - > ); -create table successfully -CLAIMS>create projection on ORDERS( - > O_ORDERKEY, - > O_CUSTKEY, - > O_ORDERSTATUS, - > O_TOTALPRICE, - > O_ORDERDATE, - > O_ORDERPRIORITY, - > O_CLERK, - > O_SHIPPRIORITY, - > O_COMMENT - > ) number = 8 partitioned on O_ORDERKEY; -create projection successfully -CLAIMS>load table ORDERS from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders. 
tbl" with '|','\n'; -load data successfully (21.9481 sec) - -WARNINGS: -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 26 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 50 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 74 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 98 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 122 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 146 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 170 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 194 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 218 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 242 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 266 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 290 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 314 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained 
more data than there were input columns -Line: 338 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 362 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 386 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 410 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 434 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 458 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 482 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 506 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 530 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 554 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 578 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 602 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 626 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 650 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were 
input columns -Line: 674 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 698 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 722 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 746 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 770 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 794 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 818 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 842 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 866 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 890 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 914 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 938 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 962 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 986 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1010 
in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1034 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1058 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1082 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1106 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1130 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1154 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1178 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1202 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1226 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1250 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1274 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1298 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1322 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1346 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1370 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1394 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1418 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1442 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1466 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns -Line: 1490 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/orders.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table LINEITEM( - > L_ORDERKEY bigint unsigned, - > L_PARTKEY bigint unsigned, - > L_SUPPKEY bigint unsigned, - > L_LINENUMBER int, - > L_QUANTITY decimal(20, 4), - > L_EXTENDEDPRICE decimal(20, 4), - > L_DISCOUNT decimal(20, 4), - > L_TAX decimal(20, 4), - > L_RETURNFLAG varchar(1), - > L_LINESTATUS varchar(1), - > L_SHIPDATE date, - > L_COMMITDATE date, - > L_RECEIPTDATE date, - > L_SHIPINSTRUCT varchar(25), - > L_SHIPMODE varchar(10), - > L_COMMENT varchar(44) - > ); -create table successfully -CLAIMS> - > create projection on LINEITEM( - > L_ORDERKEY, - > L_PARTKEY, - > L_SUPPKEY, - > L_LINENUMBER, - > L_QUANTITY, - > L_EXTENDEDPRICE, - > L_DISCOUNT, - > L_TAX, - > L_RETURNFLAG, - > L_LINESTATUS, - > L_SHIPDATE, - > L_COMMITDATE, - > L_RECEIPTDATE, - > L_SHIPINSTRUCT, - > L_SHIPMODE, - > L_COMMENT - > ) number = 8 partitioned on L_ORDERKEY; -create projection successfully -CLAIMS> - > load table LINEITEM from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/linei tem.tbl" with 
'|','\n'; -load data successfully (143.626 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 48 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 72 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 96 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 120 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 144 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 168 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 192 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 216 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 240 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 264 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 288 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 312 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 336 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was 
truncated; it contained more data than there were input columns -Line: 360 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 384 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 408 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 432 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 456 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 480 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 504 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 528 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 552 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 576 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 600 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 624 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 648 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 672 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl 
was truncated; it contained more data than there were input columns -Line: 696 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 720 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 744 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 768 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 792 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 816 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 840 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 864 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 888 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 912 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 936 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 960 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 984 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1008 in file: 
/home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1032 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1056 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1080 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1104 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1128 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1152 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1176 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1200 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1224 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1248 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1272 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1296 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1320 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns 
-Line: 1344 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1368 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1392 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1416 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1440 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1464 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1488 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns -Line: 1512 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/lineitem.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > create table NATION( - > N_NATIONKEY bigint unsigned, - > N_NAME varchar(25), - > N_REGIONKEY bigint unsigned, - > N_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on NATION( - > N_NATIONKEY, - > N_NAME, - > N_REGIONKEY, - > N_COMMENT - > ) number = 8 partitioned on N_NATIONKEY; -create projection successfully -CLAIMS> - > load table NATION from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/nation. 
tbl" with '|','\n'; -load data successfully (0.221829 sec) - -WARNINGS: -Line: 24 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 14 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 9 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 16 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 13 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 10 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 17 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 15 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 18 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 19 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 7 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 8 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data 
than there were input columns -Line: 25 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 22 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 12 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 21 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 6 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 11 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 23 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns -Line: 20 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/nation.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > - > - > - > create table REGION( - > R_REGIONKEY bigint unsigned, - > R_NAME varchar(25), - > R_COMMENT varchar(152) - > ); -create table successfully -CLAIMS> - > create projection on REGION( - > R_REGIONKEY, - > R_NAME, - > R_COMMENT - > ) number = 8 partitioned on R_REGIONKEY; -create projection successfully -CLAIMS>load table REGION from "/home/imdb/rawData/tpch-raw-data/tpch_sf1/region. 
tbl" with '|','\n'; -load data successfully (0.206466 sec) - -WARNINGS: -Line: 1 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 5 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 3 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 4 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns -Line: 2 in file: /home/imdb/rawData/tpch-raw-data/tpch_sf1/region.tbl was truncated; it contained more data than there were input columns - - -CLAIMS> - > exit; diff --git a/sbin/slave-scripts/start-slave.sh b/sbin/slave-scripts/start-slave.sh deleted file mode 100755 index 67ec857fd..000000000 --- a/sbin/slave-scripts/start-slave.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh - -cd $CLAIMS_HOME -timestr=`date '+%Y-%m-%d'` -logpath=$CLAIMS_HOME/claimslogs - -if [ ! -d "$logpath" ]; then - echo " path not exist, mkdir: "$logpath - mkdir -p "$logpath" -fi - -thisip=`ifconfig | grep "10.11.1.*"` -thisip=${thisip:20:11} - -logfile=$logpath/claimsserver-$thisip-$timestr.log - -if [ -z $1 ]; then - echo "please start with config file!" >> $logfile - echo "please start with config file!" 
- exit -fi - -claimspid=`ps x | grep -w ./claimsserver | grep -v grep | awk '{print $1}'` -if [ "$claimspid" != "" ]; then - echo "claimsserver pid $claimspid" - kill -9 $claimspid -fi - -ulimit -c unlimited -echo "========run claimsserver on slave:[$thisip] time:$(date '+%Y-%m-%d %H:%M:%S')========" >> $logfile -./claimsserver -c $1 >> $logfile & diff --git a/sbin/slave-scripts/stop-slave.sh b/sbin/slave-scripts/stop-slave.sh deleted file mode 100755 index 1b37f4c47..000000000 --- a/sbin/slave-scripts/stop-slave.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -#claimsps=`ps ux | grep claimsserver | grep -v grep` -claimspids=`ps x | grep -w ./claimsserver | grep -v grep | awk '{print $1}'` -if [ "$claimspids" != "" ]; then -for claimspid in $claimspids -do -echo "kill claimsserver pid : $claimspid" -kill -9 $claimspid -done -fi diff --git a/sbin/start-all.sh b/sbin/start-all.sh new file mode 100755 index 000000000..c881defa0 --- /dev/null +++ b/sbin/start-all.sh @@ -0,0 +1,24 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd 2-claims-conf/ +source ./load-config.sh +source ./generate-config.sh +cd ../../ +# now in CLAIMS_HOME + +# start cluster +for node in $master $slaves +do +{ + ssh -f -n -l $user $node "$claimshome/sbin/start-node.sh $claimshome/sbin/2-claims-conf/config-$node>/dev/null 2>&1" + echo -e "$node claimsserver start [\033[32mOK\033[0m]" + + if [ "$node" = "$master" ]; then + sleep 9 + else + sleep 1 + fi +} +done diff --git a/sbin/start-node.sh b/sbin/start-node.sh new file mode 100755 index 000000000..c07cdace4 --- /dev/null +++ b/sbin/start-node.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR +cd 2-claims-conf +source ./load-config.sh +#source ./generate-config.sh +cd ../../ +# now in CLAIMS_HOME + +timestr=$(date +%Y-%m-%d) + +if [ ! 
-d "$logpath" ]; then + echo "log path not exist, mkdir:$logpath" + mkdir -p "$logpath" +fi + +if [ -z $1 ]; then + echo "please start with config file!" >> $logpath/noconfig-$timestr + echo "please start with config file!" + exit 1 +fi + +echo "-----------------------------------" +echo "configfile: 【$1】" +echo "-----------------------------------" + +echo -e "\033[31m`pwd`\033[0m" + +thisip=${1#*config-} +thislog=$logpath/claimsserver-$thisip-$timestr.log + +./sbin/stop-node.sh + +# for debug begin ###### +cd install +ulimit -c unlimited +cd ../ +# for debug end ######## + +echo "========run claimsserver on:[$thisip] time:[$(date '+%Y-%m-%d %H:%M:%S')]========" >> $thislog +./install/claimsserver -c $1 >> $thislog & +claimsserverpid=$! +echo "claimsserver=$claimsserverpid" > $runclaimsprocid +echo -e "$thisip start claimsserver pid:[$claimsserverpid][\033[32mOK\033[0m]" + +echo "start tracker for debug..." +#./sbin/claims-test/statustracker.sh & + diff --git a/sbin/stop-all.sh b/sbin/stop-all.sh new file mode 100755 index 000000000..ad95cc600 --- /dev/null +++ b/sbin/stop-all.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR + +cd 2-claims-conf/ +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +for node in $slaves $master +do + ssh -f -n -l $user $node "$claimshome/sbin/stop-node.sh $1>/dev/null 2>&1" & + echo -e "$node claimsserver stop [\033[32mOK\033[0m]" +done diff --git a/sbin/stop-node.sh b/sbin/stop-node.sh new file mode 100755 index 000000000..16695f591 --- /dev/null +++ b/sbin/stop-node.sh @@ -0,0 +1,51 @@ +#!/bin/sh + +CURRDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $CURRDIR + +cd 2-claims-conf/ +source ./load-config.sh +cd ../../ +# now in CLAIMS_HOME + +if [ "$1" = "all" ]; then + + claimspids=`ps x | grep -w ./install/claimsserver | grep -v grep | awk '{print $1}'` + if [ "$claimspids" != "" ]; then + for claimspid in $claimspids + do + echo "stop claimsserver 
pid:$claimspid" + kill -9 $claimspid + done + fi + + if [ -f "$runclaimsprocid" ]; then + rm -f $runclaimsprocid + fi + clientpids=`ps x | grep -w ./install/client | grep -v grep | awk '{print $1}'` + if [ "$clientpids" != "" ]; then + for clientpid in $clientpids + do + echo "claims client pid : [$clientpid]" + kill -9 $clientpid + done + fi + gtestpids=`ps x | grep -w ./install/test | grep -v grep | awk '{print $1}'` + if [ "$gtestpids" != "" ]; then + for gtestpid in $gtestpids + do + echo "claims gtest pid : [$gtestpid]" + kill -9 $gtestpid + done + fi +else + + if [ -f "$runclaimsprocid" ]; then + claimspids=`sed '/^claimsserver=/!d;s/.*=//' $runclaimsprocid` + if [ "$claimspids" != "" ]; then + echo "stop claimsserver pid : [$claimspids]" + kill -9 $claimspids + fi + rm -f $runclaimsprocid + fi +fi diff --git a/sql_parser/Test/Makefile.am b/sql_parser/Test/Makefile.am index 4c3384dd2..f1ff5b5c4 100755 --- a/sql_parser/Test/Makefile.am +++ b/sql_parser/Test/Makefile.am @@ -1,8 +1,7 @@ AM_CPPFLAGS=-fPIC -DTHERON_XS\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ -I${GTEST_HOME}/include @@ -25,10 +24,9 @@ LDADD = \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ ${BOOST_HOME}/stage/lib/libboost_system.a \ ${BOOST_HOME}/stage/lib/libboost_system.so \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.so \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a \ + ${HADOOP_HOME}/lib/native/libhdfs.so\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ - ${THERON_HOME}/Lib/libtherond.a \ ${GTEST_HOME}/libgtest.a diff --git a/sql_parser/Test/test_new_sql.cpp b/sql_parser/Test/test_new_sql.cpp index 4a3e03e69..dc5176b06 100644 --- a/sql_parser/Test/test_new_sql.cpp +++ b/sql_parser/Test/test_new_sql.cpp @@ -1,3 +1,5 @@ +#include "../../exec_tracker/segment_exec_status.h" + /* * Copyright 
[2012-2015] DaSE@ECNU * @@ -36,6 +38,7 @@ #include "../../physical_operator/physical_operator_base.h" using claims::logical_operator::LogicalQueryPlanRoot; using claims::physical_operator::PhysicalOperatorBase; +using claims::SegmentExecStatus; using std::endl; using std::cout; @@ -59,7 +62,8 @@ int TestNewSql() { raw_ast->Print(); cout << "--------------begin push down condition ------------" << endl; #endif - raw_ast->PushDownCondition(NULL); + PushDownConditionContext pdccnxt; + raw_ast->PushDownCondition(pdccnxt); #ifdef PRINTCONTEXT raw_ast->Print(); cout << "--------------begin logical plan -------------------" << endl; @@ -83,12 +87,12 @@ int TestNewSql() { physical_plan->Print(); cout << "--------------begin output result -------------------" << endl; #endif - - physical_plan->Open(); - while (physical_plan->Next(NULL)) { + SegmentExecStatus* exec_status = new SegmentExecStatus(make_pair(0, 0)); + physical_plan->Open(exec_status); + while (physical_plan->Next(exec_status, NULL)) { } ResultSet* result_set = physical_plan->GetResultSet(); - physical_plan->Close(); + physical_plan->Close(exec_status); result_set->print(); diff --git a/sql_parser/ast_node/Makefile.am b/sql_parser/ast_node/Makefile.am index 815add5fc..3937e1cbd 100755 --- a/sql_parser/ast_node/Makefile.am +++ b/sql_parser/ast_node/Makefile.am @@ -1,10 +1,9 @@ AM_CPPFLAGS= -fPIC -fpermissive\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ -I${GTEST_HOME}/include AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lxs -lpthread -lglog @@ -26,18 +25,19 @@ LDADD = ../../Executor/libexecutor.a \ ../../utility/libutility.a \ /usr/local/lib/libglog.a \ /usr/local/lib/libglog.so \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ 
${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a \ ${GTEST_HOME}/libgtest.a noinst_LIBRARIES=libast_node.a libast_node_a_SOURCES = \ - ast_delete_stmt.cpp ast_delete.h \ + ast_desc_stmt.cpp ast_desc_stmt.h \ + ast_delete_stmt.cpp ast_delete_stmt.h \ + ast_update_stmt.cpp ast_update_stmt.h \ ast_show_stmt.cpp ast_show_stmt.h \ ast_select_stmt.cpp ast_select_stmt.h \ ast_create_stmt.cpp ast_create_stmt.h \ diff --git a/sql_parser/ast_node/ast_desc_stmt.cpp b/sql_parser/ast_node/ast_desc_stmt.cpp new file mode 100644 index 000000000..b211df89f --- /dev/null +++ b/sql_parser/ast_node/ast_desc_stmt.cpp @@ -0,0 +1,80 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/sql_parser/ast_node/ast_desc_stmt.cpp + * + * Created on: Feb 26, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ +#include +#include +#include +#include +#include +#include "../ast_node/ast_desc_stmt.h" +#include "../../common/error_define.h" +#include "./ast_select_stmt.h" +#include "../../catalog/catalog.h" +#include "../../catalog/table.h" +#include "../../Environment.h" +#include "../ast_node/ast_node.h" +using std::cout; +using std::endl; +using std::cin; +using std::string; +using std::setw; +// namespace claims { +// namespace sql_parser { + +AstDescStmt::AstDescStmt(AstNodeType ast_node_type, string table_name) + : AstNode(ast_node_type), table_name_(table_name) {} + +AstDescStmt::~AstDescStmt() {} + +void AstDescStmt::Print(int level) const { + cout << setw(level * TAB_SIZE) << " " + << "|Desc Stmt|" << endl; + cout << setw((level + 1) * TAB_SIZE) << " " + << "table name: " << table_name_ << endl; +} +RetCode AstDescStmt::SemanticAnalisys(SemanticContext* sem_cntx) { + int ret = rSuccess; + cout << "SA table name :" << table_name_ << endl; + if ((table_name_.empty())) { + ret = claims::common::rTableNotExisted; + LOG(ERROR) << "No table name!" << std::endl; + sem_cntx->error_msg_ = "No table name!"; + return ret; + } + Catalog* local_catalog = Environment::getInstance()->getCatalog(); + TableDescriptor* table = local_catalog->getTable(table_name_); + if (table == NULL) { + ret = claims::common::rTableillegal; + LOG(ERROR) << "The table " + table_name_ + " does not exist!" 
<< std::endl; + sem_cntx->error_msg_ = "The table " + table_name_ + " does not exist!"; + return ret; + } + return ret; +} + +// } /* namespace sql_parser */ +// } /* namespace claims */ diff --git a/sql_parser/ast_node/ast_desc_stmt.h b/sql_parser/ast_node/ast_desc_stmt.h new file mode 100644 index 000000000..6c8ce795a --- /dev/null +++ b/sql_parser/ast_node/ast_desc_stmt.h @@ -0,0 +1,75 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/sql_parser/ast_node/ast_desc_stmt.h + * + * Created on: Feb 26, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ + +#ifndef SQL_PARSER_AST_NODE_AST_DESC_STMT_H_ +#define SQL_PARSER_AST_NODE_AST_DESC_STMT_H_ + +#include +#include +#include "./ast_node.h" +using std::string; +using std::vector; + +// namespace claims { +// namespace sql_parser { + +/** + * @brief the AST of description clause. + * Desc mainly provides column name, column type, default value, + * key or not, can be null or not and extra information. + */ +class AstDescStmt : public AstNode { + public: + /** + * @brief AstDescTable Constructor. 
+ */ + AstDescStmt(AstNodeType ast_node_type, string table_name); + /** + * @brief AstDescTable Destructor. + */ + ~AstDescStmt(); + RetCode SemanticAnalisys(SemanticContext *sem_cntx); + + void Print(int level = 0) const; + + public: + AstNodeType ast_node_type_; + string table_name_; + vector column_name_; + vector type_; + vector is_key_; + vector nullable_; + vector default_value_; + vector extra_; + vector size_; +}; + +// } // namespace sql_parser +// } // namespace claims + +#endif // SQL_PARSER_AST_NODE_AST_DESC_STMT_H_ diff --git a/sql_parser/ast_node/ast_expr_node.cpp b/sql_parser/ast_node/ast_expr_node.cpp index c0d01c4d1..6bad6aa99 100644 --- a/sql_parser/ast_node/ast_expr_node.cpp +++ b/sql_parser/ast_node/ast_expr_node.cpp @@ -104,7 +104,8 @@ void AstExprConst::ReplaceAggregation(AstNode*& agg_column, } RetCode AstExprConst::GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan) { + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan) { data_type actual_type = t_string; if (expr_type_ == "CONST_INT") { if (atol(data_.c_str()) > INT_MAX) { @@ -249,9 +250,11 @@ void AstExprUnary::GetRefTable(set& ref_table) { } RetCode AstExprUnary::GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan) { + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan) { data_type get_type, actual_type; OperType oper; + int oper_flag = 0; oper = OperType::oper_none; if (expr_type_ == "+") { } else if (expr_type_ == "-") { @@ -259,7 +262,11 @@ RetCode AstExprUnary::GetLogicalPlan(ExprNode*& logic_expr, } else if (expr_type_ == "!" 
|| expr_type_ == "NOT") { oper = OperType::oper_not; } else if (expr_type_ == "IS_NULL") { + oper = OperType::oper_is_null; + oper_flag = 1; } else if (expr_type_ == "IS_NOT_NULL") { + oper = OperType::oper_is_not_null; + oper_flag = 1; } else if (expr_type_ == "IS_BOOL") { } else if (expr_type_ == "IS_NOT_BOOL") { } else if (expr_type_ == "EXSIST") { @@ -292,15 +299,21 @@ RetCode AstExprUnary::GetLogicalPlan(ExprNode*& logic_expr, child_logic_expr = new ExprConst(ExprNodeType::t_qexpr, t_u_long, "COUNT(1)", "1"); } else { - ret = arg0_->GetLogicalPlan(child_logic_expr, child_logic_plan); + ret = arg0_->GetLogicalPlan(child_logic_expr, left_lplan, right_lplan); } if (rSuccess != ret) { return ret; } assert(NULL != child_logic_expr); - logic_expr = - new ExprUnary(ExprNodeType::t_qexpr_unary, child_logic_expr->actual_type_, - expr_str_, oper, child_logic_expr); + if (oper_flag == 0) { + logic_expr = new ExprUnary(ExprNodeType::t_qexpr_unary, + child_logic_expr->actual_type_, expr_str_, oper, + child_logic_expr); + } else { + logic_expr = new ExprUnary(ExprNodeType::t_qexpr_unary, t_boolean, + child_logic_expr->actual_type_, expr_str_, oper, + child_logic_expr); + } return rSuccess; } RetCode AstExprUnary::SolveSelectAlias( @@ -448,15 +461,16 @@ void AstExprCalBinary::GetRefTable(set& ref_table) { } RetCode AstExprCalBinary::GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan) { + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan) { ExprNode* left_expr_node = NULL; ExprNode* right_expr_node = NULL; RetCode ret = rSuccess; - ret = arg0_->GetLogicalPlan(left_expr_node, child_logic_plan); + ret = arg0_->GetLogicalPlan(left_expr_node, left_lplan, right_lplan); if (rSuccess != ret) { return ret; } - ret = arg1_->GetLogicalPlan(right_expr_node, child_logic_plan); + ret = arg1_->GetLogicalPlan(right_expr_node, left_lplan, right_lplan); if (rSuccess != ret) { return ret; } @@ -594,6 +608,10 @@ void AstExprCmpBinary::Print(int 
level) const { } RetCode AstExprCmpBinary::SemanticAnalisys(SemanticContext* sem_cnxt) { RetCode ret = rSuccess; + if (expr_type_ == "EXPR_IN_SELECT") { + ret = claims::common::rNotSupport; + return ret; + } if (NULL != arg0_) { ret = arg0_->SemanticAnalisys(sem_cnxt); if (rSuccess != ret) { @@ -661,7 +679,8 @@ void AstExprCmpBinary::GetRefTable(set& ref_table) { } RetCode AstExprCmpBinary::GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan) { + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan) { ExprNode* left_expr_node = NULL; ExprNode* right_expr_node = NULL; data_type get_type = t_boolean; @@ -683,11 +702,11 @@ RetCode AstExprCmpBinary::GetLogicalPlan(ExprNode*& logic_expr, oper = OperType::oper_not_equal; } if (OperType::oper_none != oper) { - ret = arg0_->GetLogicalPlan(left_expr_node, child_logic_plan); + ret = arg0_->GetLogicalPlan(left_expr_node, left_lplan, right_lplan); if (rSuccess != ret) { return ret; } - ret = arg1_->GetLogicalPlan(right_expr_node, child_logic_plan); + ret = arg1_->GetLogicalPlan(right_expr_node, left_lplan, right_lplan); if (rSuccess != ret) { return ret; } @@ -703,7 +722,7 @@ RetCode AstExprCmpBinary::GetLogicalPlan(ExprNode*& logic_expr, vector left_node, tmp_node; vector > right_node; // just one expr at left - ret = arg0_->GetLogicalPlan(left_expr_node, child_logic_plan); + ret = arg0_->GetLogicalPlan(left_expr_node, left_lplan, right_lplan); if (rSuccess != ret) { return ret; } @@ -711,7 +730,8 @@ RetCode AstExprCmpBinary::GetLogicalPlan(ExprNode*& logic_expr, // one column per tuple for (AstNode* lnode = arg1_; lnode != NULL;) { AstExprList* list_node = reinterpret_cast(lnode); - ret = list_node->expr_->GetLogicalPlan(right_expr_node, child_logic_plan); + ret = list_node->expr_->GetLogicalPlan(right_expr_node, left_lplan, + right_lplan); if (rSuccess != ret) { return ret; } @@ -739,7 +759,8 @@ RetCode AstExprCmpBinary::GetLogicalPlan(ExprNode*& logic_expr, // collect several left 
expressions for (AstNode* lnode = arg0_; lnode != NULL;) { AstExprList* list_node = reinterpret_cast(lnode); - ret = list_node->expr_->GetLogicalPlan(left_expr_node, child_logic_plan); + ret = list_node->expr_->GetLogicalPlan(left_expr_node, left_lplan, + right_lplan); if (rSuccess != ret) { return ret; } @@ -751,8 +772,8 @@ RetCode AstExprCmpBinary::GetLogicalPlan(ExprNode*& logic_expr, AstExprList* list_node = reinterpret_cast(lnode); for (AstNode* llnode = list_node->expr_; llnode != NULL;) { AstExprList* llist_node = reinterpret_cast(llnode); - ret = llist_node->expr_->GetLogicalPlan(right_expr_node, - child_logic_plan); + ret = llist_node->expr_->GetLogicalPlan(right_expr_node, left_lplan, + right_lplan); if (rSuccess != ret) { return ret; } @@ -781,6 +802,7 @@ RetCode AstExprCmpBinary::GetLogicalPlan(ExprNode*& logic_expr, } return rSuccess; } + RetCode AstExprCmpBinary::SolveSelectAlias( SelectAliasSolver* const select_alias_solver) { if (NULL != arg0_) { @@ -1050,33 +1072,34 @@ void AstExprFunc::GetRefTable(set& ref_table) { } RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan) { + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan) { ExprNode* arg0_logic_expr = NULL; ExprNode* arg1_logic_expr = NULL; ExprNode* arg2_logic_expr = NULL; if (expr_type_ == "UPPER") { if (NULL != arg0_) { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); } if (NULL != arg1_) { - arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); } if (NULL != arg2_) { - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); } logic_expr = new ExprUnary(ExprNodeType::t_qexpr_unary, t_string, expr_str_, OperType::oper_upper, arg0_logic_expr); } else if (expr_type_ == "SUBSTRING_EXPR_EXPR" || expr_type_ == 
"SUBSTRING_EXPR_FROM_EXPR") { if (NULL != arg0_) { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); } if (NULL != arg1_) { - arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); } if (NULL != arg2_) { - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); } arg2_logic_expr = new ExprConst(ExprNodeType::t_qexpr, t_int, string("64"), string("64")); // 64 is the size of value @@ -1087,13 +1110,13 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, } else if (expr_type_ == "SUBSTRING_EXPR_EXPR_EXPR" || expr_type_ == "SUBSTRING_EXPR_FROM_EXPR_FOR_EXPR") { if (NULL != arg0_) { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); } if (NULL != arg1_) { - arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); } if (NULL != arg2_) { - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); } logic_expr = new ExprTernary(ExprNodeType::t_qexpr_ternary, t_string, expr_str_, @@ -1101,39 +1124,39 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, arg1_logic_expr, arg2_logic_expr); } else if (expr_type_ == "TRIM_TRAILING") { if (NULL != arg0_) { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); } if (NULL != arg1_) { - arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); } if (NULL != arg2_) { - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); } logic_expr = new ExprBinary(ExprNodeType::t_qexpr_cal, t_string, 
t_string, expr_str_, OperType::oper_trailing_trim, arg0_logic_expr, arg1_logic_expr); } else if (expr_type_ == "TRIM_LEADING") { if (NULL != arg0_) { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); } if (NULL != arg1_) { - arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); } if (NULL != arg2_) { - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); } logic_expr = new ExprBinary(ExprNodeType::t_qexpr_cal, t_string, t_string, expr_str_, OperType::oper_leading_trim, arg0_logic_expr, arg1_logic_expr); } else if (expr_type_ == "TRIM_BOTH") { if (NULL != arg0_) { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); } if (NULL != arg1_) { - arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); } if (NULL != arg2_) { - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); } if (NULL == arg0_) { arg0_logic_expr = @@ -1146,13 +1169,13 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, arg0_logic_expr, arg1_logic_expr); } else if (expr_type_ == "BETWEEN_AND") { if (NULL != arg0_) { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); } if (NULL != arg1_) { - arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); } if (NULL != arg2_) { - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); } data_type get_type = TypeConversionMatrix::type_conversion_matrix [arg0_logic_expr->actual_type_][arg1_logic_expr->actual_type_]; 
@@ -1170,13 +1193,13 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, std::vector case_then; for (AstNode* it = arg1_; it != NULL;) { AstExprFunc* fnode = reinterpret_cast(it); - fnode->arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); - fnode->arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + fnode->arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); + fnode->arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); case_when.push_back(arg0_logic_expr); case_then.push_back(arg1_logic_expr); it = fnode->arg2_; } - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); case_then.push_back(arg2_logic_expr); logic_expr = new ExprCaseWhen(ExprNodeType::t_qexpr_case_when, case_then[0]->actual_type_, expr_str_, @@ -1186,8 +1209,8 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, std::vector case_then; for (AstNode* it = arg1_; it != NULL;) { AstExprFunc* fnode = reinterpret_cast(it); - fnode->arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); - fnode->arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + fnode->arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); + fnode->arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); case_when.push_back(arg0_logic_expr); case_then.push_back(arg1_logic_expr); it = fnode->arg2_; @@ -1206,11 +1229,11 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, std::vector case_then; ExprNode* arg_logic_expr = NULL; assert(arg0_ != NULL); - arg0_->GetLogicalPlan(arg_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg_logic_expr, left_lplan, right_lplan); for (AstNode* it = arg1_; it != NULL;) { AstExprFunc* fnode = reinterpret_cast(it); - fnode->arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); - fnode->arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + fnode->arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); + 
fnode->arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); data_type get_type = TypeConversionMatrix::type_conversion_matrix [arg_logic_expr->actual_type_][arg0_logic_expr->actual_type_]; ExprBinary* tmp_node = new ExprBinary( @@ -1233,11 +1256,11 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, std::vector case_then; ExprNode* arg_logic_expr = NULL; assert(arg0_ != NULL); - arg0_->GetLogicalPlan(arg_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg_logic_expr, left_lplan, right_lplan); for (AstNode* it = arg1_; it != NULL;) { AstExprFunc* fnode = reinterpret_cast(it); - fnode->arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); - fnode->arg1_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + fnode->arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); + fnode->arg1_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); data_type get_type = TypeConversionMatrix::type_conversion_matrix [arg_logic_expr->actual_type_][arg0_logic_expr->actual_type_]; ExprBinary* tmp_node = new ExprBinary( @@ -1247,16 +1270,16 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, case_then.push_back(arg1_logic_expr); it = fnode->arg2_; } - arg2_->GetLogicalPlan(arg2_logic_expr, child_logic_plan); + arg2_->GetLogicalPlan(arg2_logic_expr, left_lplan, right_lplan); case_then.push_back(arg2_logic_expr); logic_expr = new ExprCaseWhen(ExprNodeType::t_qexpr_case_when, case_then[0]->actual_type_, expr_str_, case_when, case_then); } else if (expr_type_ == "DATE_ADD") { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); AstExprFunc* fnode = reinterpret_cast(arg1_); - fnode->arg0_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + fnode->arg0_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); if (fnode->expr_type_ == "INTERVAL_DAY") { logic_expr = new ExprDate( ExprNodeType::t_qexpr_date_add_sub, t_date, t_date_day, expr_str_, @@ -1282,9 
+1305,9 @@ RetCode AstExprFunc::GetLogicalPlan(ExprNode*& logic_expr, assert(false); } } else if (expr_type_ == "DATE_SUB") { - arg0_->GetLogicalPlan(arg0_logic_expr, child_logic_plan); + arg0_->GetLogicalPlan(arg0_logic_expr, left_lplan, right_lplan); AstExprFunc* fnode = reinterpret_cast(arg1_); - fnode->arg0_->GetLogicalPlan(arg1_logic_expr, child_logic_plan); + fnode->arg0_->GetLogicalPlan(arg1_logic_expr, left_lplan, right_lplan); if (fnode->expr_type_ == "INTERVAL_DAY") { logic_expr = new ExprDate( ExprNodeType::t_qexpr_date_add_sub, t_date, t_date_day, expr_str_, diff --git a/sql_parser/ast_node/ast_expr_node.h b/sql_parser/ast_node/ast_expr_node.h index 70b736090..80ff1af11 100644 --- a/sql_parser/ast_node/ast_expr_node.h +++ b/sql_parser/ast_node/ast_expr_node.h @@ -65,7 +65,8 @@ class AstExprConst : public AstNode { void ReplaceAggregation(AstNode*& agg_column, set& agg_node, bool need_collect); RetCode GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan); + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan); string expr_type_; string data_; @@ -88,7 +89,8 @@ class AstExprUnary : public AstNode { bool need_collect); void GetRefTable(set& ref_table); RetCode GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan); + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan); RetCode SolveSelectAlias(SelectAliasSolver* const select_alias_solver); AstNode* arg0_; @@ -111,7 +113,8 @@ class AstExprFunc : public AstNode { bool need_collect); void GetRefTable(set& ref_table); RetCode GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan); + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan); RetCode SolveSelectAlias(SelectAliasSolver* const select_alias_solver); AstNode* arg0_; @@ -137,7 +140,8 @@ class AstExprCalBinary : public AstNode { void GetSubExpr(vector& sub_expr, bool is_top_and); void GetRefTable(set& ref_table); RetCode 
GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan); + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan); RetCode SolveSelectAlias(SelectAliasSolver* const select_alias_solver); AstNode* arg0_; @@ -163,7 +167,8 @@ class AstExprCmpBinary : public AstNode { bool need_collect); void GetRefTable(set& ref_table); RetCode GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan); + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan); RetCode SolveSelectAlias(SelectAliasSolver* const select_alias_solver); AstNode* arg0_; diff --git a/sql_parser/ast_node/ast_node.cpp b/sql_parser/ast_node/ast_node.cpp index 30296f50f..e0f9153d1 100644 --- a/sql_parser/ast_node/ast_node.cpp +++ b/sql_parser/ast_node/ast_node.cpp @@ -123,13 +123,31 @@ RetCode AstNode::GetEqualJoinPair( } return rSuccess; } +RetCode AstNode::GetJoinCondition(vector& condition, + const vector& join_condition, + LogicalOperator* left_lplan, + LogicalOperator* right_lplan) { + RetCode ret = rSuccess; + ExprNode* expr_node = NULL; + for (auto it = join_condition.begin(); it != join_condition.end(); ++it) { + ret = (*it)->GetLogicalPlan(expr_node, left_lplan, right_lplan); + if (rSuccess != ret) { + LOG(ERROR) << "get join condition upon from list, due to [err: " << ret + << " ] !" << endl; + return ret; + } + assert(NULL != expr_node); + condition.push_back(expr_node); + } + return rSuccess; +} RetCode AstNode::GetFilterCondition(vector& condition, const vector& normal_condition, LogicalOperator* logic_plan) { RetCode ret = rSuccess; ExprNode* expr_node = NULL; for (auto it = normal_condition.begin(); it != normal_condition.end(); ++it) { - ret = (*it)->GetLogicalPlan(expr_node, logic_plan); + ret = (*it)->GetLogicalPlan(expr_node, logic_plan, NULL); if (rSuccess != ret) { LOG(ERROR) << "get normal condition upon from list, due to [err: " << ret << " ] !" 
<< endl; @@ -185,7 +203,7 @@ RetCode AstStmtList::SemanticAnalisys(SemanticContext* sem_cnxt) { } return rSuccess; } -RetCode AstStmtList::PushDownCondition(PushDownConditionContext* pdccnxt) { +RetCode AstStmtList::PushDownCondition(PushDownConditionContext& pdccnxt) { if (NULL != stmt_) { stmt_->PushDownCondition(pdccnxt); } @@ -499,7 +517,10 @@ void SemanticContext::PrintContext(string flag) { cout << "---------------------\n" << endl; } -PushDownConditionContext::PushDownConditionContext() { from_tables_.clear(); } +PushDownConditionContext::PushDownConditionContext() { + from_tables_.clear(); + sub_expr_info_.clear(); +} bool PushDownConditionContext::IsTableSubSet(set& expr_tables, set& from_tables) { for (auto it = expr_tables.begin(); it != expr_tables.end(); ++it) { diff --git a/sql_parser/ast_node/ast_node.h b/sql_parser/ast_node/ast_node.h index 18e8f44d4..060a12567 100644 --- a/sql_parser/ast_node/ast_node.h +++ b/sql_parser/ast_node/ast_node.h @@ -49,7 +49,6 @@ using claims::common::rSuccess; // namespace claims { // namespace sql_parser { -typedef int RetCode; enum AstNodeType { AST_NODE, AST_STMT_LIST, @@ -120,7 +119,11 @@ enum AstNodeType { AST_STRINGVAL, AST_BOOL, AST_SHOW_STMT, - AST_DELETE_STMT + AST_DELETE_STMT, + AST_DESC_STMT, + AST_UPDATE_STMT, + AST_UPDATE_SET_LIST + }; // the order should be keep enum SubExprType { @@ -205,6 +208,10 @@ class PushDownConditionContext { is_set(false) {} }; PushDownConditionContext(); + ~PushDownConditionContext() { + from_tables_.clear(); + sub_expr_info_.clear(); + } void GetSubExprInfo(AstNode* expr); SubExprType GetSubExprType(AstNode* sub_expr, int ref_table_num); bool IsEqualJoinCondition(AstNode* sub_expr); @@ -213,6 +220,7 @@ class PushDownConditionContext { vector& normal_condi); std::vector sub_expr_info_; set from_tables_; + bool is_outer_{false}; }; class SelectAliasSolver { public: @@ -254,20 +262,26 @@ class AstNode { virtual void GetSubExpr(vector& sub_expr, bool is_top_and); virtual void 
GetRefTable(set& ref_table); - virtual RetCode PushDownCondition(PushDownConditionContext* pdccnxt) { + virtual RetCode PushDownCondition(PushDownConditionContext& pdccnxt) { return rSuccess; } virtual RetCode GetLogicalPlan(LogicalOperator*& logic_plan) { return rSuccess; } + virtual RetCode GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan) { + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan) { return rSuccess; } RetCode GetEqualJoinPair(vector& join_pair, LogicalOperator* args_lplan, LogicalOperator* next_lplan, const vector& equal_join_condition); + RetCode GetJoinCondition(vector& condition, + const vector& normal_condition, + LogicalOperator* left_lplan, + LogicalOperator* right_lplan); RetCode GetFilterCondition(vector& condition, const vector& normal_condition, LogicalOperator* logic_plan); @@ -284,6 +298,7 @@ struct ParseResult { AstNode* ast; const char* sql_clause; int error_number; + string error_info_; }; /** @@ -297,7 +312,7 @@ class AstStmtList : public AstNode { ~AstStmtList(); void Print(int level = 0) const; RetCode SemanticAnalisys(SemanticContext* sem_cnxt); - RetCode PushDownCondition(PushDownConditionContext* pdccnxt); + RetCode PushDownCondition(PushDownConditionContext& pdccnxt); RetCode GetLogicalPlan(LogicalOperator*& logic_plan); AstNode* stmt_; AstNode* next_; diff --git a/sql_parser/ast_node/ast_select_stmt.cpp b/sql_parser/ast_node/ast_select_stmt.cpp index 0f9140154..f1308a608 100644 --- a/sql_parser/ast_node/ast_select_stmt.cpp +++ b/sql_parser/ast_node/ast_select_stmt.cpp @@ -50,6 +50,7 @@ #include "../../logical_operator/logical_sort.h" #include "../../logical_operator/logical_subquery.h" #include "../../logical_operator/logical_delete_filter.h" +#include "../../logical_operator/logical_outer_join.h" #include "../ast_node/ast_expr_node.h" #include "../ast_node/ast_node.h" @@ -67,7 +68,7 @@ using claims::logical_operator::LogicalSort; using claims::logical_operator::LogicalLimit; using 
claims::logical_operator::LogicalSubquery; using claims::logical_operator::LogicalDeleteFilter; - +using claims::catalog::Attribute; using std::bitset; using std::endl; using std::cout; @@ -277,24 +278,23 @@ RetCode AstFromList::SemanticAnalisys(SemanticContext* sem_cnxt) { return rSuccess; } -RetCode AstFromList::PushDownCondition(PushDownConditionContext* pdccnxt) { - PushDownConditionContext* cur_pdccnxt = new PushDownConditionContext(); - cur_pdccnxt->sub_expr_info_ = pdccnxt->sub_expr_info_; +RetCode AstFromList::PushDownCondition(PushDownConditionContext& pdccnxt) { + PushDownConditionContext cur_pdccnxt; + cur_pdccnxt.sub_expr_info_ = pdccnxt.sub_expr_info_; if (NULL != args_) { - cur_pdccnxt->from_tables_.clear(); + cur_pdccnxt.from_tables_.clear(); args_->PushDownCondition(cur_pdccnxt); - pdccnxt->from_tables_.insert(cur_pdccnxt->from_tables_.begin(), - cur_pdccnxt->from_tables_.end()); + pdccnxt.from_tables_.insert(cur_pdccnxt.from_tables_.begin(), + cur_pdccnxt.from_tables_.end()); } if (NULL != next_) { - cur_pdccnxt->from_tables_.clear(); + cur_pdccnxt.from_tables_.clear(); next_->PushDownCondition(cur_pdccnxt); - pdccnxt->from_tables_.insert(cur_pdccnxt->from_tables_.begin(), - cur_pdccnxt->from_tables_.end()); + pdccnxt.from_tables_.insert(cur_pdccnxt.from_tables_.begin(), + cur_pdccnxt.from_tables_.end()); } - pdccnxt->SetCondition(equal_join_condition_, normal_condition_); - delete cur_pdccnxt; + pdccnxt.SetCondition(equal_join_condition_, normal_condition_); return rSuccess; } RetCode AstFromList::GetLogicalPlan(LogicalOperator*& logic_plan) { @@ -308,26 +308,57 @@ RetCode AstFromList::GetLogicalPlan(LogicalOperator*& logic_plan) { next_->GetLogicalPlan(next_lplan); } if (NULL != next_lplan) { - if (equal_join_condition_.size() > 0) { + if (!equal_join_condition_.empty()) { vector join_pair; join_pair.clear(); + // "args_lplan" and "next_lplan" order in GetEqualJoinPair' should be + // same as "arg_lplan" and "next_lplan" in "new LogicalJoin" ret 
= GetEqualJoinPair(join_pair, args_lplan, next_lplan, equal_join_condition_); if (rSuccess != ret) { return ret; } - logic_plan = new LogicalEqualJoin(join_pair, args_lplan, next_lplan); + vector condition; + condition.clear(); + ret = GetJoinCondition(condition, equal_join_condition_, next_lplan, + args_lplan); + if (rSuccess != ret) { + return ret; + } + ret = GetJoinCondition(condition, normal_condition_, next_lplan, + args_lplan); + if (rSuccess != ret) { + return ret; + } + // judge from join_type "left" "right" + string join_type = ""; + // this->args_ can be AST_TABLE, but treat as join. + if (AST_JOIN == (static_cast(this->args_))->ast_node_type_) { + join_type = (static_cast(this->args_))->join_type_; + } + if (-1 != join_type.find("left")) { + int join = 0; + logic_plan = new LogicalOuterJoin(join_pair, args_lplan, next_lplan, 0, + condition); + } else if (-1 != join_type.find("right")) { + logic_plan = new LogicalOuterJoin(join_pair, args_lplan, next_lplan, 1, + condition); + } else if (-1 != join_type.find("full")) { + logic_plan = new LogicalOuterJoin(join_pair, args_lplan, next_lplan, 2, + condition); + } else { + logic_plan = + new LogicalEqualJoin(join_pair, args_lplan, next_lplan, condition); + } } else { - logic_plan = new LogicalCrossJoin(args_lplan, next_lplan); - } - if (normal_condition_.size() > 0) { vector condition; condition.clear(); - ret = GetFilterCondition(condition, normal_condition_, logic_plan); + ret = GetJoinCondition(condition, normal_condition_, next_lplan, + args_lplan); if (rSuccess != ret) { return ret; } - logic_plan = new LogicalFilter(logic_plan, condition); + logic_plan = new LogicalCrossJoin(next_lplan, args_lplan, condition); } } else { logic_plan = args_lplan; @@ -377,6 +408,8 @@ RetCode AstTable::SemanticAnalisys(SemanticContext* sem_cnxt) { Environment::getInstance()->getCatalog()->getTable(table_name_); if (NULL == tbl) { LOG(ERROR) << "table: " << table_name_ << " dosen't exist!" 
<< endl; + sem_cnxt->error_msg_ = + "table: '\e[1m" + table_name_ + "\e[0m' dosen't exist "; return rTableNotExisted; } if (table_alias_ == "NULL") { @@ -394,9 +427,9 @@ RetCode AstTable::SemanticAnalisys(SemanticContext* sem_cnxt) { << endl; return rSuccess; } -RetCode AstTable::PushDownCondition(PushDownConditionContext* pdccnxt) { - pdccnxt->from_tables_.insert(table_alias_); - pdccnxt->SetCondition(equal_join_condition_, normal_condition_); +RetCode AstTable::PushDownCondition(PushDownConditionContext& pdccnxt) { + pdccnxt.from_tables_.insert(table_alias_); + pdccnxt.SetCondition(equal_join_condition_, normal_condition_); return rSuccess; } // TODO(FZH) diver table_name_ to LogicalScan @@ -450,7 +483,7 @@ RetCode AstTable::GetLogicalPlan(LogicalOperator*& logic_plan) { ExprNode* qnode = NULL; for (auto it = normal_condition_.begin(); it != normal_condition_.end(); ++it) { - ret = (*it)->GetLogicalPlan(qnode, logic_plan); + ret = (*it)->GetLogicalPlan(qnode, logic_plan, NULL); if (rSuccess != ret) { LOG(ERROR) << "get normal condition upon a table, due to [err: " << ret << " ] !" 
<< endl; @@ -534,14 +567,15 @@ RetCode AstSubquery::SemanticAnalisys(SemanticContext* sem_cnxt) { return sem_cnxt->AddTableColumn(column_to_table); } -RetCode AstSubquery::PushDownCondition(PushDownConditionContext* pdccnxt) { +RetCode AstSubquery::PushDownCondition(PushDownConditionContext& pdccnxt) { RetCode ret = rSuccess; - ret = subquery_->PushDownCondition(NULL); + PushDownConditionContext child_pdccnxt; + ret = subquery_->PushDownCondition(child_pdccnxt); if (rSuccess != ret) { return ret; } - pdccnxt->from_tables_.insert(subquery_alias_); - pdccnxt->SetCondition(equal_join_condition_, normal_condition_); + pdccnxt.from_tables_.insert(subquery_alias_); + pdccnxt.SetCondition(equal_join_condition_, normal_condition_); return rSuccess; } // may be deliver subquery output schema @@ -616,6 +650,8 @@ AstJoin::AstJoin(AstNodeType ast_node_type, int join_type, AstNode* left_table, } if (bit_num[3] == 1) { join_type_ = join_type_ + "left "; + left_table_ = right_table; + right_table_ = left_table; } if (bit_num[4] == 1) { join_type_ = join_type_ + "right "; @@ -623,6 +659,9 @@ AstJoin::AstJoin(AstNodeType ast_node_type, int join_type, AstNode* left_table, if (bit_num[5] == 1) { join_type_ = join_type_ + "natural "; } + if (bit_num[6] == 1) { + join_type_ = join_type_ + "full "; + } } join_type_ = join_type_ + "join"; } @@ -696,31 +735,57 @@ RetCode AstJoin::SemanticAnalisys(SemanticContext* sem_cnxt) { // join_sem_cnxt.~SemanticContext(); return ret; } -RetCode AstJoin::PushDownCondition(PushDownConditionContext* pdccnxt) { - PushDownConditionContext* cur_pdccnxt = new PushDownConditionContext(); - cur_pdccnxt->sub_expr_info_ = pdccnxt->sub_expr_info_; +RetCode AstJoin::PushDownCondition(PushDownConditionContext& pdccnxt) { + PushDownConditionContext cur_pdccnxt; + // cout << "join type = " << join_type_ << endl; + // pdccnxt.sub_expr_info -- conditions from where clause + if (-1 == join_type_.find("outer")) { + cur_pdccnxt.sub_expr_info_ = pdccnxt.sub_expr_info_; + 
} + + // join_condition_->condition -- conditions from on clause if (NULL != join_condition_) { - cur_pdccnxt->GetSubExprInfo( + cur_pdccnxt.GetSubExprInfo( reinterpret_cast(join_condition_)->condition_); } - cur_pdccnxt->from_tables_.clear(); - PushDownConditionContext* child_pdccnxt = new PushDownConditionContext(); - child_pdccnxt->sub_expr_info_ = cur_pdccnxt->sub_expr_info_; - child_pdccnxt->from_tables_.clear(); + cur_pdccnxt.from_tables_.clear(); + PushDownConditionContext child_pdccnxt; + child_pdccnxt.sub_expr_info_ = cur_pdccnxt.sub_expr_info_; + child_pdccnxt.from_tables_.clear(); + left_table_->PushDownCondition(child_pdccnxt); - cur_pdccnxt->from_tables_.insert(child_pdccnxt->from_tables_.begin(), - child_pdccnxt->from_tables_.end()); + cur_pdccnxt.from_tables_.insert(child_pdccnxt.from_tables_.begin(), + child_pdccnxt.from_tables_.end()); + + child_pdccnxt.from_tables_.clear(); + if (-1 == join_type_.find("outer")) { + right_table_->PushDownCondition(child_pdccnxt); + cur_pdccnxt.from_tables_.insert(child_pdccnxt.from_tables_.begin(), + child_pdccnxt.from_tables_.end()); + } else { + if (right_table_->ast_node_type_ == AST_TABLE) { + cur_pdccnxt.from_tables_.insert( + reinterpret_cast(right_table_)->table_alias_); + } + } + + // cout << "!!!" 
<< cur_pdccnxt.sub_expr_info_.size() << endl; + cur_pdccnxt.SetCondition(equal_join_condition_, normal_condition_); + // cout << "equal :" << equal_join_condition_.size() << endl; + // cout << "normal :" << normal_condition_.size() << endl; - child_pdccnxt->from_tables_.clear(); - right_table_->PushDownCondition(child_pdccnxt); - cur_pdccnxt->from_tables_.insert(child_pdccnxt->from_tables_.begin(), - child_pdccnxt->from_tables_.end()); + pdccnxt.from_tables_.insert(cur_pdccnxt.from_tables_.begin(), + cur_pdccnxt.from_tables_.end()); + if (-1 != join_type_.find("outer")) { + // cout << "When pushdown, normal condi num is: " + // << pdccnxt.sub_expr_info_.size() << endl; - cur_pdccnxt->SetCondition(equal_join_condition_, normal_condition_); + for (int i = 0; i < pdccnxt.sub_expr_info_.size(); i++) { + normal_condition_.push_back(pdccnxt.sub_expr_info_[i]->sub_expr_); + } + } - pdccnxt->from_tables_.insert(cur_pdccnxt->from_tables_.begin(), - cur_pdccnxt->from_tables_.end()); return rSuccess; } RetCode AstJoin::GetLogicalPlan(LogicalOperator*& logic_plan) { @@ -735,7 +800,26 @@ RetCode AstJoin::GetLogicalPlan(LogicalOperator*& logic_plan) { if (rSuccess != ret) { return ret; } - if (equal_join_condition_.size() > 0) { + + // cout << "equal join condition num = " << equal_join_condition_.size() << + // endl; + // cout << "normal condition num = " << normal_condition_.size() << endl; + if (!equal_join_condition_.empty()) { + vector condition; + condition.clear(); + ret = GetJoinCondition(condition, equal_join_condition_, left_plan, + right_plan); + if (rSuccess != ret) { + return ret; + } + // As for outer join, normal condition can not be processed in hash join + if (-1 == join_type_.find("outer")) { + ret = + GetJoinCondition(condition, normal_condition_, left_plan, right_plan); + if (rSuccess != ret) { + return ret; + } + } vector join_pair; join_pair.clear(); ret = GetEqualJoinPair(join_pair, left_plan, right_plan, @@ -743,11 +827,44 @@ RetCode 
AstJoin::GetLogicalPlan(LogicalOperator*& logic_plan) { if (rSuccess != ret) { return ret; } - logic_plan = new LogicalEqualJoin(join_pair, left_plan, right_plan); + + // Outer join should generate a filter to deal with normal conditon. + if (-1 != join_type_.find("left")) { + logic_plan = + new LogicalOuterJoin(join_pair, left_plan, right_plan, 0, condition); + ret = GetFilterLogicalPlan(logic_plan); + } else if (-1 != join_type_.find("right")) { + logic_plan = + new LogicalOuterJoin(join_pair, left_plan, right_plan, 1, condition); + ret = GetFilterLogicalPlan(logic_plan); + } else if (-1 != join_type_.find("full")) { + logic_plan = + new LogicalOuterJoin(join_pair, left_plan, right_plan, 2, condition); + ret = GetFilterLogicalPlan(logic_plan); + } else { + logic_plan = + new LogicalEqualJoin(join_pair, left_plan, right_plan, condition); + } } else { - logic_plan = new LogicalCrossJoin(left_plan, right_plan); + if (!normal_condition_.empty()) { + vector condition; + condition.clear(); + ret = + GetJoinCondition(condition, normal_condition_, left_plan, right_plan); + if (rSuccess != ret) { + return ret; + } + logic_plan = new LogicalCrossJoin(left_plan, right_plan, condition); + } else { + logic_plan = new LogicalCrossJoin(left_plan, right_plan); + } } - if (normal_condition_.size() > 0) { + + return rSuccess; +} +RetCode AstJoin::GetFilterLogicalPlan(LogicalOperator*& logic_plan) { + RetCode ret = rSuccess; + if (!normal_condition_.empty()) { vector condition; condition.clear(); ret = GetFilterCondition(condition, normal_condition_, logic_plan); @@ -756,9 +873,8 @@ RetCode AstJoin::GetLogicalPlan(LogicalOperator*& logic_plan) { } logic_plan = new LogicalFilter(logic_plan, condition); } - return rSuccess; + return ret; } - AstWhereClause::AstWhereClause(AstNodeType ast_node_type, AstNode* expr) : AstNode(ast_node_type), expr_(expr) {} @@ -1030,7 +1146,7 @@ RetCode AstOrderByClause::GetLogicalPlan(LogicalOperator*& logic_plan) { int direction = 0; AstOrderByList* 
orderby = orderby_list_; while (NULL != orderby) { - orderby->expr_->GetLogicalPlan(tmp_expr, logic_plan); + orderby->expr_->GetLogicalPlan(tmp_expr, logic_plan, NULL); direction = orderby->orderby_direction_ == "ASC" ? 0 : 1; orderby_expr.push_back(make_pair(tmp_expr, direction)); orderby = orderby->next_; @@ -1083,7 +1199,7 @@ RetCode AstHavingClause::GetLogicalPlan(LogicalOperator*& logic_plan) { if (NULL != expr_) { vector having_expr; ExprNode* expr = NULL; - expr_->GetLogicalPlan(expr, logic_plan); + expr_->GetLogicalPlan(expr, logic_plan, NULL); having_expr.push_back(expr); logic_plan = new LogicalFilter(logic_plan, having_expr); } @@ -1232,6 +1348,8 @@ RetCode AstColumn::SemanticAnalisys(SemanticContext* sem_cnxt) { if (rSuccess != ret) { LOG(ERROR) << "There are errors in ( " << relation_name_ << " , " << column_name_ << " )" << endl; + sem_cnxt->error_msg_ = + "column: '\e[1m" + column_name_ + "\e[0m' is invalid"; return ret; } if (NULL != next_) { @@ -1272,14 +1390,42 @@ void AstColumn::GetRefTable(set& ref_table) { } RetCode AstColumn::GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan) { - logic_expr = new ExprColumn( - ExprNodeType::t_qcolcumns, - child_logic_plan->GetPlanContext() - .GetAttribute(relation_name_, relation_name_ + "." + column_name_) - .attrType->type, - expr_str_, relation_name_, column_name_); - return rSuccess; + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan) { + Attribute ret_lattr = left_lplan->GetPlanContext().GetAttribute( + string(relation_name_ + "." + column_name_)); + if (NULL != right_lplan) { + Attribute ret_rattr = right_lplan->GetPlanContext().GetAttribute( + string(relation_name_ + "." 
+ column_name_)); + if ((ret_lattr.attrName != "NULL") && (ret_rattr.attrName != "NULL")) { + assert(false); + return rFailure; + } else if (ret_lattr.attrName != "NULL") { + logic_expr = + new ExprColumn(ExprNodeType::t_qcolcumns, ret_lattr.attrType->type, + expr_str_, relation_name_, column_name_); + return rSuccess; + } else if (ret_rattr.attrName != "NULL") { + logic_expr = + new ExprColumn(ExprNodeType::t_qcolcumns, ret_rattr.attrType->type, + expr_str_, relation_name_, column_name_); + return rSuccess; + } else { + assert(false); + return rFailure; + } + } else { + if (ret_lattr.attrName != "NULL") { + logic_expr = + new ExprColumn(ExprNodeType::t_qcolcumns, ret_lattr.attrType->type, + expr_str_, relation_name_, column_name_); + return rSuccess; + } else { + logic_expr = NULL; + assert(false); + return rFailure; + } + } } RetCode AstColumn::SolveSelectAlias( SelectAliasSolver* const select_alias_solver) { @@ -1507,14 +1653,11 @@ RetCode AstSelectStmt::SemanticAnalisys(SemanticContext* sem_cnxt) { return ret; } -RetCode AstSelectStmt::PushDownCondition(PushDownConditionContext* pdccnxt) { - if (NULL == pdccnxt) { - pdccnxt = new PushDownConditionContext(); - } +RetCode AstSelectStmt::PushDownCondition(PushDownConditionContext& pdccnxt) { if (NULL != where_clause_) { AstWhereClause* where_clause = reinterpret_cast(where_clause_); - pdccnxt->GetSubExprInfo(where_clause->expr_); + pdccnxt.GetSubExprInfo(where_clause->expr_); } from_list_->PushDownCondition(pdccnxt); @@ -1529,14 +1672,14 @@ RetCode AstSelectStmt::GetLogicalPlanOfAggeration( ExprNode* tmp_expr = NULL; RetCode ret = rSuccess; for (auto it = groupby_attrs_.begin(); it != groupby_attrs_.end(); ++it) { - ret = (*it)->GetLogicalPlan(tmp_expr, logic_plan); + ret = (*it)->GetLogicalPlan(tmp_expr, logic_plan, NULL); if (rSuccess != ret) { return ret; } group_by_attrs.push_back(tmp_expr); } for (auto it = agg_attrs_.begin(); it != agg_attrs_.end(); ++it) { - ret = (*it)->GetLogicalPlan(tmp_expr, 
logic_plan); + ret = (*it)->GetLogicalPlan(tmp_expr, logic_plan, NULL); if (rSuccess != ret) { return ret; } @@ -1587,7 +1730,7 @@ RetCode AstSelectStmt::GetLogicalPlanOfProject(LogicalOperator*& logic_plan) { } } for (int i = 0; i < ast_expr.size(); ++i) { - ret = ast_expr[i]->GetLogicalPlan(tmp_expr, logic_plan); + ret = ast_expr[i]->GetLogicalPlan(tmp_expr, logic_plan, NULL); if (rSuccess != ret) { return rSuccess; } diff --git a/sql_parser/ast_node/ast_select_stmt.h b/sql_parser/ast_node/ast_select_stmt.h index 944f9818b..cb3343f57 100644 --- a/sql_parser/ast_node/ast_select_stmt.h +++ b/sql_parser/ast_node/ast_select_stmt.h @@ -85,7 +85,7 @@ class AstFromList : public AstNode { ~AstFromList(); void Print(int level = 0) const; RetCode SemanticAnalisys(SemanticContext* sem_cnxt); - RetCode PushDownCondition(PushDownConditionContext* pdccnxt); + RetCode PushDownCondition(PushDownConditionContext& pdccnxt); RetCode GetLogicalPlan(LogicalOperator*& logic_plan); map table_joined_root; @@ -107,7 +107,7 @@ class AstTable : public AstNode { ~AstTable(); void Print(int level = 0) const; RetCode SemanticAnalisys(SemanticContext* sem_cnxt); - RetCode PushDownCondition(PushDownConditionContext* pdccnxt); + RetCode PushDownCondition(PushDownConditionContext& pdccnxt); RetCode GetLogicalPlan(LogicalOperator*& logic_plan); vector equal_join_condition_; @@ -130,7 +130,7 @@ class AstSubquery : public AstNode { ~AstSubquery(); void Print(int level = 0) const; RetCode SemanticAnalisys(SemanticContext* sem_cnxt); - RetCode PushDownCondition(PushDownConditionContext* pdccnxt); + RetCode PushDownCondition(PushDownConditionContext& pdccnxt); RetCode GetLogicalPlan(LogicalOperator*& logic_plan); string subquery_alias_; @@ -163,8 +163,9 @@ class AstJoin : public AstNode { ~AstJoin(); void Print(int level = 0) const; RetCode SemanticAnalisys(SemanticContext* sem_cnxt); - RetCode PushDownCondition(PushDownConditionContext* pdccnxt); + RetCode PushDownCondition(PushDownConditionContext& 
pdccnxt); RetCode GetLogicalPlan(LogicalOperator*& logic_plan); + RetCode GetFilterLogicalPlan(LogicalOperator*& logic_plan); string join_type_; AstNode* left_table_; @@ -307,8 +308,10 @@ class AstColumn : public AstNode { RetCode SemanticAnalisys(SemanticContext* sem_cnxt); void RecoverExprName(string& name); void GetRefTable(set& ref_table); + RetCode GetLogicalPlan(ExprNode*& logic_expr, - LogicalOperator* child_logic_plan); + LogicalOperator* const left_lplan, + LogicalOperator* const right_lplan); RetCode SolveSelectAlias(SelectAliasSolver* const select_alias_solver); AstNode* AstNodeCopy(); string relation_name_; @@ -338,7 +341,7 @@ class AstSelectStmt : public AstNode { ~AstSelectStmt(); void Print(int level = 0) const; RetCode SemanticAnalisys(SemanticContext* sem_cnxt); - RetCode PushDownCondition(PushDownConditionContext* pdccnxt); + RetCode PushDownCondition(PushDownConditionContext& pdccnxt); RetCode GetLogicalPlan(LogicalOperator*& logic_plan); RetCode GetLogicalPlanOfAggeration(LogicalOperator*& logic_plan); RetCode GetLogicalPlanOfProject(LogicalOperator*& logic_plan); diff --git a/sql_parser/ast_node/ast_update_stmt.cpp b/sql_parser/ast_node/ast_update_stmt.cpp new file mode 100644 index 000000000..fb3850e99 --- /dev/null +++ b/sql_parser/ast_node/ast_update_stmt.cpp @@ -0,0 +1,193 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/sql_parser/ast_node/ast_update_stmt.cpp + * + * Created on: Aug 16, 2016 + * Author: cswang + * Email: cs_wang@infosys.com + * + * Description: + * + */ + +#include "../ast_node/ast_update_stmt.h" + +#include +#include +#include +#include +#include + +#include "./ast_select_stmt.h" +#include "../ast_node/ast_expr_node.h" +#include "../../common/error_define.h" +#include "../../catalog/table.h" +#include "../../Environment.h" +using namespace claims::common; // NOLINT +using std::cout; +using std::endl; +using std::string; +using std::setw; +using std::bitset; + +// namespace claims { +// namespace ast_node { + +AstUpdateStmt::AstUpdateStmt(AstNodeType ast_node_type, + AstNode* update_set_list, AstNode* update_table, + AstNode* where_list) + : AstNode(ast_node_type), + update_set_list_(update_set_list), + update_table_(update_table), + where_list_(where_list) {} + +AstUpdateStmt::~AstUpdateStmt() { + if (NULL != update_set_list_) { + delete update_set_list_; + update_set_list_ = NULL; + } + + if (NULL != where_list_) { + delete where_list_; + where_list_ = NULL; + } +} + +void AstUpdateStmt::Print(int level) const { + cout << setw(level * TAB_SIZE) << " " + << "|Update Stmt|" << endl; + + AstUpdateSetList* update_set_list_temp = update_set_list_; + while (NULL != update_set_list_temp) { + update_set_list_temp->Print(level + 1); + update_set_list_temp = update_set_list_temp->next_; + } + + if (NULL != where_list_) { + where_list_->Print(level + 1); + } +} + +RetCode AstUpdateStmt::SemanticAnalisys(SemanticContext* sem_cnxt) { + RetCode 
ret = rSuccess; + string tablename = dynamic_cast(update_table_)->table_name_; + cout << "AstUpdateStmt:[" << tablename << "]" << endl; + TableDescriptor* new_table = + Environment::getInstance()->getCatalog()->getTable(tablename); + if (NULL == new_table) { + LOG(ERROR) << "The table " + tablename + " is not existed."; + sem_cnxt->error_msg_ = "The table " + tablename + " is not existed."; + ret = rTableNotExisted; + return ret; + } + string tabledel = tablename + "_DEL"; + new_table = Environment::getInstance()->getCatalog()->getTable(tabledel); + if (NULL == new_table) { + LOG(ERROR) << "The table DEL " + tabledel + + " is not existed during update data." << std::endl; + sem_cnxt->error_msg_ = + "The table DEL " + tabledel + " is not existed during update data."; + ret = rTableNotExisted; + return ret; + } + + AstUpdateSetList* update_set_list_temp = update_set_list_; + while (NULL != update_set_list_temp) { + ret = update_set_list_temp->SemanticAnalisys(sem_cnxt); + if (rSuccess != ret) { + string column_name_tmp = reinterpret_cast( + update_set_list_temp->args0_)->column_name_; + LOG(ERROR) << "The column " + column_name_tmp + + " is not existed during update data." 
<< std::endl; + sem_cnxt->error_msg_ = "The column " + column_name_tmp + + " is not existed during update data."; + return ret; + } + update_set_list_temp = update_set_list_temp->next_; + } + + return ret; +} + +AstUpdateSetList::AstUpdateSetList(AstNodeType ast_node_type, AstNode* args0, + AstNode* args1, AstNode* next) + : AstNode(ast_node_type), args0_(args0), args1_(args1), next_(next) {} + +AstUpdateSetList::~AstUpdateSetList() { + if (NULL != args0_) { + delete args0_; + args0_ = NULL; + } + + if (NULL != args1_) { + delete args1_; + args1_ = NULL; + } + + if (NULL != next_) { + delete next_; + next_ = NULL; + } +} + +void AstUpdateSetList::Print(int level) const { + cout << setw(level * TAB_SIZE) << " " + << "|UpdateSet List| " << endl; + if (args0_ != NULL) args0_->Print(level + 1); + if (args1_ != NULL) args1_->Print(level + 1); +} + +RetCode AstUpdateSetList::SemanticAnalisys(SemanticContext* sem_cnxt) { + RetCode ret = rSuccess; + // cout << "args0_->ast_node_type():" << args0_->ast_node_type() << endl; + // cout << "args1_->ast_node_type():" << args1_->ast_node_type() << endl; + if (args0_->ast_node_type() == AST_COLUMN) { + AstColumn* column = reinterpret_cast(args0_); + cout << "AstUpdateSetList args0: [" << column->relation_name_ << "." + << column->column_name_ << "]" << endl; + + } else { + printf("AstUpdateSetList args0 error ast_node_type\n"); + ret = rFailure; + return ret; + } +#if 1 + if (args1_->ast_node_type() == AST_EXPR_CONST) { + AstExprConst* expr_const = reinterpret_cast(args1_); + cout << "AstUpdateSetList args1: [" << expr_const->expr_type_ << "." + << expr_const->data_ << "]" << endl; + } else { + printf("AstUpdateSetList args1 error ast_node_type\n"); + LOG(ERROR) << "Don't support update set values use column data." 
+ << std::endl; + sem_cnxt->error_msg_ = "Don't support update set values use column data."; + ret = rFailure; + return ret; + } +#endif + return ret; +} + +RetCode AstUpdateSetList::PushDownCondition(PushDownConditionContext& pdccnxt) { +} + +RetCode AstUpdateSetList::GetLogicalPlan(LogicalOperator*& logic_plan) {} + +// } /* namespace ast_node */ +// } /* namespace claims */ diff --git a/sql_parser/ast_node/ast_update_stmt.h b/sql_parser/ast_node/ast_update_stmt.h new file mode 100644 index 000000000..e5912eec7 --- /dev/null +++ b/sql_parser/ast_node/ast_update_stmt.h @@ -0,0 +1,70 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/sql_parser/ast_node/ast_update_stmt.h + * + * Created on: Aug 16, 2016 + * Author: cswang + * Email: cs_wang@infosys.com + * + * Description: + * + */ + +#ifndef SQL_PARSER_AST_NODE_AST_UPDATE_STMT_H_ +#define SQL_PARSER_AST_NODE_AST_UPDATE_STMT_H_ + +#include "../ast_node/ast_node.h" + +// namespace claims { +// namespace ast_node { + +class AstUpdateStmt : public AstNode { + public: + AstUpdateStmt(AstNodeType ast_node_type, AstNode* update_set_list, + AstNode* update_table, AstNode* where_list); + virtual ~AstUpdateStmt(); + void Print(int level = 0) const; + RetCode SemanticAnalisys(SemanticContext* sem_cnxt); + + AstNodeType ast_node_type_; + AstNode* update_set_list_; + AstNode* update_table_; + AstNode* where_list_; +}; + +class AstUpdateSetList : public AstNode { + public: + AstUpdateSetList(AstNodeType ast_node_type, AstNode* args0, AstNode* args1, + AstNode* next); + ~AstUpdateSetList(); + void Print(int level = 0) const; + RetCode SemanticAnalisys(SemanticContext* sem_cnxt); + RetCode PushDownCondition(PushDownConditionContext& pdccnxt); + RetCode GetLogicalPlan(LogicalOperator*& logic_plan); + + AstNodeType ast_node_type_; + AstNode* args0_; + AstNode* args1_; + AstNode* next_; +}; + +// } /* namespace ast_node */ +// } /* namespace claims */ + +#endif // SQL_PARSER_AST_NODE_AST_UPDATE_STMT_H_ diff --git a/sql_parser/parser/Makefile.am b/sql_parser/parser/Makefile.am index d685f77c1..3cd0f0401 100755 --- a/sql_parser/parser/Makefile.am +++ b/sql_parser/parser/Makefile.am @@ -1,10 +1,9 @@ AM_CPPFLAGS= -fPIC -fpermissive\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization \ --I${THERON_HOME}/Include \ -I${GTEST_HOME}/include AM_LDFLAGS=-lc -lm -lrt -lboost_serialization -lxs -lpthread -lglog @@ -18,11 +17,10 @@ LDADD = ../../sql_parser/ast_node/libast_node.a \ ../../common/log/liblog.a \ /usr/local/lib/libglog.a \ 
/usr/local/lib/libglog.so \ - ${HADOOP_HOME}/c++/Linux-amd64-64/lib/libhdfs.a\ + ${HADOOP_HOME}/lib/native/libhdfs.a\ ${JAVA_HOME}/jre/lib/amd64/server/libjvm.so\ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.so \ - ${THERON_HOME}/Lib/libtherond.a \ ${GTEST_HOME}/libgtest.a diff --git a/sql_parser/parser/lex.yy.cpp b/sql_parser/parser/lex.yy.cpp index c2299d421..0f2aa4c73 100644 --- a/sql_parser/parser/lex.yy.cpp +++ b/sql_parser/parser/lex.yy.cpp @@ -362,8 +362,8 @@ static void yy_fatal_error (yyconst char msg[] ,yyscan_t yyscanner ); *yy_cp = '\0'; \ yyg->yy_c_buf_p = yy_cp; -#define YY_NUM_RULES 292 -#define YY_END_OF_BUFFER 293 +#define YY_NUM_RULES 293 +#define YY_END_OF_BUFFER 294 /* This struct is not used in this scanner, but its presence is necessary. */ struct yy_trans_info @@ -373,141 +373,141 @@ struct yy_trans_info }; static yyconst flex_int16_t yy_accept[1227] = { 0, - 0, 0, 0, 0, 0, 0, 293, 291, 290, 290, - 248, 291, 285, 248, 248, 291, 248, 248, 248, 248, - 231, 231, 291, 251, 255, 252, 291, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 291, 248, 289, 289, 274, 253, 0, - 243, 241, 0, 285, 249, 0, 242, 240, 0, 0, - 231, 0, 233, 0, 287, 232, 0, 0, 0, 284, - 259, 256, 254, 257, 260, 0, 277, 0, 0, 274, - 274, 274, 274, 274, 9, 274, 274, 0, 274, 274, - - 274, 274, 274, 20, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 88, 274, 90, 99, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 134, 274, - 139, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 201, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 0, 274, 274, 274, - 0, 276, 0, 250, 288, 274, 0, 0, 241, 0, - - 0, 0, 240, 0, 286, 0, 236, 232, 0, 246, - 0, 234, 245, 258, 0, 
281, 0, 283, 0, 282, - 1, 2, 274, 274, 6, 7, 274, 10, 274, 271, - 0, 274, 274, 274, 274, 274, 16, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 43, 50, 274, 274, 274, 274, 274, - 60, 274, 274, 274, 274, 274, 274, 67, 274, 274, - 274, 274, 274, 274, 274, 75, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 96, 274, 274, 103, 274, 274, 274, 274, 274, 274, - - 274, 274, 274, 274, 274, 274, 273, 274, 274, 274, - 272, 127, 274, 274, 130, 274, 274, 274, 274, 274, - 141, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 174, 274, 274, - 274, 274, 274, 274, 181, 188, 274, 274, 274, 270, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 211, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 0, 226, - 274, 274, 275, 5, 0, 241, 0, 240, 286, 0, - - 235, 278, 280, 279, 274, 274, 274, 274, 247, 274, - 274, 274, 274, 274, 17, 18, 274, 21, 274, 23, - 274, 274, 25, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 44, 274, - 274, 274, 274, 274, 55, 274, 274, 274, 62, 63, - 274, 64, 65, 68, 274, 274, 71, 274, 274, 274, - 274, 274, 274, 78, 274, 274, 274, 82, 274, 274, - 46, 274, 274, 274, 274, 274, 274, 199, 176, 96, - 14, 274, 98, 274, 101, 104, 105, 274, 274, 108, - 109, 274, 274, 112, 274, 115, 116, 119, 274, 274, - - 274, 274, 274, 274, 274, 274, 274, 274, 132, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 152, 154, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 175, 274, 177, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 195, 196, 197, 274, 274, 274, 274, 237, 204, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 222, 220, 274, 274, 224, 274, 244, - 227, 274, 0, 0, 0, 0, 3, 274, 274, 274, - - 274, 230, 274, 274, 274, 19, 274, 267, 274, 274, 
- 26, 274, 274, 274, 274, 274, 274, 274, 274, 269, - 274, 35, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 239, 73, 74, 76, 274, 274, 80, 81, 274, - 274, 274, 274, 102, 274, 92, 93, 274, 274, 274, - 274, 274, 274, 107, 110, 111, 274, 274, 274, 274, - 121, 274, 274, 274, 274, 274, 228, 274, 274, 274, - 274, 274, 274, 140, 142, 274, 274, 274, 274, 274, - 274, 149, 274, 150, 153, 274, 274, 274, 274, 274, - - 274, 274, 274, 274, 274, 165, 156, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 191, 274, 274, - 274, 274, 274, 274, 274, 262, 205, 274, 274, 274, - 274, 274, 274, 210, 212, 274, 274, 216, 274, 274, - 274, 221, 223, 225, 274, 274, 8, 274, 12, 274, - 15, 274, 24, 274, 274, 274, 28, 274, 274, 274, - 274, 274, 34, 274, 40, 274, 274, 274, 274, 274, - 274, 274, 274, 54, 274, 274, 274, 61, 274, 66, - 274, 70, 274, 74, 274, 274, 83, 274, 274, 274, - - 89, 91, 274, 95, 274, 274, 274, 274, 274, 274, - 274, 274, 274, 274, 274, 48, 274, 274, 274, 133, - 274, 274, 137, 274, 274, 274, 274, 274, 274, 274, - 274, 156, 274, 158, 159, 274, 274, 274, 163, 164, - 166, 167, 168, 49, 171, 274, 274, 274, 178, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 192, 274, 274, 274, 274, 274, 274, 274, 206, 274, - 207, 274, 209, 265, 274, 274, 216, 274, 274, 274, - 274, 4, 274, 13, 22, 274, 274, 27, 29, 274, - 274, 274, 33, 274, 274, 274, 274, 274, 50, 51, - - 52, 53, 274, 274, 274, 274, 69, 72, 77, 274, - 274, 274, 274, 274, 96, 274, 100, 106, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 274, 274, 129, - 274, 50, 274, 274, 143, 274, 274, 146, 274, 274, - 151, 274, 157, 160, 161, 274, 169, 274, 274, 274, - 274, 179, 274, 274, 274, 274, 274, 274, 274, 274, - 274, 261, 274, 274, 274, 274, 274, 274, 203, 238, - 274, 274, 274, 274, 218, 219, 274, 274, 274, 274, - 274, 274, 32, 268, 274, 41, 45, 274, 274, 56, - 274, 58, 274, 79, 274, 274, 274, 274, 274, 97, - - 274, 117, 
118, 274, 274, 274, 274, 274, 274, 274, - 274, 128, 274, 136, 274, 274, 274, 274, 274, 274, - 162, 274, 274, 274, 180, 274, 183, 274, 274, 274, - 274, 189, 274, 274, 274, 274, 274, 200, 202, 208, - 213, 214, 274, 274, 229, 274, 25, 266, 30, 274, - 274, 274, 274, 42, 263, 264, 274, 274, 135, 274, - 274, 274, 274, 274, 113, 274, 274, 123, 274, 274, - 123, 274, 274, 274, 274, 274, 145, 147, 274, 274, - 274, 172, 173, 274, 274, 274, 274, 274, 274, 274, - 193, 274, 198, 274, 217, 274, 274, 31, 274, 274, - - 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, - 122, 124, 274, 274, 274, 274, 138, 274, 148, 155, - 274, 274, 184, 274, 274, 274, 274, 194, 274, 274, - 274, 274, 274, 274, 274, 59, 274, 274, 86, 87, - 94, 274, 274, 47, 274, 274, 274, 144, 274, 274, - 274, 274, 274, 274, 274, 274, 274, 36, 37, 39, - 274, 274, 274, 274, 120, 274, 274, 274, 274, 182, - 274, 274, 274, 274, 274, 218, 274, 274, 57, 84, - 274, 274, 274, 126, 274, 274, 274, 274, 274, 190, - 215, 11, 274, 274, 114, 274, 274, 274, 185, 274, - - 274, 274, 274, 274, 274, 274, 274, 274, 274, 85, - 274, 274, 274, 274, 187, 38, 274, 274, 274, 274, - 125, 131, 170, 274, 186, 0 + 0, 0, 0, 0, 0, 0, 294, 292, 291, 291, + 249, 292, 286, 249, 249, 292, 249, 249, 249, 249, + 232, 232, 292, 252, 256, 253, 292, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 292, 249, 290, 290, 275, 254, 0, + 244, 242, 0, 286, 250, 0, 243, 241, 0, 0, + 232, 0, 234, 0, 288, 233, 0, 0, 0, 285, + 260, 257, 255, 258, 261, 0, 278, 0, 0, 275, + 275, 275, 275, 275, 9, 275, 275, 0, 275, 275, + + 275, 275, 275, 20, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 89, 275, 91, 100, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 135, 275, + 140, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 
275, 275, 275, 275, 202, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 0, 275, 275, 275, + 0, 277, 0, 251, 289, 275, 0, 0, 242, 0, + + 0, 0, 241, 0, 287, 0, 237, 233, 0, 247, + 0, 235, 246, 259, 0, 282, 0, 284, 0, 283, + 1, 2, 275, 275, 6, 7, 275, 10, 275, 272, + 0, 275, 275, 275, 275, 275, 16, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 43, 50, 275, 275, 275, 275, 275, + 60, 275, 275, 275, 275, 275, 275, 67, 275, 275, + 275, 275, 275, 275, 275, 75, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 97, 275, 275, 104, 275, 275, 275, 275, 275, 275, + + 275, 275, 275, 275, 275, 275, 274, 275, 275, 275, + 273, 128, 275, 275, 131, 275, 275, 275, 275, 275, + 142, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 175, 275, 275, + 275, 275, 275, 275, 182, 189, 275, 275, 275, 271, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 212, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 0, 227, + 275, 275, 276, 5, 0, 242, 0, 241, 287, 0, + + 236, 279, 281, 280, 275, 275, 275, 275, 248, 275, + 275, 275, 275, 275, 17, 18, 275, 21, 275, 23, + 275, 275, 25, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 44, 275, + 275, 275, 275, 275, 55, 275, 275, 275, 62, 63, + 275, 64, 65, 68, 275, 275, 71, 275, 275, 275, + 275, 275, 275, 78, 79, 275, 275, 83, 275, 275, + 46, 275, 275, 275, 275, 275, 275, 200, 177, 97, + 14, 275, 99, 275, 102, 105, 106, 275, 275, 109, + 110, 275, 275, 113, 275, 116, 117, 120, 275, 275, + + 275, 275, 275, 275, 275, 275, 275, 275, 133, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 153, 155, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 176, 275, 178, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 196, 197, 198, 275, 275, 275, 275, 238, 205, 
275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 223, 221, 275, 275, 225, 275, 245, + 228, 275, 0, 0, 0, 0, 3, 275, 275, 275, + + 275, 231, 275, 275, 275, 19, 275, 268, 275, 275, + 26, 275, 275, 275, 275, 275, 275, 275, 275, 270, + 275, 35, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 240, 73, 74, 76, 275, 275, 81, 82, 275, + 275, 275, 275, 103, 275, 93, 94, 275, 275, 275, + 275, 275, 275, 108, 111, 112, 275, 275, 275, 275, + 122, 275, 275, 275, 275, 275, 229, 275, 275, 275, + 275, 275, 275, 141, 143, 275, 275, 275, 275, 275, + 275, 150, 275, 151, 154, 275, 275, 275, 275, 275, + + 275, 275, 275, 275, 275, 166, 157, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 192, 275, 275, + 275, 275, 275, 275, 275, 263, 206, 275, 275, 275, + 275, 275, 275, 211, 213, 275, 275, 217, 275, 275, + 275, 222, 224, 226, 275, 275, 8, 275, 12, 275, + 15, 275, 24, 275, 275, 275, 28, 275, 275, 275, + 275, 275, 34, 275, 40, 275, 275, 275, 275, 275, + 275, 275, 275, 54, 275, 275, 275, 61, 275, 66, + 275, 70, 275, 74, 275, 275, 84, 275, 275, 275, + + 90, 92, 275, 96, 275, 275, 275, 275, 275, 275, + 275, 275, 275, 275, 275, 48, 275, 275, 275, 134, + 275, 275, 138, 275, 275, 275, 275, 275, 275, 275, + 275, 157, 275, 159, 160, 275, 275, 275, 164, 165, + 167, 168, 169, 49, 172, 275, 275, 275, 179, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 193, 275, 275, 275, 275, 275, 275, 275, 207, 275, + 208, 275, 210, 266, 275, 275, 217, 275, 275, 275, + 275, 4, 275, 13, 22, 275, 275, 27, 29, 275, + 275, 275, 33, 275, 275, 275, 275, 275, 50, 51, + + 52, 53, 275, 275, 275, 275, 69, 72, 77, 275, + 275, 275, 275, 275, 97, 275, 101, 107, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 275, 275, 130, + 275, 50, 275, 275, 144, 275, 275, 147, 275, 275, + 152, 275, 158, 161, 162, 275, 170, 275, 275, 275, + 275, 180, 275, 275, 275, 275, 275, 275, 275, 275, + 275, 262, 
275, 275, 275, 275, 275, 275, 204, 239, + 275, 275, 275, 275, 219, 220, 275, 275, 275, 275, + 275, 275, 32, 269, 275, 41, 45, 275, 275, 56, + 275, 58, 275, 80, 275, 275, 275, 275, 275, 98, + + 275, 118, 119, 275, 275, 275, 275, 275, 275, 275, + 275, 129, 275, 137, 275, 275, 275, 275, 275, 275, + 163, 275, 275, 275, 181, 275, 184, 275, 275, 275, + 275, 190, 275, 275, 275, 275, 275, 201, 203, 209, + 214, 215, 275, 275, 230, 275, 25, 267, 30, 275, + 275, 275, 275, 42, 264, 265, 275, 275, 136, 275, + 275, 275, 275, 275, 114, 275, 275, 124, 275, 275, + 124, 275, 275, 275, 275, 275, 146, 148, 275, 275, + 275, 173, 174, 275, 275, 275, 275, 275, 275, 275, + 194, 275, 199, 275, 218, 275, 275, 31, 275, 275, + + 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + 123, 125, 275, 275, 275, 275, 139, 275, 149, 156, + 275, 275, 185, 275, 275, 275, 275, 195, 275, 275, + 275, 275, 275, 275, 275, 59, 275, 275, 87, 88, + 95, 275, 275, 47, 275, 275, 275, 145, 275, 275, + 275, 275, 275, 275, 275, 275, 275, 36, 37, 39, + 275, 275, 275, 275, 121, 275, 275, 275, 275, 183, + 275, 275, 275, 275, 275, 219, 275, 275, 57, 85, + 275, 275, 275, 127, 275, 275, 275, 275, 275, 191, + 216, 11, 275, 275, 115, 275, 275, 275, 186, 275, + + 275, 275, 275, 275, 275, 275, 275, 275, 275, 86, + 275, 275, 275, 275, 188, 38, 275, 275, 275, 275, + 126, 132, 171, 275, 187, 0 } ; static yyconst flex_int32_t yy_ec[256] = @@ -1452,7 +1452,7 @@ static yyconst flex_int16_t yy_chk[2760] = } ; /* Table of booleans, true if rule could match eol. 
*/ -static yyconst flex_int32_t yy_rule_can_match_eol[293] = +static yyconst flex_int32_t yy_rule_can_match_eol[294] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1468,7 +1468,7 @@ static yyconst flex_int32_t yy_rule_can_match_eol[293] = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, }; + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, }; /* The intent behind this definition is that it'll catch * any uses of REJECT which flex missed. @@ -2234,771 +2234,774 @@ YY_RULE_SETUP case 79: YY_RULE_SETUP #line 113 "sql.l" -{ return FULLTEXT; } +{ return FULL; } YY_BREAK case 80: YY_RULE_SETUP #line 114 "sql.l" -{ return GRANT; } +{ return FULLTEXT; } YY_BREAK case 81: YY_RULE_SETUP #line 115 "sql.l" -{ return GROUP; } +{ return GRANT; } YY_BREAK case 82: YY_RULE_SETUP #line 116 "sql.l" -{ return HASH; } +{ return GROUP; } YY_BREAK case 83: YY_RULE_SETUP #line 117 "sql.l" -{ return HAVING; } +{ return HASH; } YY_BREAK case 84: YY_RULE_SETUP #line 118 "sql.l" -{ return HIGH_PRIORITY; } +{ return HAVING; } YY_BREAK case 85: YY_RULE_SETUP #line 119 "sql.l" -{ return HOUR_MICROSECOND; } +{ return HIGH_PRIORITY; } YY_BREAK case 86: YY_RULE_SETUP #line 120 "sql.l" -{ return HOUR_MINUTE; } +{ return HOUR_MICROSECOND; } YY_BREAK case 87: YY_RULE_SETUP #line 121 "sql.l" -{ return HOUR_SECOND; } +{ return HOUR_MINUTE; } YY_BREAK case 88: YY_RULE_SETUP #line 122 "sql.l" -{ return IF; } +{ return HOUR_SECOND; } YY_BREAK case 89: YY_RULE_SETUP #line 123 "sql.l" -{ return IGNORE; } +{ return IF; } YY_BREAK case 90: YY_RULE_SETUP #line 124 "sql.l" -{ return IN; } +{ return IGNORE; } YY_BREAK case 91: YY_RULE_SETUP #line 125 "sql.l" -{ return INFILE; } +{ return IN; } YY_BREAK case 92: YY_RULE_SETUP #line 126 "sql.l" -{ return INNER; } +{ return 
INFILE; } YY_BREAK case 93: YY_RULE_SETUP #line 127 "sql.l" -{ return INOUT; } +{ return INNER; } YY_BREAK case 94: YY_RULE_SETUP #line 128 "sql.l" -{ return INSENSITIVE; } +{ return INOUT; } YY_BREAK case 95: YY_RULE_SETUP #line 129 "sql.l" -{ return INSERT; } +{ return INSENSITIVE; } YY_BREAK case 96: YY_RULE_SETUP #line 130 "sql.l" -{ return INTEGER; } +{ return INSERT; } YY_BREAK case 97: YY_RULE_SETUP #line 131 "sql.l" -{ return INTERVAL; } +{ return INTEGER; } YY_BREAK case 98: YY_RULE_SETUP #line 132 "sql.l" -{ return INTO; } +{ return INTERVAL; } YY_BREAK case 99: YY_RULE_SETUP #line 133 "sql.l" -{ return IS; } +{ return INTO; } YY_BREAK case 100: YY_RULE_SETUP #line 134 "sql.l" -{ return ITERATE; } +{ return IS; } YY_BREAK case 101: YY_RULE_SETUP #line 135 "sql.l" -{ return JOIN; } +{ return ITERATE; } YY_BREAK case 102: YY_RULE_SETUP #line 136 "sql.l" -{ return INDEX; } +{ return JOIN; } YY_BREAK case 103: YY_RULE_SETUP #line 137 "sql.l" -{ return KEY; } +{ return INDEX; } YY_BREAK case 104: YY_RULE_SETUP #line 138 "sql.l" -{ return KEYS; } +{ return KEY; } YY_BREAK case 105: YY_RULE_SETUP #line 139 "sql.l" -{ return KILL; } +{ return KEYS; } YY_BREAK case 106: YY_RULE_SETUP #line 140 "sql.l" -{ return LEADING; } +{ return KILL; } YY_BREAK case 107: YY_RULE_SETUP #line 141 "sql.l" -{ return LEAVE; } +{ return LEADING; } YY_BREAK case 108: YY_RULE_SETUP #line 142 "sql.l" -{ return LEFT; } +{ return LEAVE; } YY_BREAK case 109: YY_RULE_SETUP #line 143 "sql.l" -{ return LIKE; } +{ return LEFT; } YY_BREAK case 110: YY_RULE_SETUP #line 144 "sql.l" -{ return LIMIT; } +{ return LIKE; } YY_BREAK case 111: YY_RULE_SETUP #line 145 "sql.l" -{ return LINES; } +{ return LIMIT; } YY_BREAK case 112: YY_RULE_SETUP #line 146 "sql.l" -{ return LOAD; } +{ return LINES; } YY_BREAK case 113: YY_RULE_SETUP #line 147 "sql.l" -{ return LOCALTIME; } +{ return LOAD; } YY_BREAK case 114: YY_RULE_SETUP #line 148 "sql.l" -{ return LOCALTIMESTAMP; } +{ return LOCALTIME; } YY_BREAK case 
115: YY_RULE_SETUP #line 149 "sql.l" -{ return LOCK; } +{ return LOCALTIMESTAMP; } YY_BREAK case 116: YY_RULE_SETUP #line 150 "sql.l" -{ return LONG; } +{ return LOCK; } YY_BREAK case 117: YY_RULE_SETUP #line 151 "sql.l" -{ return LONGBLOB; } +{ return LONG; } YY_BREAK case 118: YY_RULE_SETUP #line 152 "sql.l" -{ return LONGTEXT; } +{ return LONGBLOB; } YY_BREAK case 119: YY_RULE_SETUP #line 153 "sql.l" -{ return LOOP; } +{ return LONGTEXT; } YY_BREAK case 120: YY_RULE_SETUP #line 154 "sql.l" -{ return LOW_PRIORITY; } +{ return LOOP; } YY_BREAK case 121: YY_RULE_SETUP #line 155 "sql.l" -{ return MATCH; } +{ return LOW_PRIORITY; } YY_BREAK case 122: YY_RULE_SETUP #line 156 "sql.l" -{ return MEDIUMBLOB; } +{ return MATCH; } YY_BREAK case 123: YY_RULE_SETUP #line 157 "sql.l" -{ return MEDIUMINT; } +{ return MEDIUMBLOB; } YY_BREAK case 124: YY_RULE_SETUP #line 158 "sql.l" -{ return MEDIUMTEXT; } +{ return MEDIUMINT; } YY_BREAK case 125: YY_RULE_SETUP #line 159 "sql.l" -{ return MINUTE_MICROSECOND; } +{ return MEDIUMTEXT; } YY_BREAK case 126: YY_RULE_SETUP #line 160 "sql.l" -{ return MINUTE_SECOND; } +{ return MINUTE_MICROSECOND; } YY_BREAK case 127: YY_RULE_SETUP #line 161 "sql.l" -{ return MOD; } +{ return MINUTE_SECOND; } YY_BREAK case 128: YY_RULE_SETUP #line 162 "sql.l" -{ return MODIFIES; } +{ return MOD; } YY_BREAK case 129: YY_RULE_SETUP #line 163 "sql.l" -{ return NATURAL; } +{ return MODIFIES; } YY_BREAK case 130: YY_RULE_SETUP #line 164 "sql.l" -{ return NOT; } +{ return NATURAL; } YY_BREAK case 131: YY_RULE_SETUP #line 165 "sql.l" -{ return NO_WRITE_TO_BINLOG; } +{ return NOT; } YY_BREAK case 132: YY_RULE_SETUP #line 166 "sql.l" -{ return NULLX; } +{ return NO_WRITE_TO_BINLOG; } YY_BREAK case 133: YY_RULE_SETUP #line 167 "sql.l" -{ return NUMBER; } +{ return NULLX; } YY_BREAK case 134: YY_RULE_SETUP #line 168 "sql.l" -{ return ON; } +{ return NUMBER; } YY_BREAK case 135: YY_RULE_SETUP #line 169 "sql.l" -{ return DUPLICATE; } +{ return ON; } YY_BREAK case 
136: YY_RULE_SETUP #line 170 "sql.l" -{ return OPTIMIZE; } +{ return DUPLICATE; } YY_BREAK case 137: YY_RULE_SETUP #line 171 "sql.l" -{ return OPTION; } +{ return OPTIMIZE; } YY_BREAK case 138: YY_RULE_SETUP #line 172 "sql.l" -{ return OPTIONALLY; } +{ return OPTION; } YY_BREAK case 139: YY_RULE_SETUP #line 173 "sql.l" -{ return OR; } +{ return OPTIONALLY; } YY_BREAK case 140: YY_RULE_SETUP #line 174 "sql.l" -{ return ORDER; } +{ return OR; } YY_BREAK case 141: YY_RULE_SETUP #line 175 "sql.l" -{ return OUT; } +{ return ORDER; } YY_BREAK case 142: YY_RULE_SETUP #line 176 "sql.l" -{ return OUTER; } +{ return OUT; } YY_BREAK case 143: YY_RULE_SETUP #line 177 "sql.l" -{ return OUTFILE; } +{ return OUTER; } YY_BREAK case 144: YY_RULE_SETUP #line 178 "sql.l" -{ return PARTITIONED; } +{ return OUTFILE; } YY_BREAK case 145: YY_RULE_SETUP #line 179 "sql.l" -{ return PRECISION; } +{ return PARTITIONED; } YY_BREAK case 146: YY_RULE_SETUP #line 180 "sql.l" -{ return PRIMARY; } +{ return PRECISION; } YY_BREAK case 147: YY_RULE_SETUP #line 181 "sql.l" -{ return PROCEDURE; } +{ return PRIMARY; } YY_BREAK case 148: YY_RULE_SETUP #line 182 "sql.l" -{ return PROJECTION; } +{ return PROCEDURE; } YY_BREAK case 149: YY_RULE_SETUP #line 183 "sql.l" -{ return PURGE; } +{ return PROJECTION; } YY_BREAK case 150: YY_RULE_SETUP #line 184 "sql.l" -{ return QUICK; } +{ return PURGE; } YY_BREAK case 151: YY_RULE_SETUP #line 185 "sql.l" -{ return QUARTER;} +{ return QUICK; } YY_BREAK case 152: YY_RULE_SETUP #line 186 "sql.l" -{ return READ; } +{ return QUARTER;} YY_BREAK case 153: YY_RULE_SETUP #line 187 "sql.l" -{ return READS; } +{ return READ; } YY_BREAK case 154: YY_RULE_SETUP #line 188 "sql.l" -{ return REAL; } +{ return READS; } YY_BREAK case 155: YY_RULE_SETUP #line 189 "sql.l" -{ return REFERENCES; } +{ return REAL; } YY_BREAK case 156: YY_RULE_SETUP #line 190 "sql.l" -{ return REGEXP; } +{ return REFERENCES; } YY_BREAK case 157: YY_RULE_SETUP #line 191 "sql.l" -{ return RELEASE; } +{ 
return REGEXP; } YY_BREAK case 158: YY_RULE_SETUP #line 192 "sql.l" -{ return RENAME; } +{ return RELEASE; } YY_BREAK case 159: YY_RULE_SETUP #line 193 "sql.l" -{ return REPEAT; } +{ return RENAME; } YY_BREAK case 160: YY_RULE_SETUP #line 194 "sql.l" -{ return REPLACE; } +{ return REPEAT; } YY_BREAK case 161: YY_RULE_SETUP #line 195 "sql.l" -{ return REQUIRE; } +{ return REPLACE; } YY_BREAK case 162: YY_RULE_SETUP #line 196 "sql.l" -{ return RESTRICT; } +{ return REQUIRE; } YY_BREAK case 163: YY_RULE_SETUP #line 197 "sql.l" -{ return RETURN; } +{ return RESTRICT; } YY_BREAK case 164: YY_RULE_SETUP #line 198 "sql.l" -{ return REVOKE; } +{ return RETURN; } YY_BREAK case 165: YY_RULE_SETUP #line 199 "sql.l" -{ return RIGHT; } +{ return REVOKE; } YY_BREAK case 166: YY_RULE_SETUP #line 200 "sql.l" -{ return ROLLUP; } +{ return RIGHT; } YY_BREAK case 167: YY_RULE_SETUP #line 201 "sql.l" -{ return SAMPLE; } +{ return ROLLUP; } YY_BREAK case 168: YY_RULE_SETUP #line 202 "sql.l" -{ return SCHEMA; } +{ return SAMPLE; } YY_BREAK case 169: YY_RULE_SETUP #line 203 "sql.l" -{ return SCHEMAS; } +{ return SCHEMA; } YY_BREAK case 170: YY_RULE_SETUP #line 204 "sql.l" -{ return SECOND_MICROSECOND; } +{ return SCHEMAS; } YY_BREAK case 171: YY_RULE_SETUP #line 205 "sql.l" -{ return SELECT; } +{ return SECOND_MICROSECOND; } YY_BREAK case 172: YY_RULE_SETUP #line 206 "sql.l" -{ return SENSITIVE; } +{ return SELECT; } YY_BREAK case 173: YY_RULE_SETUP #line 207 "sql.l" -{ return SEPARATOR; } +{ return SENSITIVE; } YY_BREAK case 174: YY_RULE_SETUP #line 208 "sql.l" -{ return SET; } +{ return SEPARATOR; } YY_BREAK case 175: YY_RULE_SETUP #line 209 "sql.l" -{ return SHOW; } +{ return SET; } YY_BREAK case 176: YY_RULE_SETUP #line 210 "sql.l" -{ return SMALLINT; } +{ return SHOW; } YY_BREAK case 177: YY_RULE_SETUP #line 211 "sql.l" -{ return SOME; } +{ return SMALLINT; } YY_BREAK case 178: YY_RULE_SETUP #line 212 "sql.l" -{ return SONAME; } +{ return SOME; } YY_BREAK case 179: YY_RULE_SETUP 
#line 213 "sql.l" -{ return SPATIAL; } +{ return SONAME; } YY_BREAK case 180: YY_RULE_SETUP #line 214 "sql.l" -{ return SPECIFIC; } +{ return SPATIAL; } YY_BREAK case 181: YY_RULE_SETUP #line 215 "sql.l" -{ return SQL; } +{ return SPECIFIC; } YY_BREAK case 182: YY_RULE_SETUP #line 216 "sql.l" -{ return SQLEXCEPTION; } +{ return SQL; } YY_BREAK case 183: YY_RULE_SETUP #line 217 "sql.l" -{ return SQLSTATE; } +{ return SQLEXCEPTION; } YY_BREAK case 184: YY_RULE_SETUP #line 218 "sql.l" -{ return SQLWARNING; } +{ return SQLSTATE; } YY_BREAK case 185: YY_RULE_SETUP #line 219 "sql.l" -{ return SQL_BIG_RESULT; } +{ return SQLWARNING; } YY_BREAK case 186: YY_RULE_SETUP #line 220 "sql.l" -{ return SQL_CALC_FOUND_ROWS; } +{ return SQL_BIG_RESULT; } YY_BREAK case 187: YY_RULE_SETUP #line 221 "sql.l" -{ return SQL_SMALL_RESULT; } +{ return SQL_CALC_FOUND_ROWS; } YY_BREAK case 188: YY_RULE_SETUP #line 222 "sql.l" -{ return SSL; } +{ return SQL_SMALL_RESULT; } YY_BREAK case 189: YY_RULE_SETUP #line 223 "sql.l" -{ return STARTING; } +{ return SSL; } YY_BREAK case 190: YY_RULE_SETUP #line 224 "sql.l" -{ return STRAIGHT_JOIN; } +{ return STARTING; } YY_BREAK case 191: YY_RULE_SETUP #line 225 "sql.l" -{ return TABLE; } +{ return STRAIGHT_JOIN; } YY_BREAK case 192: YY_RULE_SETUP #line 226 "sql.l" -{ return TABLES; } +{ return TABLE; } YY_BREAK case 193: YY_RULE_SETUP #line 227 "sql.l" -{ return TEMPORARY; } +{ return TABLES; } YY_BREAK case 194: YY_RULE_SETUP #line 228 "sql.l" -{ return TERMINATED; } +{ return TEMPORARY; } YY_BREAK case 195: YY_RULE_SETUP #line 229 "sql.l" -{ return TEXT; } +{ return TERMINATED; } YY_BREAK case 196: YY_RULE_SETUP #line 230 "sql.l" -{ return THEN; } +{ return TEXT; } YY_BREAK case 197: YY_RULE_SETUP #line 231 "sql.l" -{ return TIME; } +{ return THEN; } YY_BREAK case 198: YY_RULE_SETUP #line 232 "sql.l" -{ return TIMESTAMP; } +{ return TIME; } YY_BREAK case 199: YY_RULE_SETUP #line 233 "sql.l" -{ return TINYINT; } +{ return TIMESTAMP; } YY_BREAK case 
200: YY_RULE_SETUP #line 234 "sql.l" -{ return TINYTEXT; } +{ return TINYINT; } YY_BREAK case 201: YY_RULE_SETUP #line 235 "sql.l" -{ return TO; } +{ return TINYTEXT; } YY_BREAK case 202: YY_RULE_SETUP #line 236 "sql.l" -{ return TRAILING; } +{ return TO; } YY_BREAK case 203: YY_RULE_SETUP #line 237 "sql.l" -{ return TRIGGER; } +{ return TRAILING; } YY_BREAK case 204: YY_RULE_SETUP #line 238 "sql.l" -{ return UNDO; } +{ return TRIGGER; } YY_BREAK case 205: YY_RULE_SETUP #line 239 "sql.l" -{ return UNION; } +{ return UNDO; } YY_BREAK case 206: YY_RULE_SETUP #line 240 "sql.l" -{ return UNIQUE; } +{ return UNION; } YY_BREAK case 207: YY_RULE_SETUP #line 241 "sql.l" -{ return UNLOCK; } +{ return UNIQUE; } YY_BREAK case 208: YY_RULE_SETUP #line 242 "sql.l" -{ return UNSIGNED; } +{ return UNLOCK; } YY_BREAK case 209: YY_RULE_SETUP #line 243 "sql.l" -{ return UPDATE; } +{ return UNSIGNED; } YY_BREAK case 210: YY_RULE_SETUP #line 244 "sql.l" -{ return USAGE; } +{ return UPDATE; } YY_BREAK case 211: YY_RULE_SETUP #line 245 "sql.l" -{ return USE; } +{ return USAGE; } YY_BREAK case 212: YY_RULE_SETUP #line 246 "sql.l" -{ return USING; } +{ return USE; } YY_BREAK case 213: YY_RULE_SETUP #line 247 "sql.l" -{ return UTC_DATE; } +{ return USING; } YY_BREAK case 214: YY_RULE_SETUP #line 248 "sql.l" -{ return UTC_TIME; } +{ return UTC_DATE; } YY_BREAK case 215: YY_RULE_SETUP #line 249 "sql.l" -{ return UTC_TIMESTAMP; } +{ return UTC_TIME; } YY_BREAK case 216: YY_RULE_SETUP #line 250 "sql.l" -{ return VALUES; } +{ return UTC_TIMESTAMP; } YY_BREAK case 217: YY_RULE_SETUP #line 251 "sql.l" -{ return VARBINARY; } +{ return VALUES; } YY_BREAK case 218: YY_RULE_SETUP #line 252 "sql.l" -{ return VARCHAR; } +{ return VARBINARY; } YY_BREAK case 219: YY_RULE_SETUP #line 253 "sql.l" -{ return VARYING; } +{ return VARCHAR; } YY_BREAK case 220: YY_RULE_SETUP #line 254 "sql.l" -{ return WHEN; } +{ return VARYING; } YY_BREAK case 221: YY_RULE_SETUP #line 255 "sql.l" -{ return WHERE; } +{ return 
WHEN; } YY_BREAK case 222: YY_RULE_SETUP #line 256 "sql.l" -{ return WEEK;} +{ return WHERE; } YY_BREAK case 223: YY_RULE_SETUP #line 257 "sql.l" -{ return WHILE; } +{ return WEEK;} YY_BREAK case 224: YY_RULE_SETUP #line 258 "sql.l" -{ return WITH; } +{ return WHILE; } YY_BREAK case 225: YY_RULE_SETUP #line 259 "sql.l" -{ return WRITE; } +{ return WITH; } YY_BREAK case 226: YY_RULE_SETUP #line 260 "sql.l" -{ return XOR; } +{ return WRITE; } YY_BREAK case 227: YY_RULE_SETUP #line 261 "sql.l" -{ return YEAR; } +{ return XOR; } YY_BREAK case 228: YY_RULE_SETUP #line 262 "sql.l" -{ return YEAR_MONTH; } +{ return YEAR; } YY_BREAK case 229: YY_RULE_SETUP #line 263 "sql.l" -{ return ZEROFILL; } +{ return YEAR_MONTH; } YY_BREAK case 230: YY_RULE_SETUP #line 264 "sql.l" +{ return ZEROFILL; } + YY_BREAK +case 231: +YY_RULE_SETUP +#line 265 "sql.l" { return BEGINT; } YY_BREAK /* numbers */ -case 231: +case 232: YY_RULE_SETUP -#line 269 "sql.l" +#line 270 "sql.l" { yylval->strval = strdup(yytext); return INTNUM; } // 2014-4-14---save int value in string type YY_BREAK -case 232: -#line 273 "sql.l" case 233: #line 274 "sql.l" case 234: @@ -3006,32 +3009,34 @@ case 234: case 235: #line 276 "sql.l" case 236: -YY_RULE_SETUP #line 277 "sql.l" -{ yylval->strval = strdup(yytext); return APPROXNUM; } // 2014-4-14---save double value in string type - YY_BREAK -/* booleans */ // 2014-4-14---save bool value in string type case 237: YY_RULE_SETUP -#line 281 "sql.l" -{ yylval->strval = "1"; return BOOL; } +#line 278 "sql.l" +{ yylval->strval = strdup(yytext); return APPROXNUM; } // 2014-4-14---save double value in string type YY_BREAK +/* booleans */ // 2014-4-14---save bool value in string type case 238: YY_RULE_SETUP #line 282 "sql.l" -{ yylval->strval = "-1"; return BOOL; } // 2014-4-14---not supported now---Yu +{ yylval->strval = "1"; return BOOL; } YY_BREAK case 239: YY_RULE_SETUP #line 283 "sql.l" +{ yylval->strval = "-1"; return BOOL; } // 2014-4-14---not supported now---Yu + 
YY_BREAK +case 240: +YY_RULE_SETUP +#line 284 "sql.l" { yylval->strval = "0"; return BOOL; } YY_BREAK /* strings */ -case 240: -#line 289 "sql.l" case 241: +#line 290 "sql.l" +case 242: YY_RULE_SETUP -#line 289 "sql.l" +#line 290 "sql.l" { char *temp = strdup(yytext); yylval->strval = strdup(yytext); @@ -3041,164 +3046,164 @@ YY_RULE_SETUP return STRING; } YY_BREAK -case 242: +case 243: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp -= 1; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 298 "sql.l" +#line 299 "sql.l" { yyerror(pp,"Unterminated string %s", yytext); } YY_BREAK -case 243: +case 244: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp -= 1; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 299 "sql.l" +#line 300 "sql.l" { yyerror(pp,"Unterminated string %s", yytext); } YY_BREAK /* hex strings */ -case 244: -#line 303 "sql.l" case 245: +#line 304 "sql.l" +case 246: YY_RULE_SETUP -#line 303 "sql.l" +#line 304 "sql.l" { yylval->strval = strdup(yytext); return STRING; } YY_BREAK /* bit strings */ -case 246: -#line 308 "sql.l" case 247: +#line 309 "sql.l" +case 248: YY_RULE_SETUP -#line 308 "sql.l" +#line 309 "sql.l" { yylval->strval = strdup(yytext); return STRING; } YY_BREAK /* operators */ -case 248: -YY_RULE_SETUP -#line 312 "sql.l" -{ return yytext[0]; } - YY_BREAK case 249: YY_RULE_SETUP -#line 314 "sql.l" -{ return ANDOP; } +#line 313 "sql.l" +{ return yytext[0]; } YY_BREAK case 250: YY_RULE_SETUP #line 315 "sql.l" -{ return OR; } +{ return ANDOP; } YY_BREAK case 251: YY_RULE_SETUP -#line 317 "sql.l" -{ yylval->subtok = 1; return COMPARISON; } +#line 316 "sql.l" +{ return OR; } YY_BREAK case 252: YY_RULE_SETUP #line 318 "sql.l" -{ yylval->subtok = 2; return COMPARISON; } +{ yylval->subtok = 1; return COMPARISON; } YY_BREAK case 253: -#line 320 "sql.l" -case 254: YY_RULE_SETUP -#line 320 "sql.l" -{ yylval->subtok = 3; return 
COMPARISON; } +#line 319 "sql.l" +{ yylval->subtok = 2; return COMPARISON; } YY_BREAK +case 254: +#line 321 "sql.l" case 255: YY_RULE_SETUP #line 321 "sql.l" -{ yylval->subtok = 4; return COMPARISON; } +{ yylval->subtok = 3; return COMPARISON; } YY_BREAK case 256: YY_RULE_SETUP #line 322 "sql.l" -{ yylval->subtok = 5; return COMPARISON; } +{ yylval->subtok = 4; return COMPARISON; } YY_BREAK case 257: YY_RULE_SETUP #line 323 "sql.l" -{ yylval->subtok = 6; return COMPARISON; } +{ yylval->subtok = 5; return COMPARISON; } YY_BREAK case 258: YY_RULE_SETUP #line 324 "sql.l" -{ yylval->subtok = 12; return COMPARISON; } +{ yylval->subtok = 6; return COMPARISON; } YY_BREAK case 259: YY_RULE_SETUP -#line 326 "sql.l" -{ yylval->subtok = 1; return SHIFT; } +#line 325 "sql.l" +{ yylval->subtok = 12; return COMPARISON; } YY_BREAK case 260: YY_RULE_SETUP #line 327 "sql.l" +{ yylval->subtok = 1; return SHIFT; } + YY_BREAK +case 261: +YY_RULE_SETUP +#line 328 "sql.l" { yylval->subtok = 2; return SHIFT; } YY_BREAK /* functions */ -case 261: +case 262: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp -= 1; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 331 "sql.l" +#line 332 "sql.l" { return FSUBSTRING; } YY_BREAK -case 262: +case 263: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp = yy_bp + 4; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 332 "sql.l" +#line 333 "sql.l" { return FTRIM; } YY_BREAK -case 263: +case 264: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp = yy_bp + 8; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 333 "sql.l" +#line 334 "sql.l" { return FDATE_ADD; } YY_BREAK -case 264: +case 265: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp = yy_bp + 8; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 334 "sql.l" 
+#line 335 "sql.l" { return FDATE_SUB; } YY_BREAK -case 265: +case 266: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp = yy_bp + 5; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 335 "sql.l" +#line 336 "sql.l" { return FUPPER; }//---3.25fzh--- YY_BREAK -case 266: +case 267: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp = yy_bp + 8; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 336 "sql.l" +#line 337 "sql.l" { return FCOALESCE; } YY_BREAK -case 267: +case 268: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp = yy_bp + 4; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 337 "sql.l" +#line 338 "sql.l" { return FCAST; } YY_BREAK -case 268: +case 269: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp = yy_bp + 7; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 338 "sql.l" +#line 339 "sql.l" { return FCONVERT; } YY_BREAK /* @@ -3207,86 +3212,78 @@ YY_RULE_SETUP /* ---2.18add aggrection functions */ -case 269: +case 270: YY_RULE_SETUP -#line 345 "sql.l" +#line 346 "sql.l" { int c = yyinput(pp->yyscan_info_); unput(c); if(c == '(') return FCOUNT; yylval->strval = strdup(yytext); return NAME; } YY_BREAK -case 270: +case 271: YY_RULE_SETUP -#line 349 "sql.l" +#line 350 "sql.l" { int c = yyinput(pp->yyscan_info_); unput(c); if(c == '(') return FSUM; yylval->strval = strdup(yytext); return NAME; } YY_BREAK -case 271: +case 272: YY_RULE_SETUP -#line 353 "sql.l" +#line 354 "sql.l" { int c = yyinput(pp->yyscan_info_); unput(c); if(c == '(') return FAVG; yylval->strval = strdup(yytext); return NAME; } YY_BREAK -case 272: +case 273: YY_RULE_SETUP -#line 357 "sql.l" +#line 358 "sql.l" { int c = yyinput(pp->yyscan_info_); unput(c); if(c == '(') return FMIN; yylval->strval = strdup(yytext); return NAME; } YY_BREAK -case 273: +case 
274: YY_RULE_SETUP -#line 361 "sql.l" +#line 362 "sql.l" { int c = yyinput(pp->yyscan_info_); unput(c); if(c == '(') return FMAX; yylval->strval = strdup(yytext); return NAME; } YY_BREAK -case 274: +case 275: YY_RULE_SETUP -#line 366 "sql.l" +#line 367 "sql.l" { yylval->strval = strdup(yytext); return NAME; } YY_BREAK -case 275: +case 276: YY_RULE_SETUP -#line 368 "sql.l" +#line 369 "sql.l" { yylval->strval = strdup(yytext+1); yylval->strval[yyleng-2] = 0; return NAME; } YY_BREAK -case 276: +case 277: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ yyg->yy_c_buf_p = yy_cp -= 1; YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 372 "sql.l" +#line 373 "sql.l" { yyerror(pp,"unterminated quoted name %s", yytext); } YY_BREAK /* user variables */ -case 277: -#line 376 "sql.l" case 278: #line 377 "sql.l" case 279: #line 378 "sql.l" case 280: -YY_RULE_SETUP -#line 378 "sql.l" -{ yylval->strval = strdup(yytext+1); return USERVAR; } - YY_BREAK +#line 379 "sql.l" case 281: -*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ -yyg->yy_c_buf_p = yy_cp -= 1; -YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 380 "sql.l" -{ yyerror(pp,"unterminated quoted user variable %s", yytext); } +#line 379 "sql.l" +{ yylval->strval = strdup(yytext+1); return USERVAR; } YY_BREAK case 282: *yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ @@ -3305,16 +3302,19 @@ YY_RULE_SETUP { yyerror(pp,"unterminated quoted user variable %s", yytext); } YY_BREAK case 284: +*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */ +yyg->yy_c_buf_p = yy_cp -= 1; +YY_DO_BEFORE_ACTION; /* set up yytext again */ YY_RULE_SETUP -#line 385 "sql.l" -{ return ASSIGN; } +#line 383 "sql.l" +{ yyerror(pp,"unterminated quoted user variable %s", yytext); } YY_BREAK -/* comments */ case 285: YY_RULE_SETUP -#line 388 "sql.l" -; +#line 386 "sql.l" +{ return ASSIGN; } YY_BREAK +/* comments */ case 286: YY_RULE_SETUP #line 389 
"sql.l" @@ -3322,42 +3322,47 @@ YY_RULE_SETUP YY_BREAK case 287: YY_RULE_SETUP -#line 391 "sql.l" -{ oldstate = YY_START; BEGIN COMMENT; } +#line 390 "sql.l" +; YY_BREAK case 288: YY_RULE_SETUP #line 392 "sql.l" -{ BEGIN oldstate; } +{ oldstate = YY_START; BEGIN COMMENT; } YY_BREAK case 289: -/* rule 289 can match eol */ YY_RULE_SETUP #line 393 "sql.l" +{ BEGIN oldstate; } + YY_BREAK +case 290: +/* rule 290 can match eol */ +YY_RULE_SETUP +#line 394 "sql.l" ; YY_BREAK case YY_STATE_EOF(COMMENT): -#line 394 "sql.l" +#line 395 "sql.l" { yyerror(pp,"unclosed comment"); } YY_BREAK /* everything else */ -case 290: -/* rule 290 can match eol */ -YY_RULE_SETUP -#line 397 "sql.l" -/* white space */ - YY_BREAK case 291: +/* rule 291 can match eol */ YY_RULE_SETUP #line 398 "sql.l" -{ yyerror(pp,"mystery character '%c'", *yytext); } +/* white space */ {yylineno++;} YY_BREAK case 292: YY_RULE_SETUP -#line 400 "sql.l" +#line 399 "sql.l" +{ yyerror(pp,"mystery character '%c'", *yytext); } + YY_BREAK +case 293: +YY_RULE_SETUP +#line 401 "sql.l" YY_FATAL_ERROR( "flex scanner jammed" ); YY_BREAK -#line 3361 "lex.yy.c" +#line 3366 "lex.yy.c" case YY_STATE_EOF(INITIAL): case YY_STATE_EOF(BTWMODE): yyterminate(); @@ -4533,7 +4538,7 @@ void yyfree (void * ptr , yyscan_t yyscanner) #define YYTABLES_NAME "yytables" -#line 400 "sql.l" +#line 401 "sql.l" void GetCorrectString(char *dest, const char *src) diff --git a/sql_parser/parser/parser.cpp b/sql_parser/parser/parser.cpp index 1835286e7..ebcddee1e 100755 --- a/sql_parser/parser/parser.cpp +++ b/sql_parser/parser/parser.cpp @@ -12,6 +12,7 @@ #include "./sql.lex.h" #include "./parser.h" +#include "../../common/memory_handle.h" #include "../ast_node/ast_node.h" extern int yyparse(struct ParseResult* result); @@ -29,21 +30,23 @@ Parser::Parser() { } } sql_stmt_ = string(InputStr); - CreateRawAST(sql_stmt_); + string result = ""; + CreateRawAST(sql_stmt_, result); } -Parser::Parser(string sql_stmt) : sql_stmt_(sql_stmt) { - 
CreateRawAST(sql_stmt_); +Parser::Parser(string sql_stmt, string& result_info) : sql_stmt_(sql_stmt) { + CreateRawAST(sql_stmt_, result_info); // CreateAst(); } -Parser::~Parser() { delete ast_root_; } +Parser::~Parser() { DELETE_PTR(ast_root_); } AstNode* Parser::GetRawAST() { return ast_root_; } -AstNode* Parser::CreateRawAST(string SQL_statement) { - struct ParseResult presult = {NULL, NULL, SQL_statement.c_str(), 0}; +AstNode* Parser::CreateRawAST(string SQL_statement, string& result_info) { + struct ParseResult presult = {NULL, NULL, SQL_statement.c_str(), 0, ""}; if (yylex_init_extra(&presult, &presult.yyscan_info_)) { + // if (yylex_init(&presult.yyscan_info_)) { perror("init alloc failed"); ast_root_ = NULL; } else { @@ -56,18 +59,17 @@ AstNode* Parser::CreateRawAST(string SQL_statement) { ast_root_ = presult.ast; } else { printf("SQL parse failed, ast is null!\n"); + result_info = presult.error_info_; ast_root_ = NULL; } } else { printf("SQL parse failed\n"); ast_root_ = NULL; } + yy_flush_buffer(bp, presult.yyscan_info_); yy_delete_buffer(bp, presult.yyscan_info_); + // yylex_destroy(presult.yyscan_info_); } return ast_root_; } -/* - * @brief TODO(yuyang):for dml/query, add semantic analysis and recover - * expression string - */ // AstNode* Parser::CreateAst() { AstNode* entire_AST = AST_root_; } diff --git a/sql_parser/parser/parser.h b/sql_parser/parser/parser.h index 8904ce284..3c8704515 100755 --- a/sql_parser/parser/parser.h +++ b/sql_parser/parser/parser.h @@ -18,13 +18,13 @@ using std::vector; class Parser { public: Parser(); - explicit Parser(string SQL_statement); + explicit Parser(string SQL_statement, string& result_info); virtual ~Parser(); AstNode* GetRawAST(); string get_sql_stmt() { return sql_stmt_; } private: - AstNode* CreateRawAST(string SQL_statement); + AstNode* CreateRawAST(string SQL_statement, string& result_info); // AstNode* CreateAst(string SQL_statement); void SemanticAnalysis(); void RecoveryExpr(); diff --git 
a/sql_parser/parser/sql.l b/sql_parser/parser/sql.l index 3355c195c..de666ee97 100644 --- a/sql_parser/parser/sql.l +++ b/sql_parser/parser/sql.l @@ -110,6 +110,7 @@ FOR { return FOR; } FORCE { return FORCE; } FOREIGN { return FOREIGN; } FROM { return FROM; } +FULL { return FULL; } FULLTEXT { return FULLTEXT; } GRANT { return GRANT; } GROUP { return GROUP; } @@ -394,7 +395,7 @@ MAX { int c = yyinput(pp->yyscan_info_); unput(c); <> { yyerror(pp,"unclosed comment"); } /* everything else */ -[ \t\n] /* white space */ +[ \t\n] /* white space */ {yylineno++;} . { yyerror(pp,"mystery character '%c'", *yytext); } %% diff --git a/sql_parser/parser/sql.lex.h b/sql_parser/parser/sql.lex.h index 3d6e99daf..2d64455a9 100644 --- a/sql_parser/parser/sql.lex.h +++ b/sql_parser/parser/sql.lex.h @@ -338,7 +338,7 @@ extern int yylex \ #undef YY_DECL #endif -#line 400 "sql.l" +#line 401 "sql.l" #line 345 "sql.lex.h" diff --git a/sql_parser/parser/sql.tab.cpp b/sql_parser/parser/sql.tab.cpp index 8cb9ac1bf..d9805519a 100644 --- a/sql_parser/parser/sql.tab.cpp +++ b/sql_parser/parser/sql.tab.cpp @@ -1,19 +1,19 @@ /* A Bison parser, made by GNU Bison 2.7. */ /* Skeleton implementation for Bison GLR parsers in C - + Copyright (C) 2002-2012 Free Software Foundation, Inc. - + This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. - + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program. If not, see . 
*/ @@ -26,7 +26,7 @@ special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. - + This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ @@ -44,11 +44,6 @@ /* Pure parsers. */ #define YYPURE 1 - - - - - /* Copy the first part of user declarations. */ /* Line 207 of glr.c */ #line 10 "sql.ypp" @@ -60,6 +55,9 @@ #include #include #include +#include +#include +#include #include using namespace std; #include "../ast_node/ast_node.h" @@ -71,29 +69,31 @@ using namespace std; #include "../ast_node/ast_load_stmt.h" #include "../ast_node/ast_show_stmt.h" #include "../ast_node/ast_delete_stmt.h" +#include "../ast_node/ast_desc_stmt.h" +#include "../ast_node/ast_update_stmt.h" -void yyerror(struct ParseResult *pp,const char *s, ...); +void yyerror(struct ParseResult *pp, const char *s, ...); void emit(char *s, ...); /* Line 207 of glr.c */ -#line 80 "sql.tab.cpp" +#line 85 "sql.tab.cpp" -# ifndef YY_NULL -# if defined __cplusplus && 201103L <= __cplusplus -# define YY_NULL nullptr -# else -# define YY_NULL 0 -# endif -# endif +#ifndef YY_NULL +#if defined __cplusplus && 201103L <= __cplusplus +#define YY_NULL nullptr +#else +#define YY_NULL 0 +#endif +#endif #include "sql.tab.hpp" /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE -# undef YYERROR_VERBOSE -# define YYERROR_VERBOSE 1 +#undef YYERROR_VERBOSE +#define YYERROR_VERBOSE 1 #else -# define YYERROR_VERBOSE 0 +#define YYERROR_VERBOSE 0 #endif /* Default (constant) value used for initialization for null @@ -104,14 +104,15 @@ static YYSTYPE yyval_default; /* Copy the second part of user declarations. */ /* Line 230 of glr.c */ -#line 41 "sql.ypp" +#line 46 "sql.ypp" #include "sql.lex.h" #define YYLEX_PARAM result->yyscan_info_ /* -should replace YYLEX with the following clause in sql.tab.cpp, why so? 
I don't know +should replace YYLEX with the following clause in sql.tab.cpp, why so? I don't +know #ifdef YYLEX_PARAM # define YYLEX yylex (&yylval, YYLEX_PARAM) @@ -120,45 +121,41 @@ should replace YYLEX with the following clause in sql.tab.cpp, why so? I don't k #endif */ - /* Line 230 of glr.c */ -#line 126 "sql.tab.cpp" +#line 131 "sql.tab.cpp" #include #include #include #ifndef YY_ -# if defined YYENABLE_NLS && YYENABLE_NLS -# if ENABLE_NLS -# include /* INFRINGES ON USER NAME SPACE */ -# define YY_(Msgid) dgettext ("bison-runtime", Msgid) -# endif -# endif -# ifndef YY_ -# define YY_(Msgid) Msgid -# endif +#if defined YYENABLE_NLS && YYENABLE_NLS +#if ENABLE_NLS +#include /* INFRINGES ON USER NAME SPACE */ +#define YY_(Msgid) dgettext("bison-runtime", Msgid) +#endif +#endif +#ifndef YY_ +#define YY_(Msgid) Msgid +#endif #endif /* Suppress unused-variable warnings by "using" E. */ -#if ! defined lint || defined __GNUC__ -# define YYUSE(E) ((void) (E)) +#if !defined lint || defined __GNUC__ +#define YYUSE(E) ((void)(E)) #else -# define YYUSE(E) /* empty */ +#define YYUSE(E) /* empty */ #endif /* Identity function, used to suppress warnings about constant conditions. 
*/ #ifndef lint -# define YYID(N) (N) +#define YYID(N) (N) #else -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static int -YYID (int i) +#if (defined __STDC__ || defined __C99__FUNC__ || defined __cplusplus || \ + defined _MSC_VER) +static int YYID(int i) #else -static int -YYID (i) - int i; +static int YYID(i) int i; #endif { return i; @@ -166,31 +163,31 @@ YYID (i) #endif #ifndef YYFREE -# define YYFREE free +#define YYFREE free #endif #ifndef YYMALLOC -# define YYMALLOC malloc +#define YYMALLOC malloc #endif #ifndef YYREALLOC -# define YYREALLOC realloc +#define YYREALLOC realloc #endif -#define YYSIZEMAX ((size_t) -1) +#define YYSIZEMAX ((size_t)-1) #ifdef __cplusplus - typedef bool yybool; +typedef bool yybool; #else - typedef unsigned char yybool; +typedef unsigned char yybool; #endif #define yytrue 1 #define yyfalse 0 #ifndef YYSETJMP -# include -# define YYJMP_BUF jmp_buf -# define YYSETJMP(Env) setjmp (Env) +#include +#define YYJMP_BUF jmp_buf +#define YYSETJMP(Env) setjmp(Env) /* Pacify clang. */ -# define YYLONGJMP(Env, Val) (longjmp (Env, Val), YYASSERT (0)) +#define YYLONGJMP(Env, Val) (longjmp(Env, Val), YYASSERT(0)) #endif /*-----------------. @@ -199,29 +196,28 @@ YYID (i) #ifndef __attribute__ /* This feature is available in gcc versions 2.5 and later. */ -# if (! defined __GNUC__ || __GNUC__ < 2 \ - || (__GNUC__ == 2 && __GNUC_MINOR__ < 5)) -# define __attribute__(Spec) /* empty */ -# endif +#if (!defined __GNUC__ || __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 5)) +#define __attribute__(Spec) /* empty */ +#endif #endif #ifndef YYASSERT -# define YYASSERT(Condition) ((void) ((Condition) || (abort (), 0))) +#define YYASSERT(Condition) ((void)((Condition) || (abort(), 0))) #endif /* YYFINAL -- State number of the termination state. */ -#define YYFINAL 92 +#define YYFINAL 99 /* YYLAST -- Last index in YYTABLE. */ -#define YYLAST 2389 +#define YYLAST 2580 /* YYNTOKENS -- Number of terminals. 
*/ -#define YYNTOKENS 342 +#define YYNTOKENS 342 /* YYNNTS -- Number of nonterminals. */ -#define YYNNTS 85 +#define YYNNTS 87 /* YYNRULES -- Number of rules. */ -#define YYNRULES 371 +#define YYNRULES 377 /* YYNRULES -- Number of states. */ -#define YYNSTATES 782 +#define YYNSTATES 799 /* YYMAXRHS -- Maximum number of symbols on right-hand side of rule. */ #define YYMAXRHS 13 /* YYMAXLEFT -- Maximum number of symbols to the left of a handle @@ -229,1629 +225,1336 @@ YYID (i) #define YYMAXLEFT 0 /* YYTRANSLATE(X) -- Bison symbol number corresponding to X. */ -#define YYUNDEFTOK 2 -#define YYMAXUTOK 582 +#define YYUNDEFTOK 2 +#define YYMAXUTOK 582 -#define YYTRANSLATE(YYX) \ - ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) +#define YYTRANSLATE(YYX) \ + ((unsigned int)(YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ -static const unsigned short int yytranslate[] = -{ - 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 17, 2, 2, 2, 28, 22, 2, - 340, 341, 26, 24, 338, 25, 339, 27, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 337, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 30, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 21, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 18, 19, 20, 23, 29, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 
56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, - 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, - 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, - 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, - 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, - 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, - 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, - 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336 -}; +static const unsigned short int yytranslate[] = { + 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 17, 2, 2, 2, 28, 22, 2, 340, 341, 26, 24, 338, + 25, 339, 27, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 337, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 30, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 21, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 18, 19, 20, 23, 29, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336}; #if YYDEBUG /* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in YYRHS. 
*/ -static const unsigned short int yyprhs[] = -{ - 0, 0, 3, 6, 10, 13, 17, 19, 23, 35, - 36, 39, 40, 45, 47, 51, 52, 54, 56, 57, - 60, 61, 64, 65, 69, 72, 77, 78, 81, 86, - 87, 90, 92, 96, 97, 100, 103, 106, 109, 112, - 115, 118, 121, 123, 127, 129, 132, 134, 138, 140, - 142, 145, 150, 154, 158, 159, 161, 164, 166, 167, - 173, 177, 183, 190, 196, 197, 199, 201, 202, 204, - 206, 208, 211, 214, 215, 216, 218, 221, 226, 230, - 232, 234, 238, 242, 244, 246, 248, 250, 254, 258, - 262, 266, 270, 274, 277, 280, 284, 288, 292, 296, - 302, 309, 316, 323, 327, 331, 335, 339, 342, 345, - 349, 353, 357, 362, 366, 371, 377, 379, 383, 387, - 389, 393, 399, 407, 414, 423, 429, 437, 444, 453, - 458, 464, 469, 474, 479, 484, 489, 494, 501, 508, - 517, 526, 531, 539, 544, 551, 556, 558, 560, 562, - 563, 565, 567, 569, 571, 573, 580, 587, 591, 595, - 599, 603, 607, 611, 615, 619, 623, 628, 635, 639, - 645, 650, 656, 660, 665, 669, 674, 676, 681, 686, - 687, 691, 693, 702, 713, 723, 730, 742, 751, 755, - 756, 758, 760, 761, 763, 765, 769, 773, 779, 784, - 789, 795, 801, 802, 806, 809, 813, 817, 821, 825, - 828, 832, 836, 839, 843, 849, 852, 856, 860, 864, - 868, 872, 876, 880, 884, 888, 892, 894, 896, 898, - 900, 902, 906, 910, 913, 918, 920, 922, 924, 926, - 930, 934, 938, 942, 948, 954, 956, 960, 961, 965, - 971, 972, 974, 975, 978, 981, 982, 987, 991, 993, - 1004, 1018, 1020, 1031, 1032, 1034, 1036, 1038, 1039, 1042, - 1044, 1046, 1050, 1056, 1058, 1071, 1081, 1091, 1104, 1106, - 1112, 1114, 1119, 1124, 1125, 1128, 1130, 1137, 1139, 1143, - 1147, 1153, 1154, 1156, 1158, 1160, 1169, 1170, 1176, 1177, - 1180, 1183, 1186, 1189, 1191, 1192, 1193, 1197, 1201, 1207, - 1209, 1211, 1215, 1219, 1227, 1235, 1239, 1243, 1249, 1255, - 1257, 1263, 1267, 1275, 1280, 1285, 1290, 1294, 1298, 1303, - 1308, 1312, 1316, 1320, 1325, 1331, 1335, 1339, 1345, 1348, - 1352, 1357, 1363, 1368, 1373, 1378, 1382, 1386, 1387, 1389, - 1390, 1393, 1394, 1397, 1398, 1401, 1402, 1404, 1405, 1407, - 1409, 1411, 
1419, 1427, 1434, 1437, 1440, 1443, 1444, 1447, - 1452, 1453 -}; +static const unsigned short int yyprhs[] = { + 0, 0, 3, 6, 10, 13, 17, 19, 23, 35, 36, 39, + 40, 45, 47, 51, 52, 54, 56, 57, 60, 61, 64, 65, + 69, 72, 77, 78, 81, 86, 87, 90, 92, 96, 97, 100, + 103, 106, 109, 112, 115, 118, 121, 123, 127, 129, 132, 134, + 138, 140, 142, 145, 150, 154, 158, 159, 161, 164, 166, 167, + 173, 177, 183, 190, 196, 197, 199, 201, 202, 204, 206, 208, + 210, 213, 216, 219, 220, 221, 223, 226, 231, 235, 237, 239, + 243, 247, 249, 251, 253, 255, 259, 263, 267, 271, 275, 279, + 282, 285, 289, 293, 297, 301, 307, 314, 321, 328, 332, 336, + 340, 344, 347, 350, 354, 358, 362, 367, 371, 376, 382, 384, + 388, 392, 394, 398, 404, 412, 419, 428, 434, 442, 449, 458, + 463, 469, 474, 479, 484, 489, 494, 499, 506, 513, 522, 531, + 536, 544, 549, 556, 561, 563, 565, 567, 568, 570, 572, 574, + 576, 578, 585, 592, 596, 600, 604, 608, 612, 616, 620, 624, + 628, 633, 640, 644, 650, 655, 661, 665, 670, 674, 679, 681, + 686, 691, 692, 696, 698, 707, 718, 728, 735, 747, 756, 760, + 761, 763, 765, 766, 768, 770, 774, 778, 784, 789, 794, 800, + 806, 807, 811, 814, 818, 822, 826, 830, 833, 837, 841, 844, + 848, 854, 857, 861, 865, 869, 873, 877, 881, 885, 889, 893, + 897, 899, 901, 903, 905, 907, 911, 915, 918, 923, 925, 927, + 929, 931, 935, 939, 943, 947, 953, 959, 961, 965, 966, 970, + 976, 977, 979, 980, 983, 986, 987, 992, 996, 998, 1009, 1023, + 1025, 1036, 1037, 1039, 1041, 1043, 1044, 1047, 1049, 1051, 1055, 1061, + 1063, 1076, 1086, 1096, 1109, 1111, 1117, 1119, 1124, 1129, 1130, 1133, + 1135, 1142, 1144, 1148, 1152, 1158, 1159, 1161, 1163, 1165, 1174, 1175, + 1181, 1182, 1185, 1188, 1191, 1194, 1196, 1197, 1198, 1202, 1206, 1212, + 1214, 1216, 1220, 1224, 1232, 1240, 1244, 1248, 1254, 1260, 1262, 1268, + 1272, 1280, 1285, 1290, 1295, 1299, 1303, 1308, 1313, 1317, 1321, 1325, + 1330, 1336, 1340, 1344, 1350, 1353, 1357, 1362, 1368, 1373, 1378, 1383, + 1387, 1391, 1392, 1393, 1396, 1397, 1400, 
1401, 1404, 1405, 1407, 1408, + 1410, 1412, 1415, 1417, 1425, 1433, 1440, 1443, 1446, 1449, 1450, 1453, + 1458, 1459, 1462, 1464, 1470, 1474}; /* YYRHS -- A `-1'-separated list of the rules' RHS. */ -static const short int yyrhs[] = -{ - 343, 0, -1, 344, 337, -1, 343, 344, 337, -1, - 1, 337, -1, 343, 1, 337, -1, 345, -1, 193, - 357, 358, -1, 193, 357, 358, 107, 360, 346, 347, - 351, 352, 354, 355, -1, -1, 243, 373, -1, -1, - 110, 48, 348, 350, -1, 373, -1, 373, 338, 348, - -1, -1, 40, -1, 83, -1, -1, 245, 188, -1, - -1, 111, 373, -1, -1, 163, 48, 353, -1, 373, - 349, -1, 373, 349, 338, 353, -1, -1, 136, 373, - -1, 136, 373, 338, 373, -1, -1, 127, 356, -1, - 3, -1, 3, 338, 356, -1, -1, 357, 33, -1, - 357, 86, -1, 357, 87, -1, 357, 112, -1, 357, - 212, -1, 357, 209, -1, 357, 207, -1, 357, 208, - -1, 359, -1, 359, 338, 358, -1, 26, -1, 373, - 364, -1, 361, -1, 361, 338, 360, -1, 362, -1, - 365, -1, 3, 364, -1, 3, 339, 3, 364, -1, - 372, 363, 3, -1, 340, 360, 341, -1, -1, 39, - -1, 39, 3, -1, 3, -1, -1, 361, 366, 129, - 362, 370, -1, 361, 212, 362, -1, 361, 212, 362, - 158, 373, -1, 361, 368, 367, 129, 362, 371, -1, - 361, 154, 369, 129, 362, -1, -1, 120, -1, 63, - -1, -1, 165, -1, 135, -1, 187, -1, 135, 367, - -1, 187, 367, -1, -1, -1, 371, -1, 158, 373, - -1, 234, 340, 356, 341, -1, 340, 345, 341, -1, - 3, -1, 8, -1, 3, 339, 3, -1, 3, 339, - 26, -1, 4, -1, 5, -1, 7, -1, 6, -1, - 373, 24, 373, -1, 373, 25, 373, -1, 373, 26, - 373, -1, 373, 27, 373, -1, 373, 29, 373, -1, - 373, 28, 373, -1, 25, 373, -1, 24, 373, -1, - 373, 12, 373, -1, 373, 10, 373, -1, 373, 11, - 373, -1, 373, 20, 373, -1, 373, 20, 340, 345, - 341, -1, 373, 20, 37, 340, 345, 341, -1, 373, - 20, 199, 340, 345, 341, -1, 373, 20, 33, 340, - 345, 341, -1, 373, 21, 373, -1, 373, 22, 373, - -1, 373, 30, 373, -1, 373, 23, 373, -1, 17, - 373, -1, 18, 373, -1, 8, 9, 373, -1, 340, - 373, 341, -1, 373, 15, 156, -1, 373, 15, 18, - 156, -1, 373, 15, 6, -1, 373, 15, 18, 6, - -1, 373, 19, 373, 36, 373, -1, 373, -1, 
373, - 338, 374, -1, 340, 374, 341, -1, 374, -1, 374, - 338, 375, -1, 373, 16, 340, 374, 341, -1, 340, - 374, 341, 16, 340, 375, 341, -1, 373, 18, 16, - 340, 374, 341, -1, 340, 374, 341, 18, 16, 340, - 374, 341, -1, 373, 16, 340, 345, 341, -1, 340, - 374, 341, 16, 340, 345, 341, -1, 373, 18, 16, - 340, 345, 341, -1, 340, 374, 341, 18, 16, 340, - 345, 341, -1, 99, 340, 345, 341, -1, 18, 99, - 340, 345, 341, -1, 328, 340, 26, 341, -1, 328, - 340, 373, 341, -1, 333, 340, 373, 341, -1, 334, - 340, 373, 341, -1, 335, 340, 373, 341, -1, 336, - 340, 373, 341, -1, 324, 340, 373, 338, 373, 341, - -1, 324, 340, 373, 107, 373, 341, -1, 324, 340, - 373, 338, 373, 338, 373, 341, -1, 324, 340, 373, - 107, 373, 104, 373, 341, -1, 325, 340, 373, 341, - -1, 325, 340, 376, 373, 107, 373, 341, -1, 329, - 340, 373, 341, -1, 330, 340, 373, 39, 377, 341, - -1, 331, 340, 374, 341, -1, 133, -1, 224, -1, - 47, -1, -1, 125, -1, 4, -1, 89, -1, 103, - -1, 53, -1, 326, 340, 373, 338, 378, 341, -1, - 327, 340, 373, 338, 378, 341, -1, 126, 373, 73, - -1, 126, 373, 75, -1, 126, 373, 76, -1, 126, - 373, 77, -1, 126, 373, 74, -1, 126, 373, 248, - -1, 126, 373, 247, -1, 126, 373, 250, -1, 126, - 373, 174, -1, 51, 373, 379, 96, -1, 51, 373, - 379, 93, 373, 96, -1, 51, 379, 96, -1, 51, - 379, 93, 373, 96, -1, 242, 373, 217, 373, -1, - 379, 242, 373, 217, 373, -1, 373, 14, 373, -1, - 373, 18, 14, 373, -1, 373, 13, 373, -1, 373, - 18, 13, 373, -1, 380, -1, 62, 69, 381, 3, - -1, 62, 190, 381, 3, -1, -1, 116, 18, 99, - -1, 382, -1, 62, 385, 213, 381, 3, 340, 386, - 341, -1, 62, 385, 213, 381, 3, 339, 3, 340, - 386, 341, -1, 62, 385, 213, 381, 3, 340, 386, - 341, 383, -1, 62, 385, 213, 381, 3, 383, -1, - 62, 385, 213, 381, 3, 339, 3, 340, 386, 341, - 383, -1, 62, 385, 213, 381, 3, 339, 3, 383, - -1, 384, 363, 345, -1, -1, 117, -1, 182, -1, - -1, 214, -1, 387, -1, 387, 338, 386, -1, 3, - 389, 388, -1, 169, 130, 340, 356, 341, -1, 130, - 340, 356, 341, -1, 118, 340, 356, 341, -1, 108, - 118, 340, 356, 341, 
-1, 108, 130, 340, 356, 341, - -1, -1, 388, 18, 156, -1, 388, 156, -1, 388, - 80, 4, -1, 388, 80, 5, -1, 388, 80, 7, - -1, 388, 80, 6, -1, 388, 41, -1, 388, 228, - 130, -1, 388, 169, 130, -1, 388, 130, -1, 388, - 57, 4, -1, 388, 228, 340, 356, 341, -1, 45, - 391, -1, 221, 391, 393, -1, 198, 391, 393, -1, - 149, 391, 393, -1, 124, 391, 393, -1, 125, 391, - 393, -1, 43, 391, 393, -1, 177, 391, 393, -1, - 89, 391, 393, -1, 103, 391, 393, -1, 78, 391, - 393, -1, 71, -1, 218, -1, 219, -1, 72, -1, - 247, -1, 53, 391, 394, -1, 240, 391, 394, -1, - 44, 391, -1, 239, 340, 5, 341, -1, 220, -1, - 46, -1, 148, -1, 143, -1, 222, 392, 394, -1, - 215, 392, 394, -1, 150, 392, 394, -1, 144, 392, - 394, -1, 97, 340, 390, 341, 394, -1, 196, 340, - 390, 341, 394, -1, 4, -1, 390, 338, 4, -1, - -1, 340, 5, 341, -1, 340, 5, 338, 5, 341, - -1, -1, 44, -1, -1, 393, 230, -1, 393, 249, - -1, -1, 394, 53, 196, 3, -1, 394, 55, 3, - -1, 395, -1, 62, 171, 158, 3, 340, 356, 341, - 167, 158, 3, -1, 62, 171, 158, 3, 340, 356, - 341, 157, 20, 5, 167, 158, 3, -1, 396, -1, - 62, 397, 118, 3, 398, 158, 3, 340, 400, 341, - -1, -1, 228, -1, 108, -1, 201, -1, -1, 234, - 399, -1, 319, -1, 320, -1, 3, 391, 349, -1, - 3, 391, 349, 338, 400, -1, 401, -1, 138, 213, - 3, 107, 374, 245, 4, 338, 4, 189, 20, 7, - -1, 138, 213, 3, 107, 374, 245, 4, 338, 4, - -1, 38, 213, 3, 107, 374, 245, 4, 338, 4, - -1, 38, 213, 3, 107, 374, 245, 4, 338, 4, - 189, 20, 7, -1, 402, -1, 90, 118, 3, 158, - 3, -1, 403, -1, 90, 69, 404, 3, -1, 90, - 190, 404, 3, -1, -1, 116, 99, -1, 405, -1, - 90, 385, 213, 404, 406, 407, -1, 3, -1, 3, - 339, 3, -1, 406, 338, 3, -1, 406, 338, 3, - 339, 3, -1, -1, 184, -1, 50, -1, 408, -1, - 123, 410, 411, 3, 412, 238, 413, 409, -1, -1, - 158, 159, 130, 231, 415, -1, -1, 410, 146, -1, - 410, 81, -1, 410, 112, -1, 410, 117, -1, 127, - -1, -1, -1, 340, 356, 341, -1, 340, 414, 341, - -1, 340, 414, 341, 338, 413, -1, 373, -1, 80, - -1, 373, 338, 414, -1, 80, 338, 414, -1, 123, - 410, 411, 3, 196, 415, 409, 
-1, 123, 410, 411, - 3, 412, 345, 409, -1, 3, 20, 373, -1, 3, - 20, 80, -1, 415, 338, 3, 20, 373, -1, 415, - 338, 3, 20, 80, -1, 416, -1, 197, 417, 277, - 418, 419, -1, 197, 295, 419, -1, 197, 417, 296, - 107, 3, 418, 419, -1, 197, 62, 69, 3, -1, - 197, 62, 190, 3, -1, 197, 62, 213, 3, -1, - 197, 69, 3, -1, 197, 190, 3, -1, 197, 297, - 3, 298, -1, 197, 297, 3, 299, -1, 197, 300, - 301, -1, 197, 302, 354, -1, 197, 373, 302, -1, - 197, 303, 104, 8, -1, 197, 118, 107, 3, 418, - -1, 197, 304, 299, -1, 197, 421, 298, -1, 197, - 322, 277, 418, 419, -1, 197, 288, -1, 197, 417, - 305, -1, 197, 422, 299, 419, -1, 197, 213, 299, - 418, 419, -1, 197, 294, 196, 419, -1, 197, 306, - 418, 420, -1, 197, 422, 307, 419, -1, 197, 308, - 354, -1, 197, 373, 308, -1, -1, 323, -1, -1, - 107, 3, -1, -1, 14, 4, -1, -1, 14, 373, - -1, -1, 321, -1, -1, 280, -1, 281, -1, 423, - -1, 82, 424, 107, 360, 346, 352, 354, -1, 82, - 424, 107, 425, 234, 360, 346, -1, 82, 424, 425, - 107, 360, 346, -1, 424, 146, -1, 424, 173, -1, - 424, 117, -1, -1, 3, 426, -1, 425, 338, 3, - 426, -1, -1, 339, 26, -1 -}; +static const short int yyrhs[] = { + 343, 0, -1, 344, 337, -1, 343, 344, 337, -1, 1, 337, -1, 343, 1, + 337, -1, 345, -1, 194, 357, 358, -1, 194, 357, 358, 107, 360, 346, 347, + 351, 352, 354, 355, -1, -1, 244, 373, -1, -1, 111, 48, 348, 350, -1, + 373, -1, 373, 338, 348, -1, -1, 40, -1, 83, -1, -1, 246, 189, -1, + -1, 112, 373, -1, -1, 164, 48, 353, -1, 373, 349, -1, 373, 349, 338, + 353, -1, -1, 137, 373, -1, 137, 373, 338, 373, -1, -1, 128, 356, -1, + 3, -1, 3, 338, 356, -1, -1, 357, 33, -1, 357, 86, -1, 357, 87, + -1, 357, 113, -1, 357, 213, -1, 357, 210, -1, 357, 208, -1, 357, 209, + -1, 359, -1, 359, 338, 358, -1, 26, -1, 373, 364, -1, 361, -1, 361, + 338, 360, -1, 362, -1, 365, -1, 3, 364, -1, 3, 339, 3, 364, -1, + 372, 363, 3, -1, 340, 360, 341, -1, -1, 39, -1, 39, 3, -1, 3, + -1, -1, 361, 366, 130, 362, 370, -1, 361, 213, 362, -1, 361, 213, 362, + 159, 373, -1, 361, 368, 367, 130, 362, 371, 
-1, 361, 155, 369, 130, 362, + -1, -1, 121, -1, 63, -1, -1, 166, -1, 136, -1, 188, -1, 108, -1, + 136, 367, -1, 188, 367, -1, 108, 367, -1, -1, -1, 371, -1, 159, 373, + -1, 235, 340, 356, 341, -1, 340, 345, 341, -1, 3, -1, 8, -1, 3, + 339, 3, -1, 3, 339, 26, -1, 4, -1, 5, -1, 7, -1, 6, -1, + 373, 24, 373, -1, 373, 25, 373, -1, 373, 26, 373, -1, 373, 27, 373, + -1, 373, 29, 373, -1, 373, 28, 373, -1, 25, 373, -1, 24, 373, -1, + 373, 12, 373, -1, 373, 10, 373, -1, 373, 11, 373, -1, 373, 20, 373, + -1, 373, 20, 340, 345, 341, -1, 373, 20, 37, 340, 345, 341, -1, 373, + 20, 200, 340, 345, 341, -1, 373, 20, 33, 340, 345, 341, -1, 373, 21, + 373, -1, 373, 22, 373, -1, 373, 30, 373, -1, 373, 23, 373, -1, 17, + 373, -1, 18, 373, -1, 8, 9, 373, -1, 340, 373, 341, -1, 373, 15, + 157, -1, 373, 15, 18, 157, -1, 373, 15, 6, -1, 373, 15, 18, 6, + -1, 373, 19, 373, 36, 373, -1, 373, -1, 373, 338, 374, -1, 340, 374, + 341, -1, 374, -1, 374, 338, 375, -1, 373, 16, 340, 374, 341, -1, 340, + 374, 341, 16, 340, 375, 341, -1, 373, 18, 16, 340, 374, 341, -1, 340, + 374, 341, 18, 16, 340, 374, 341, -1, 373, 16, 340, 345, 341, -1, 340, + 374, 341, 16, 340, 345, 341, -1, 373, 18, 16, 340, 345, 341, -1, 340, + 374, 341, 18, 16, 340, 345, 341, -1, 99, 340, 345, 341, -1, 18, 99, + 340, 345, 341, -1, 328, 340, 26, 341, -1, 328, 340, 373, 341, -1, 333, + 340, 373, 341, -1, 334, 340, 373, 341, -1, 335, 340, 373, 341, -1, 336, + 340, 373, 341, -1, 324, 340, 373, 338, 373, 341, -1, 324, 340, 373, 107, + 373, 341, -1, 324, 340, 373, 338, 373, 338, 373, 341, -1, 324, 340, 373, + 107, 373, 104, 373, 341, -1, 325, 340, 373, 341, -1, 325, 340, 376, 373, + 107, 373, 341, -1, 329, 340, 373, 341, -1, 330, 340, 373, 39, 377, 341, + -1, 331, 340, 374, 341, -1, 134, -1, 225, -1, 47, -1, -1, 126, -1, + 4, -1, 89, -1, 103, -1, 53, -1, 326, 340, 373, 338, 378, 341, -1, + 327, 340, 373, 338, 378, 341, -1, 127, 373, 73, -1, 127, 373, 75, -1, + 127, 373, 76, -1, 127, 373, 77, -1, 127, 373, 74, -1, 127, 373, 249, + 
-1, 127, 373, 248, -1, 127, 373, 251, -1, 127, 373, 175, -1, 51, 373, + 379, 96, -1, 51, 373, 379, 93, 373, 96, -1, 51, 379, 96, -1, 51, + 379, 93, 373, 96, -1, 243, 373, 218, 373, -1, 379, 243, 373, 218, 373, + -1, 373, 14, 373, -1, 373, 18, 14, 373, -1, 373, 13, 373, -1, 373, + 18, 13, 373, -1, 380, -1, 62, 69, 381, 3, -1, 62, 191, 381, 3, + -1, -1, 117, 18, 99, -1, 382, -1, 62, 385, 214, 381, 3, 340, 386, + 341, -1, 62, 385, 214, 381, 3, 339, 3, 340, 386, 341, -1, 62, 385, + 214, 381, 3, 340, 386, 341, 383, -1, 62, 385, 214, 381, 3, 383, -1, + 62, 385, 214, 381, 3, 339, 3, 340, 386, 341, 383, -1, 62, 385, 214, + 381, 3, 339, 3, 383, -1, 384, 363, 345, -1, -1, 118, -1, 183, -1, + -1, 215, -1, 387, -1, 387, 338, 386, -1, 3, 389, 388, -1, 170, 131, + 340, 356, 341, -1, 131, 340, 356, 341, -1, 119, 340, 356, 341, -1, 109, + 119, 340, 356, 341, -1, 109, 131, 340, 356, 341, -1, -1, 388, 18, 157, + -1, 388, 157, -1, 388, 80, 4, -1, 388, 80, 5, -1, 388, 80, 7, + -1, 388, 80, 6, -1, 388, 41, -1, 388, 229, 131, -1, 388, 170, 131, + -1, 388, 131, -1, 388, 57, 4, -1, 388, 229, 340, 356, 341, -1, 45, + 391, -1, 222, 391, 393, -1, 199, 391, 393, -1, 150, 391, 393, -1, 125, + 391, 393, -1, 126, 391, 393, -1, 43, 391, 393, -1, 178, 391, 393, -1, + 89, 391, 393, -1, 103, 391, 393, -1, 78, 391, 393, -1, 71, -1, 219, + -1, 220, -1, 72, -1, 248, -1, 53, 391, 394, -1, 241, 391, 394, -1, + 44, 391, -1, 240, 340, 5, 341, -1, 221, -1, 46, -1, 149, -1, 144, + -1, 223, 392, 394, -1, 216, 392, 394, -1, 151, 392, 394, -1, 145, 392, + 394, -1, 97, 340, 390, 341, 394, -1, 197, 340, 390, 341, 394, -1, 4, + -1, 390, 338, 4, -1, -1, 340, 5, 341, -1, 340, 5, 338, 5, 341, + -1, -1, 44, -1, -1, 393, 231, -1, 393, 250, -1, -1, 394, 53, 197, + 3, -1, 394, 55, 3, -1, 395, -1, 62, 172, 159, 3, 340, 356, 341, + 168, 159, 3, -1, 62, 172, 159, 3, 340, 356, 341, 158, 20, 5, 168, + 159, 3, -1, 396, -1, 62, 397, 119, 3, 398, 159, 3, 340, 400, 341, + -1, -1, 229, -1, 109, -1, 202, -1, -1, 235, 399, -1, 320, 
-1, 321, + -1, 3, 391, 349, -1, 3, 391, 349, 338, 400, -1, 401, -1, 139, 214, + 3, 107, 374, 246, 4, 338, 4, 190, 20, 7, -1, 139, 214, 3, 107, + 374, 246, 4, 338, 4, -1, 38, 214, 3, 107, 374, 246, 4, 338, 4, + -1, 38, 214, 3, 107, 374, 246, 4, 338, 4, 190, 20, 7, -1, 402, + -1, 90, 119, 3, 159, 3, -1, 403, -1, 90, 69, 404, 3, -1, 90, + 191, 404, 3, -1, -1, 117, 99, -1, 405, -1, 90, 385, 214, 404, 406, + 407, -1, 3, -1, 3, 339, 3, -1, 406, 338, 3, -1, 406, 338, 3, + 339, 3, -1, -1, 185, -1, 50, -1, 408, -1, 124, 410, 411, 3, 412, + 239, 413, 409, -1, -1, 159, 160, 131, 232, 415, -1, -1, 410, 147, -1, + 410, 81, -1, 410, 113, -1, 410, 118, -1, 128, -1, -1, -1, 340, 356, + 341, -1, 340, 414, 341, -1, 340, 414, 341, 338, 413, -1, 373, -1, 80, + -1, 373, 338, 414, -1, 80, 338, 414, -1, 124, 410, 411, 3, 197, 415, + 409, -1, 124, 410, 411, 3, 412, 345, 409, -1, 3, 20, 373, -1, 3, + 20, 80, -1, 415, 338, 3, 20, 373, -1, 415, 338, 3, 20, 80, -1, + 416, -1, 198, 417, 278, 418, 419, -1, 198, 296, 419, -1, 198, 417, 297, + 107, 3, 418, 419, -1, 198, 62, 69, 3, -1, 198, 62, 191, 3, -1, + 198, 62, 214, 3, -1, 198, 69, 3, -1, 198, 191, 3, -1, 198, 298, + 3, 299, -1, 198, 298, 3, 300, -1, 198, 301, 302, -1, 198, 303, 354, + -1, 198, 373, 303, -1, 198, 304, 104, 8, -1, 198, 119, 107, 3, 418, + -1, 198, 305, 300, -1, 198, 421, 299, -1, 198, 323, 278, 418, 419, -1, + 198, 289, -1, 198, 417, 306, -1, 198, 422, 300, 419, -1, 198, 214, 300, + 418, 419, -1, 198, 295, 197, 419, -1, 198, 307, 418, 420, -1, 198, 422, + 308, 419, -1, 198, 309, 354, -1, 198, 373, 309, -1, -1, -1, 107, 3, + -1, -1, 14, 4, -1, -1, 14, 373, -1, -1, 322, -1, -1, 281, -1, + 282, -1, 83, 3, -1, 423, -1, 82, 424, 107, 360, 346, 352, 354, -1, + 82, 424, 107, 425, 235, 360, 346, -1, 82, 424, 425, 107, 360, 346, -1, + 424, 147, -1, 424, 174, -1, 424, 118, -1, -1, 3, 426, -1, 425, 338, + 3, 426, -1, -1, 339, 26, -1, 427, -1, 232, 362, 197, 428, 346, -1, + 373, 20, 373, -1, 428, 338, 373, 20, 373, -1}; /* YYRLINE[YYN] 
-- source line where rule number YYN was defined. */ -static const unsigned short int yyrline[] = -{ - 0, 435, 435, 449, 462, 471, 483, 487, 491, 498, - 499, 502, 503, 508, 509, 511, 512, 513, 516, 517, - 520, 521, 524, 525, 528, 529, 531, 532, 533, 536, - 537, 541, 542, 545, 546, 547, 548, 549, 550, 551, - 552, 553, 557, 558, 559, 563, 566, 567, 571, 572, - 576, 578, 580, 581, 584, 585, 588, 589, 590, 604, - 605, 606, 607, 608, 611, 612, 613, 616, 617, 621, - 622, 626, 627, 628, 631, 632, 640, 641, 646, 653, - 654, 655, 656, 657, 658, 659, 660, 662, 663, 664, - 665, 666, 667, 669, 670, 672, 673, 674, 676, 677, - 678, 679, 680, 682, 683, 684, 685, 687, 688, 689, - 690, 693, 694, 695, 696, 699, 702, 703, 704, 707, - 708, 711, 712, 713, 715, 717, 718, 719, 720, 722, - 723, 733, 734, 735, 736, 737, 738, 742, 743, 744, - 745, 746, 747, 748, 749, 750, 753, 754, 755, 758, - 759, 760, 761, 762, 763, 766, 767, 771, 772, 773, - 774, 775, 776, 777, 778, 779, 783, 784, 785, 786, - 789, 790, 793, 794, 797, 798, 804, 808, 809, 812, - 813, 818, 821, 825, 829, 834, 838, 843, 847, 851, - 852, 853, 856, 857, 860, 861, 866, 867, 868, 869, - 870, 871, 873, 874, 875, 876, 877, 878, 879, 880, - 881, 882, 883, 884, 885, 889, 890, 891, 892, 893, - 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, - 904, 905, 906, 907, 908, 910, 911, 912, 913, 914, - 915, 916, 917, 918, 919, 921, 922, 925, 926, 927, - 930, 931, 934, 935, 936, 939, 940, 942, 947, 950, - 952, 960, 964, 968, 969, 970, 971, 974, 975, 978, - 979, 982, 983, 987, 990, 995, 996, 997, 1004, 1007, - 1011, 1015, 1016, 1019, 1020, 1024, 1027, 1031, 1032, 1033, - 1034, 1037, 1038, 1039, 1045, 1048, 1054, 1055, 1058, 1059, - 1060, 1061, 1062, 1065, 1065, 1068, 1069, 1072, 1073, 1077, - 1078, 1079, 1080, 1083, 1087, 1092, 1095, 1098, 1101, 1106, - 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, - 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, - 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1137, 1138, 1141, - 1142, 
1145, 1146, 1149, 1150, 1152, 1153, 1156, 1157, 1158, - 1161, 1163, 1168, 1172, 1177, 1178, 1179, 1180, 1184, 1185, - 1188, 1189 -}; +static const unsigned short int yyrline[] = { + 0, 442, 442, 456, 469, 478, 490, 494, 498, 505, 506, 509, + 510, 515, 516, 518, 519, 520, 523, 524, 527, 528, 531, 532, + 535, 536, 538, 539, 540, 543, 544, 548, 549, 552, 553, 554, + 555, 556, 557, 558, 559, 560, 564, 565, 566, 570, 573, 574, + 578, 579, 583, 585, 587, 588, 591, 592, 595, 596, 597, 612, + 613, 614, 615, 616, 619, 620, 621, 624, 625, 629, 630, 631, + 635, 636, 637, 638, 641, 642, 650, 651, 656, 663, 664, 665, + 666, 667, 668, 669, 670, 672, 673, 674, 675, 676, 677, 679, + 680, 682, 683, 684, 686, 687, 688, 689, 690, 692, 693, 694, + 695, 697, 698, 699, 700, 703, 704, 705, 706, 709, 712, 713, + 714, 717, 718, 721, 722, 723, 725, 727, 728, 729, 730, 732, + 733, 743, 744, 745, 746, 747, 748, 752, 753, 754, 755, 756, + 757, 758, 759, 760, 763, 764, 765, 768, 769, 770, 771, 772, + 773, 776, 777, 781, 782, 783, 784, 785, 786, 787, 788, 789, + 793, 794, 795, 796, 799, 800, 803, 804, 807, 808, 814, 818, + 819, 822, 823, 828, 831, 835, 839, 844, 848, 853, 857, 861, + 862, 863, 866, 867, 870, 871, 876, 877, 878, 879, 880, 881, + 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, + 895, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, + 910, 911, 912, 913, 914, 915, 916, 917, 918, 920, 921, 922, + 923, 924, 925, 926, 927, 928, 929, 931, 932, 935, 936, 937, + 940, 941, 944, 945, 946, 949, 950, 952, 957, 960, 962, 970, + 974, 978, 979, 980, 981, 984, 985, 988, 989, 992, 993, 997, + 1000, 1005, 1006, 1007, 1014, 1017, 1021, 1025, 1026, 1029, 1030, 1034, + 1037, 1041, 1042, 1043, 1044, 1047, 1048, 1049, 1055, 1058, 1064, 1065, + 1068, 1069, 1070, 1071, 1072, 1075, 1075, 1078, 1079, 1082, 1083, 1087, + 1088, 1089, 1090, 1093, 1097, 1102, 1105, 1108, 1111, 1116, 1119, 1120, + 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, + 1133, 1134, 1135, 1136, 
1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, + 1145, 1147, 1151, 1152, 1155, 1156, 1159, 1160, 1162, 1163, 1166, 1167, + 1168, 1171, 1174, 1176, 1181, 1185, 1190, 1191, 1192, 1193, 1197, 1198, + 1201, 1202, 1205, 1208, 1211, 1212}; #endif #if YYDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. */ -static const char *const yytname[] = -{ - "$end", "error", "$undefined", "NAME", "STRING", "INTNUM", "BOOL", - "APPROXNUM", "USERVAR", "ASSIGN", "OR", "XOR", "ANDOP", "REGEXP", "LIKE", - "IS", "IN", "'!'", "NOT", "BETWEEN", "COMPARISON", "'|'", "'&'", "SHIFT", - "'+'", "'-'", "'*'", "'/'", "'%'", "MOD", "'^'", "UMINUS", "ADD", "ALL", - "ALTER", "ANALYZE", "AND", "ANY", "APPEND", "AS", "ASC", - "AUTO_INCREMENT", "BEFORE", "BIGINT", "BINARY", "BIT", "BLOB", "BOTH", - "BY", "CALL", "CASCADE", "CASE", "CHANGE", "CHAR", "CHECK", "COLLATE", - "COLUMN", "COMMENT", "CONDITION", "CONSTRAINT", "CONTINUE", "CONVERT", - "CREATE", "CROSS", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", - "CURRENT_USER", "CURSOR", "DATABASE", "DATABASES", "DATE", "DATETIME", - "DAY_HOUR", "DAY", "DAY_MICROSECOND", "DAY_MINUTE", "DAY_SECOND", - "DECIMAL", "DECLARE", "DEFAULT", "DELAYED", "DELETE", "DESC", "DESCRIBE", - "DETERMINISTIC", "DISTINCT", "DISTINCTROW", "DIV", "DOUBLE", "DROP", - "DUAL", "EACH", "ELSE", "ELSEIF", "ENCLOSED", "END", "ENUM", "ESCAPED", - "EXISTS", "EXIT", "EXPLAIN", "FETCH", "FLOAT", "FOR", "FORCE", "FOREIGN", - "FROM", "FULLTEXT", "GRANT", "GROUP", "HAVING", "HIGH_PRIORITY", - "HOUR_MICROSECOND", "HOUR_MINUTE", "HOUR_SECOND", "IF", "IGNORE", - "INDEX", "INFILE", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", - "INTEGER", "INTERVAL", "INTO", "ITERATE", "JOIN", "KEY", "KEYS", "KILL", - "LEADING", "LEAVE", "LEFT", "LIMIT", "LINES", "LOAD", "LOCALTIME", - "LOCALTIMESTAMP", "LOCK", "LONG", "LONGBLOB", "LONGTEXT", "LOOP", - "LOW_PRIORITY", "MATCH", "MEDIUMBLOB", 
"MEDIUMINT", "MEDIUMTEXT", - "MINUTE_MICROSECOND", "MINUTE_SECOND", "MODIFIES", "NATURAL", - "NO_WRITE_TO_BINLOG", "NULLX", "NUMBER", "ON", "DUPLICATE", "OPTIMIZE", - "OPTION", "OPTIONALLY", "ORDER", "OUT", "OUTER", "OUTFILE", - "PARTITIONED", "PRECISION", "PRIMARY", "PROCEDURE", "PROJECTION", - "PURGE", "QUICK", "QUARTER", "READ", "READS", "REAL", "REFERENCES", - "RELEASE", "RENAME", "REPEAT", "REPLACE", "REQUIRE", "RESTRICT", - "RETURN", "REVOKE", "RIGHT", "ROLLUP", "SAMPLE", "SCHEMA", "SCHEMAS", - "SECOND_MICROSECOND", "SELECT", "SENSITIVE", "SEPARATOR", "SET", "SHOW", - "SMALLINT", "SOME", "SONAME", "SPATIAL", "SPECIFIC", "SQL", - "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "SQL_BIG_RESULT", - "SQL_CALC_FOUND_ROWS", "SQL_SMALL_RESULT", "SSL", "STARTING", - "STRAIGHT_JOIN", "TABLE", "TEMPORARY", "TEXT", "TERMINATED", "THEN", - "TIME", "TIMESTAMP", "TINYBLOB", "TINYINT", "TINYTEXT", "TO", "TRAILING", - "TRIGGER", "UNDO", "UNION", "UNIQUE", "UNLOCK", "UNSIGNED", "UPDATE", - "USAGE", "USE", "USING", "UTC_DATE", "UTC_TIME", "UTC_TIMESTAMP", - "VALUES", "VARBINARY", "VARCHAR", "VARYING", "WHEN", "WHERE", "WHILE", - "WITH", "WRITE", "YEAR", "YEAR_MONTH", "ZEROFILL", "WEEK", "DO", - "MAX_QUERIES_PER_HOUR", "MAX_UPDATES_PER_HOUR", - "MAX_CONNECTIONS_PER_HOUR", "MAX_USER_CONNECTIONS", "USER", "TRUNCATE", - "FAST", "MEDIUM", "EXTENDED", "CHANGED", "LEAVES", "MASTER", "QUERY", - "CACHE", "SLAVE", "BEGINT", "COMMIT", "START", "TRANSACTION", "NO", - "CHAIN", "AUTOCOMMIT", "SAVEPOINT", "ROLLBACK", "LOCAL", "TABLES", - "ISOLATION", "LEVEL", "GLOBAL", "SESSION", "UNCOMMITTED", "COMMITTED", - "REPEATABLE", "SERIALIZABLE", "IDENTIFIED", "PASSWORD", "PRIVILEGES", - "BACKUP", "CHECKSUM", "REPAIR", "USE_FRM", "RESTORE", "CHARACTER", - "COLLATION", "COLUMNS", "ENGINE", "LOGS", "STATUS", "STORAGE", "ENGINES", - "ERRORS", "GRANTS", "INNODB", "PROCESSLIST", "TRIGGERS", "VARIABLES", - "WARNINGS", "FLUSH", "HOSTS", "DES_KEY_FILE", "USER_RESOURCES", - "CONNECTION", "RESET", "PREPARE", 
"DEALLOCATE", "EXECUTE", "WORK", - "BTREE", "HASH", "BDB", "OPEN", "FULL", "FSUBSTRING", "FTRIM", - "FDATE_ADD", "FDATE_SUB", "FCOUNT", "FUPPER", "FCAST", "FCOALESCE", - "FCONVERT", "FSUM", "FAVG", "FMIN", "FMAX", "';'", "','", "'.'", "'('", - "')'", "$accept", "stmt_list", "stmt", "select_stmt", "opt_where", - "opt_groupby", "groupby_list", "opt_asc_desc", "opt_with_rollup", - "opt_having", "opt_orderby", "orderby_list", "opt_limit", - "opt_into_list", "column_list", "select_opts", "select_expr_list", - "select_expr", "table_references", "table_reference", "table_factor", - "opt_as", "opt_as_alias", "join_table", "opt_inner_cross", "opt_outer", - "left_or_right", "opt_left_or_right_outer", "opt_join_condition", - "join_condition", "table_subquery", "expr", "expr_list", "opt_expr_list", - "trim_ltb", "cast_data_type", "interval_exp", "case_list", - "create_database_stmt", "opt_if_not_exists", "create_table_stmt", - "create_select_statement", "opt_ignore_replace", "opt_temporary", - "create_col_list", "create_definition", "column_atts", "data_type", - "enum_list", "opt_length", "opt_binary", "opt_uz", "opt_csc", - "create_projection_stmt", "create_index_stmt", "index_att", - "opt_using_type", "index_type", "index_col_list", "load_table_stmt", - "drop_index_stmt", "drop_database_stmt", "opt_if_exists", - "drop_table_stmt", "table_list", "opt_rc", "insert_stmt", - "opt_ondupupdate", "insert_opts", "opt_into", "opt_col_names", - "insert_vals_list", "insert_vals", "insert_asgn_list", "show_stmt", - "opt_full", "opt_from", "opt_like_string", "opt_like_expr", "opt_bdb", - "opt_trans_level", "delete_stmt", "delete_opts", "delete_list", - "opt_dot_star", YY_NULL -}; +static const char *const yytname[] = { + "$end", "error", + "$undefined", "NAME", + "STRING", "INTNUM", + "BOOL", "APPROXNUM", + "USERVAR", "ASSIGN", + "OR", "XOR", + "ANDOP", "REGEXP", + "LIKE", "IS", + "IN", "'!'", + "NOT", "BETWEEN", + "COMPARISON", "'|'", + "'&'", "SHIFT", + "'+'", "'-'", + "'*'", "'/'", + 
"'%'", "MOD", + "'^'", "UMINUS", + "ADD", "ALL", + "ALTER", "ANALYZE", + "AND", "ANY", + "APPEND", "AS", + "ASC", "AUTO_INCREMENT", + "BEFORE", "BIGINT", + "BINARY", "BIT", + "BLOB", "BOTH", + "BY", "CALL", + "CASCADE", "CASE", + "CHANGE", "CHAR", + "CHECK", "COLLATE", + "COLUMN", "COMMENT", + "CONDITION", "CONSTRAINT", + "CONTINUE", "CONVERT", + "CREATE", "CROSS", + "CURRENT_DATE", "CURRENT_TIME", + "CURRENT_TIMESTAMP", "CURRENT_USER", + "CURSOR", "DATABASE", + "DATABASES", "DATE", + "DATETIME", "DAY_HOUR", + "DAY", "DAY_MICROSECOND", + "DAY_MINUTE", "DAY_SECOND", + "DECIMAL", "DECLARE", + "DEFAULT", "DELAYED", + "DELETE", "DESC", + "DESCRIBE", "DETERMINISTIC", + "DISTINCT", "DISTINCTROW", + "DIV", "DOUBLE", + "DROP", "DUAL", + "EACH", "ELSE", + "ELSEIF", "ENCLOSED", + "END", "ENUM", + "ESCAPED", "EXISTS", + "EXIT", "EXPLAIN", + "FETCH", "FLOAT", + "FOR", "FORCE", + "FOREIGN", "FROM", + "FULL", "FULLTEXT", + "GRANT", "GROUP", + "HAVING", "HIGH_PRIORITY", + "HOUR_MICROSECOND", "HOUR_MINUTE", + "HOUR_SECOND", "IF", + "IGNORE", "INDEX", + "INFILE", "INNER", + "INOUT", "INSENSITIVE", + "INSERT", "INT", + "INTEGER", "INTERVAL", + "INTO", "ITERATE", + "JOIN", "KEY", + "KEYS", "KILL", + "LEADING", "LEAVE", + "LEFT", "LIMIT", + "LINES", "LOAD", + "LOCALTIME", "LOCALTIMESTAMP", + "LOCK", "LONG", + "LONGBLOB", "LONGTEXT", + "LOOP", "LOW_PRIORITY", + "MATCH", "MEDIUMBLOB", + "MEDIUMINT", "MEDIUMTEXT", + "MINUTE_MICROSECOND", "MINUTE_SECOND", + "MODIFIES", "NATURAL", + "NO_WRITE_TO_BINLOG", "NULLX", + "NUMBER", "ON", + "DUPLICATE", "OPTIMIZE", + "OPTION", "OPTIONALLY", + "ORDER", "OUT", + "OUTER", "OUTFILE", + "PARTITIONED", "PRECISION", + "PRIMARY", "PROCEDURE", + "PROJECTION", "PURGE", + "QUICK", "QUARTER", + "READ", "READS", + "REAL", "REFERENCES", + "RELEASE", "RENAME", + "REPEAT", "REPLACE", + "REQUIRE", "RESTRICT", + "RETURN", "REVOKE", + "RIGHT", "ROLLUP", + "SAMPLE", "SCHEMA", + "SCHEMAS", "SECOND_MICROSECOND", + "SELECT", "SENSITIVE", + "SEPARATOR", "SET", + "SHOW", 
"SMALLINT", + "SOME", "SONAME", + "SPATIAL", "SPECIFIC", + "SQL", "SQLEXCEPTION", + "SQLSTATE", "SQLWARNING", + "SQL_BIG_RESULT", "SQL_CALC_FOUND_ROWS", + "SQL_SMALL_RESULT", "SSL", + "STARTING", "STRAIGHT_JOIN", + "TABLE", "TEMPORARY", + "TEXT", "TERMINATED", + "THEN", "TIME", + "TIMESTAMP", "TINYBLOB", + "TINYINT", "TINYTEXT", + "TO", "TRAILING", + "TRIGGER", "UNDO", + "UNION", "UNIQUE", + "UNLOCK", "UNSIGNED", + "UPDATE", "USAGE", + "USE", "USING", + "UTC_DATE", "UTC_TIME", + "UTC_TIMESTAMP", "VALUES", + "VARBINARY", "VARCHAR", + "VARYING", "WHEN", + "WHERE", "WHILE", + "WITH", "WRITE", + "YEAR", "YEAR_MONTH", + "ZEROFILL", "WEEK", + "DO", "MAX_QUERIES_PER_HOUR", + "MAX_UPDATES_PER_HOUR", "MAX_CONNECTIONS_PER_HOUR", + "MAX_USER_CONNECTIONS", "USER", + "TRUNCATE", "FAST", + "MEDIUM", "EXTENDED", + "CHANGED", "LEAVES", + "MASTER", "QUERY", + "CACHE", "SLAVE", + "BEGINT", "COMMIT", + "START", "TRANSACTION", + "NO", "CHAIN", + "AUTOCOMMIT", "SAVEPOINT", + "ROLLBACK", "LOCAL", + "TABLES", "ISOLATION", + "LEVEL", "GLOBAL", + "SESSION", "UNCOMMITTED", + "COMMITTED", "REPEATABLE", + "SERIALIZABLE", "IDENTIFIED", + "PASSWORD", "PRIVILEGES", + "BACKUP", "CHECKSUM", + "REPAIR", "USE_FRM", + "RESTORE", "CHARACTER", + "COLLATION", "COLUMNS", + "ENGINE", "LOGS", + "STATUS", "STORAGE", + "ENGINES", "ERRORS", + "GRANTS", "INNODB", + "PROCESSLIST", "TRIGGERS", + "VARIABLES", "WARNINGS", + "FLUSH", "HOSTS", + "DES_KEY_FILE", "USER_RESOURCES", + "CONNECTION", "RESET", + "PREPARE", "DEALLOCATE", + "EXECUTE", "WORK", + "BTREE", "HASH", + "BDB", "OPEN", + "FSUBSTRING", "FTRIM", + "FDATE_ADD", "FDATE_SUB", + "FCOUNT", "FUPPER", + "FCAST", "FCOALESCE", + "FCONVERT", "FSUM", + "FAVG", "FMIN", + "FMAX", "';'", + "','", "'.'", + "'('", "')'", + "$accept", "stmt_list", + "stmt", "select_stmt", + "opt_where", "opt_groupby", + "groupby_list", "opt_asc_desc", + "opt_with_rollup", "opt_having", + "opt_orderby", "orderby_list", + "opt_limit", "opt_into_list", + "column_list", "select_opts", + 
"select_expr_list", "select_expr", + "table_references", "table_reference", + "table_factor", "opt_as", + "opt_as_alias", "join_table", + "opt_inner_cross", "opt_outer", + "left_right_full", "opt_left_or_right_outer", + "opt_join_condition", "join_condition", + "table_subquery", "expr", + "expr_list", "opt_expr_list", + "trim_ltb", "cast_data_type", + "interval_exp", "case_list", + "create_database_stmt", "opt_if_not_exists", + "create_table_stmt", "create_select_statement", + "opt_ignore_replace", "opt_temporary", + "create_col_list", "create_definition", + "column_atts", "data_type", + "enum_list", "opt_length", + "opt_binary", "opt_uz", + "opt_csc", "create_projection_stmt", + "create_index_stmt", "index_att", + "opt_using_type", "index_type", + "index_col_list", "load_table_stmt", + "drop_index_stmt", "drop_database_stmt", + "opt_if_exists", "drop_table_stmt", + "table_list", "opt_rc", + "insert_stmt", "opt_ondupupdate", + "insert_opts", "opt_into", + "opt_col_names", "insert_vals_list", + "insert_vals", "insert_asgn_list", + "show_stmt", "opt_full", + "opt_from", "opt_like_string", + "opt_like_expr", "opt_bdb", + "opt_trans_level", "delete_stmt", + "delete_opts", "delete_list", + "opt_dot_star", "update_stmt", + "update_set_list", YY_NULL}; #endif /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
*/ -static const unsigned short int yyr1[] = -{ - 0, 342, 343, 343, 343, 343, 344, 345, 345, 346, - 346, 347, 347, 348, 348, 349, 349, 349, 350, 350, - 351, 351, 352, 352, 353, 353, 354, 354, 354, 355, - 355, 356, 356, 357, 357, 357, 357, 357, 357, 357, - 357, 357, 358, 358, 358, 359, 360, 360, 361, 361, - 362, 362, 362, 362, 363, 363, 364, 364, 364, 365, - 365, 365, 365, 365, 366, 366, 366, 367, 367, 368, - 368, 369, 369, 369, 370, 370, 371, 371, 372, 373, - 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 374, 374, 374, 375, - 375, 373, 373, 373, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 376, 376, 376, 377, - 377, 377, 377, 377, 377, 373, 373, 378, 378, 378, - 378, 378, 378, 378, 378, 378, 373, 373, 373, 373, - 379, 379, 373, 373, 373, 373, 344, 380, 380, 381, - 381, 344, 382, 382, 382, 382, 382, 382, 383, 384, - 384, 384, 385, 385, 386, 386, 387, 387, 387, 387, - 387, 387, 388, 388, 388, 388, 388, 388, 388, 388, - 388, 388, 388, 388, 388, 389, 389, 389, 389, 389, - 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - 389, 389, 389, 389, 389, 390, 390, 391, 391, 391, - 392, 392, 393, 393, 393, 394, 394, 394, 344, 395, - 395, 344, 396, 397, 397, 397, 397, 398, 398, 399, - 399, 400, 400, 344, 401, 401, 401, 401, 344, 402, - 344, 403, 403, 404, 404, 344, 405, 406, 406, 406, - 406, 407, 407, 407, 344, 408, 409, 409, 410, 410, - 410, 410, 410, 411, 411, 412, 412, 413, 413, 414, - 414, 414, 414, 408, 408, 415, 415, 415, 415, 344, - 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, - 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, - 416, 416, 416, 416, 416, 416, 416, 417, 417, 418, - 418, 419, 419, 420, 420, 421, 421, 422, 422, 422, - 344, 423, 423, 423, 424, 424, 424, 424, 425, 425, - 426, 426 -}; +static const 
unsigned short int yyr1[] = { + 0, 342, 343, 343, 343, 343, 344, 345, 345, 346, 346, 347, 347, 348, 348, + 349, 349, 349, 350, 350, 351, 351, 352, 352, 353, 353, 354, 354, 354, 355, + 355, 356, 356, 357, 357, 357, 357, 357, 357, 357, 357, 357, 358, 358, 358, + 359, 360, 360, 361, 361, 362, 362, 362, 362, 363, 363, 364, 364, 364, 365, + 365, 365, 365, 365, 366, 366, 366, 367, 367, 368, 368, 368, 369, 369, 369, + 369, 370, 370, 371, 371, 372, 373, 373, 373, 373, 373, 373, 373, 373, 373, + 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, + 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 374, 374, + 374, 375, 375, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, + 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 376, 376, + 376, 377, 377, 377, 377, 377, 377, 373, 373, 378, 378, 378, 378, 378, 378, + 378, 378, 378, 373, 373, 373, 373, 379, 379, 373, 373, 373, 373, 344, 380, + 380, 381, 381, 344, 382, 382, 382, 382, 382, 382, 383, 384, 384, 384, 385, + 385, 386, 386, 387, 387, 387, 387, 387, 387, 388, 388, 388, 388, 388, 388, + 388, 388, 388, 388, 388, 388, 388, 389, 389, 389, 389, 389, 389, 389, 389, + 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, + 389, 389, 389, 389, 389, 389, 389, 390, 390, 391, 391, 391, 392, 392, 393, + 393, 393, 394, 394, 394, 344, 395, 395, 344, 396, 397, 397, 397, 397, 398, + 398, 399, 399, 400, 400, 344, 401, 401, 401, 401, 344, 402, 344, 403, 403, + 404, 404, 344, 405, 406, 406, 406, 406, 407, 407, 407, 344, 408, 409, 409, + 410, 410, 410, 410, 410, 411, 411, 412, 412, 413, 413, 414, 414, 414, 414, + 408, 408, 415, 415, 415, 415, 344, 416, 416, 416, 416, 416, 416, 416, 416, + 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, + 416, 416, 416, 416, 417, 418, 418, 419, 419, 420, 420, 421, 421, 422, 422, + 422, 344, 344, 423, 423, 423, 424, 424, 424, 424, 425, 425, 426, 426, 344, + 427, 428, 428}; /* YYR2[YYN] -- Number of 
symbols composing right hand side of rule YYN. */ -static const unsigned char yyr2[] = -{ - 0, 2, 2, 3, 2, 3, 1, 3, 11, 0, - 2, 0, 4, 1, 3, 0, 1, 1, 0, 2, - 0, 2, 0, 3, 2, 4, 0, 2, 4, 0, - 2, 1, 3, 0, 2, 2, 2, 2, 2, 2, - 2, 2, 1, 3, 1, 2, 1, 3, 1, 1, - 2, 4, 3, 3, 0, 1, 2, 1, 0, 5, - 3, 5, 6, 5, 0, 1, 1, 0, 1, 1, - 1, 2, 2, 0, 0, 1, 2, 4, 3, 1, - 1, 3, 3, 1, 1, 1, 1, 3, 3, 3, - 3, 3, 3, 2, 2, 3, 3, 3, 3, 5, - 6, 6, 6, 3, 3, 3, 3, 2, 2, 3, - 3, 3, 4, 3, 4, 5, 1, 3, 3, 1, - 3, 5, 7, 6, 8, 5, 7, 6, 8, 4, - 5, 4, 4, 4, 4, 4, 4, 6, 6, 8, - 8, 4, 7, 4, 6, 4, 1, 1, 1, 0, - 1, 1, 1, 1, 1, 6, 6, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 4, 6, 3, 5, - 4, 5, 3, 4, 3, 4, 1, 4, 4, 0, - 3, 1, 8, 10, 9, 6, 11, 8, 3, 0, - 1, 1, 0, 1, 1, 3, 3, 5, 4, 4, - 5, 5, 0, 3, 2, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 5, 2, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, - 1, 3, 3, 2, 4, 1, 1, 1, 1, 3, - 3, 3, 3, 5, 5, 1, 3, 0, 3, 5, - 0, 1, 0, 2, 2, 0, 4, 3, 1, 10, - 13, 1, 10, 0, 1, 1, 1, 0, 2, 1, - 1, 3, 5, 1, 12, 9, 9, 12, 1, 5, - 1, 4, 4, 0, 2, 1, 6, 1, 3, 3, - 5, 0, 1, 1, 1, 8, 0, 5, 0, 2, - 2, 2, 2, 1, 0, 0, 3, 3, 5, 1, - 1, 3, 3, 7, 7, 3, 3, 5, 5, 1, - 5, 3, 7, 4, 4, 4, 3, 3, 4, 4, - 3, 3, 3, 4, 5, 3, 3, 5, 2, 3, - 4, 5, 4, 4, 4, 3, 3, 0, 1, 0, - 2, 0, 2, 0, 2, 0, 1, 0, 1, 1, - 1, 7, 7, 6, 2, 2, 2, 0, 2, 4, - 0, 2 -}; +static const unsigned char yyr2[] = { + 0, 2, 2, 3, 2, 3, 1, 3, 11, 0, 2, 0, 4, 1, 3, 0, 1, 1, 0, 2, 0, 2, 0, + 3, 2, 4, 0, 2, 4, 0, 2, 1, 3, 0, 2, 2, 2, 2, 2, 2, 2, 2, 1, 3, 1, 2, + 1, 3, 1, 1, 2, 4, 3, 3, 0, 1, 2, 1, 0, 5, 3, 5, 6, 5, 0, 1, 1, 0, 1, + 1, 1, 1, 2, 2, 2, 0, 0, 1, 2, 4, 3, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 3, + 3, 3, 3, 2, 2, 3, 3, 3, 3, 5, 6, 6, 6, 3, 3, 3, 3, 2, 2, 3, 3, 3, 4, + 3, 4, 5, 1, 3, 3, 1, 3, 5, 7, 6, 8, 5, 7, 6, 8, 4, 5, 4, 4, 4, 4, 4, + 4, 6, 6, 8, 8, 4, 7, 4, 6, 4, 1, 1, 1, 0, 1, 1, 1, 1, 1, 6, 6, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 4, 6, 3, 5, 4, 5, 3, 4, 3, 4, 1, 4, 4, 0, 3, 1, + 8, 10, 9, 6, 11, 8, 3, 0, 1, 1, 0, 1, 1, 3, 3, 5, 4, 4, 5, 5, 0, 3, 2, + 3, 3, 3, 3, 2, 3, 3, 2, 
3, 5, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, + 1, 1, 1, 3, 3, 2, 4, 1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 1, 3, 0, 3, 5, 0, + 1, 0, 2, 2, 0, 4, 3, 1, 10, 13, 1, 10, 0, 1, 1, 1, 0, 2, 1, 1, 3, 5, 1, + 12, 9, 9, 12, 1, 5, 1, 4, 4, 0, 2, 1, 6, 1, 3, 3, 5, 0, 1, 1, 1, 8, 0, + 5, 0, 2, 2, 2, 2, 1, 0, 0, 3, 3, 5, 1, 1, 3, 3, 7, 7, 3, 3, 5, 5, 1, + 5, 3, 7, 4, 4, 4, 3, 3, 4, 4, 3, 3, 3, 4, 5, 3, 3, 5, 2, 3, 4, 5, 4, + 4, 4, 3, 3, 0, 0, 2, 0, 2, 0, 2, 0, 1, 0, 1, 1, 2, 1, 7, 7, 6, 2, 2, + 2, 0, 2, 4, 0, 2, 1, 5, 3, 5}; /* YYDPREC[RULE-NUM] -- Dynamic precedence of rule #RULE-NUM (0 if none). */ -static const unsigned char yydprec[] = -{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0 -}; +static const unsigned char yydprec[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* YYMERGER[RULE-NUM] -- Index of merging function for rule #RULE-NUM. 
*/ -static const unsigned char yymerger[] = -{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0 -}; +static const unsigned char yymerger[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* YYDEFACT[S] -- default reduction number in state S. Performed when YYTABLE doesn't specify something else to do. Zero means the default is an error. */ -static const unsigned short int yydefact[] = -{ - 0, 0, 0, 192, 367, 192, 298, 0, 33, 347, - 0, 0, 6, 176, 181, 258, 261, 273, 278, 280, - 285, 294, 319, 360, 4, 0, 179, 265, 0, 179, - 266, 193, 264, 0, 0, 0, 283, 0, 283, 0, - 304, 0, 0, 79, 83, 84, 86, 85, 80, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 358, 359, 338, 0, 351, 0, 0, 26, 0, 0, - 349, 26, 356, 0, 348, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, - 0, 179, 0, 370, 0, 366, 364, 365, 0, 0, - 0, 0, 0, 283, 300, 301, 302, 303, 299, 0, - 0, 44, 34, 35, 36, 37, 40, 41, 39, 38, - 7, 42, 58, 0, 0, 107, 0, 108, 94, 93, - 0, 0, 0, 0, 0, 0, 326, 0, 0, 327, - 349, 351, 0, 321, 0, 330, 0, 331, 0, 335, - 0, 353, 345, 349, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 332, - 346, 349, 0, 339, 336, 351, 351, 5, 3, 0, - 0, 177, 0, 178, 0, 267, 0, 368, 58, 0, - 9, 46, 48, 49, 54, 0, 0, 0, 284, 281, - 0, 282, 0, 305, 0, 0, 0, 57, 0, 45, - 81, 82, 109, 0, 0, 0, 0, 168, 0, 323, - 324, 325, 0, 349, 351, 342, 352, 328, 329, 27, - 333, 350, 0, 343, 351, 0, 148, 146, 147, 0, - 0, 0, 0, 0, 0, 0, 0, 116, 0, 0, - 0, 0, 0, 0, 0, 110, 0, 96, 97, 95, - 174, 172, 113, 0, 111, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 98, 103, 104, 106, 87, 88, - 89, 90, 92, 91, 105, 351, 0, 340, 344, 0, - 180, 0, 189, 0, 0, 371, 0, 50, 58, 0, - 0, 0, 22, 66, 65, 69, 73, 70, 
0, 0, - 0, 67, 55, 0, 0, 9, 370, 279, 287, 291, - 0, 0, 0, 0, 9, 43, 56, 0, 0, 0, - 166, 0, 0, 129, 334, 341, 0, 354, 337, 0, - 0, 141, 0, 0, 0, 131, 132, 143, 149, 145, - 133, 134, 135, 136, 118, 117, 0, 0, 114, 112, - 0, 0, 175, 173, 0, 0, 0, 0, 0, 0, - 320, 349, 0, 31, 0, 190, 191, 0, 0, 185, - 54, 269, 270, 268, 0, 58, 0, 78, 53, 10, - 0, 26, 67, 67, 0, 60, 47, 0, 68, 0, - 52, 9, 363, 369, 0, 293, 292, 0, 286, 0, - 296, 0, 0, 296, 0, 11, 129, 170, 0, 169, - 0, 28, 0, 0, 0, 0, 0, 0, 151, 154, - 152, 153, 150, 0, 0, 0, 125, 121, 0, 0, - 115, 0, 0, 0, 99, 351, 0, 0, 0, 189, - 0, 0, 0, 0, 0, 0, 194, 0, 0, 51, - 0, 361, 71, 72, 0, 0, 74, 0, 362, 288, - 289, 0, 0, 0, 313, 306, 0, 296, 314, 0, - 0, 20, 167, 171, 0, 138, 0, 137, 0, 0, - 155, 156, 144, 0, 119, 0, 0, 127, 123, 102, - 100, 101, 322, 0, 32, 0, 0, 0, 187, 247, - 247, 247, 236, 247, 226, 229, 247, 247, 0, 247, - 247, 247, 238, 250, 237, 247, 250, 247, 0, 247, - 250, 227, 228, 235, 247, 250, 0, 247, 230, 202, - 0, 0, 0, 0, 0, 189, 0, 188, 0, 23, - 15, 63, 61, 0, 0, 59, 75, 0, 0, 316, - 315, 0, 0, 310, 309, 0, 295, 0, 0, 0, - 22, 0, 0, 142, 157, 161, 158, 159, 160, 165, - 163, 162, 164, 126, 0, 122, 0, 0, 276, 0, - 0, 0, 0, 252, 233, 215, 255, 252, 252, 0, - 252, 252, 252, 251, 255, 252, 255, 252, 0, 252, - 255, 252, 255, 0, 255, 196, 0, 0, 0, 0, - 0, 184, 195, 247, 0, 16, 17, 24, 76, 0, - 62, 290, 0, 0, 0, 0, 307, 275, 18, 13, - 21, 26, 140, 139, 120, 128, 124, 0, 0, 259, - 189, 0, 221, 231, 225, 223, 245, 0, 224, 219, - 220, 242, 218, 241, 222, 0, 217, 240, 216, 239, - 0, 232, 0, 209, 0, 0, 212, 204, 0, 0, - 0, 0, 199, 198, 0, 15, 262, 0, 0, 0, - 318, 317, 312, 311, 0, 0, 0, 12, 0, 29, - 0, 0, 186, 0, 248, 253, 254, 0, 0, 0, - 255, 255, 234, 203, 213, 205, 206, 208, 207, 211, - 210, 0, 200, 201, 197, 271, 25, 77, 297, 308, - 0, 19, 14, 0, 8, 277, 0, 0, 0, 257, - 246, 243, 244, 0, 0, 274, 30, 260, 249, 256, - 214, 272 -}; +static const unsigned short int yydefact[] = { + 0, 0, 0, 194, 369, 0, 194, 300, 0, 33, 
349, 0, 0, 0, 6, + 178, 183, 260, 263, 275, 280, 282, 287, 296, 321, 362, 374, 4, 0, 181, + 267, 0, 181, 268, 195, 266, 0, 0, 0, 361, 285, 0, 285, 0, 306, + 0, 0, 81, 85, 86, 88, 87, 82, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 359, 360, 340, 0, 352, 0, 0, 26, 0, 0, 350, + 26, 357, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 58, 0, 0, 54, 1, 0, 0, 2, 0, 0, + 0, 0, 0, 181, 0, 372, 0, 368, 366, 367, 0, 0, 0, 0, 0, + 285, 302, 303, 304, 305, 301, 0, 0, 44, 34, 35, 36, 37, 40, 41, + 39, 38, 7, 42, 58, 0, 0, 109, 0, 110, 96, 95, 0, 0, 0, + 0, 0, 0, 328, 0, 0, 329, 350, 352, 0, 323, 0, 332, 0, 333, + 0, 337, 0, 354, 347, 350, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 334, 348, 350, 0, + 341, 338, 352, 352, 57, 0, 0, 50, 0, 0, 46, 48, 49, 0, 55, + 0, 5, 3, 0, 0, 179, 0, 180, 0, 269, 0, 370, 58, 9, 0, + 0, 0, 286, 283, 0, 284, 0, 307, 0, 0, 0, 45, 83, 84, 111, + 0, 0, 0, 0, 170, 0, 325, 326, 327, 0, 350, 352, 344, 353, 330, + 331, 27, 335, 351, 0, 345, 352, 0, 150, 148, 149, 0, 0, 0, 0, + 0, 0, 0, 0, 118, 0, 0, 0, 0, 0, 0, 0, 112, 0, 98, + 99, 97, 176, 174, 115, 0, 113, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 100, 105, 106, 108, 89, 90, 91, 92, 94, 93, 107, 352, 0, 342, + 346, 56, 58, 80, 53, 66, 71, 65, 69, 75, 70, 0, 0, 0, 67, + 0, 9, 52, 0, 182, 0, 191, 0, 0, 373, 0, 0, 22, 0, 9, + 372, 281, 289, 293, 0, 0, 0, 0, 9, 43, 0, 0, 0, 168, 0, + 0, 131, 336, 343, 0, 355, 339, 0, 0, 143, 0, 0, 0, 133, 134, + 145, 151, 147, 135, 136, 137, 138, 120, 119, 0, 0, 116, 114, 0, 0, + 177, 175, 0, 0, 0, 0, 0, 0, 322, 350, 51, 67, 67, 67, 0, + 60, 47, 0, 68, 0, 0, 0, 375, 0, 31, 0, 192, 193, 0, 0, + 187, 54, 271, 272, 270, 0, 10, 0, 26, 9, 365, 371, 0, 295, 294, + 0, 288, 0, 298, 0, 0, 298, 0, 11, 131, 172, 0, 171, 0, 28, + 0, 0, 0, 0, 0, 0, 153, 156, 154, 155, 152, 0, 0, 0, 127, + 123, 0, 0, 117, 0, 0, 0, 101, 352, 74, 72, 73, 0, 0, 76, + 0, 100, 0, 0, 0, 0, 191, 0, 0, 0, 0, 0, 0, 196, 0, + 0, 0, 363, 364, 290, 291, 0, 0, 0, 
315, 308, 0, 298, 316, 0, + 0, 20, 169, 173, 0, 140, 0, 139, 0, 0, 157, 158, 146, 0, 121, + 0, 0, 129, 125, 104, 102, 103, 324, 63, 61, 0, 0, 59, 77, 0, + 0, 0, 32, 0, 0, 0, 189, 249, 249, 249, 238, 249, 228, 231, 249, + 249, 0, 249, 249, 249, 240, 252, 239, 249, 252, 249, 0, 249, 252, 229, + 230, 237, 249, 252, 0, 249, 232, 204, 0, 0, 0, 0, 0, 191, 0, + 190, 0, 23, 15, 0, 318, 317, 0, 0, 312, 311, 0, 297, 0, 0, + 0, 22, 0, 0, 144, 159, 163, 160, 161, 162, 167, 165, 164, 166, 128, + 0, 124, 0, 0, 78, 0, 62, 100, 278, 0, 0, 0, 0, 254, 235, + 217, 257, 254, 254, 0, 254, 254, 254, 253, 257, 254, 257, 254, 0, 254, + 257, 254, 257, 0, 257, 198, 0, 0, 0, 0, 0, 186, 197, 249, 0, + 16, 17, 24, 292, 0, 0, 0, 0, 309, 277, 18, 13, 21, 26, 142, + 141, 122, 130, 126, 0, 0, 0, 261, 191, 0, 223, 233, 227, 225, 247, + 0, 226, 221, 222, 244, 220, 243, 224, 0, 219, 242, 218, 241, 0, 234, + 0, 211, 0, 0, 214, 206, 0, 0, 0, 0, 201, 200, 0, 15, 264, + 0, 0, 320, 319, 314, 313, 0, 0, 0, 12, 0, 29, 79, 0, 0, + 188, 0, 250, 255, 256, 0, 0, 0, 257, 257, 236, 205, 215, 207, 208, + 210, 209, 213, 212, 0, 202, 203, 199, 273, 25, 299, 310, 0, 19, 14, + 0, 8, 279, 0, 0, 0, 259, 248, 245, 246, 0, 0, 276, 30, 262, + 251, 258, 216, 274}; /* YYPDEFGOTO[NTERM-NUM]. 
*/ -static const short int yydefgoto[] = -{ - -1, 10, 11, 12, 332, 511, 668, 657, 727, 600, - 421, 579, 157, 764, 404, 42, 130, 131, 220, 221, - 222, 343, 327, 223, 340, 429, 341, 424, 585, 586, - 224, 277, 178, 525, 270, 463, 456, 142, 13, 98, - 14, 409, 410, 33, 485, 486, 645, 569, 687, 623, - 634, 682, 683, 15, 16, 34, 324, 413, 654, 17, - 18, 19, 110, 20, 349, 438, 21, 504, 40, 119, - 352, 507, 595, 440, 22, 89, 161, 153, 263, 90, - 91, 23, 35, 108, 217 -}; +static const short int yydefgoto[] = { + -1, 12, 13, 14, 357, 526, 685, 677, 744, 616, 443, 602, 164, 781, 430, + 46, 137, 138, 219, 220, 221, 225, 217, 222, 343, 424, 344, 419, 552, 553, + 98, 289, 185, 540, 282, 476, 469, 149, 15, 105, 16, 435, 436, 36, 507, + 508, 665, 592, 705, 643, 654, 700, 701, 17, 18, 37, 353, 439, 674, 19, + 20, 21, 117, 22, 363, 451, 23, 519, 44, 126, 366, 522, 611, 453, 24, + 92, 168, 160, 275, 93, 94, 25, 38, 115, 236, 26, 346}; /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. 
*/ -#define YYPACT_NINF -470 -static const short int yypact[] = -{ - 1017, -256, -123, 330, -470, 33, -470, -80, -470, 511, - 509, -198, -470, -470, -470, -470, -470, -470, -470, -470, - -470, -470, -470, -470, -470, 165, 76, -470, 47, 76, - -470, -470, -470, 2, 101, 83, 105, 227, 105, 25, - 283, 241, 708, -87, -470, -470, -470, -470, 239, 1059, - 1088, 1059, 1059, 92, -37, 247, -86, 151, 268, -13, - -470, -470, -470, 95, 286, 294, 5, 174, 210, 26, - 216, 174, -470, 50, -470, -11, -8, -7, 13, 29, - 34, 38, 42, 44, 48, 56, 61, 1117, 1793, -213, - 70, -91, -470, 35, 46, -470, 258, 384, 408, 409, - 410, 76, 411, 85, 18, -470, -470, -470, -98, 332, - 430, 276, 432, 105, -470, -470, -470, -470, -470, 433, - 333, -470, -470, -470, -470, -470, -470, -470, -470, -470, - 335, 109, 2254, 79, 1059, 1124, 99, 1124, -470, -470, - 1059, 1835, -40, 446, 447, 450, -470, 261, 452, -470, - 216, 286, 453, -470, -88, -470, 1059, -470, 448, -470, - 455, 466, -470, 216, 1059, 12, 1059, 1059, 873, 1059, - 1059, 1117, 1059, 1059, 1059, 1059, 1117, 254, 125, 1059, - 1059, 1059, 1059, 1059, 43, 142, 180, 1059, 850, 1059, - 1059, 1059, 1059, 1059, 1059, 1059, 1059, 1059, 1059, -470, - -470, 216, 377, -470, -470, 286, 286, -470, -470, 1117, - 388, -470, 150, -470, 488, 259, 468, -470, 28, 4, - 252, 106, -470, -470, 457, -196, 19, 495, -470, -470, - 496, -470, 497, -192, 1117, 19, 901, -470, 500, -470, - -470, -470, 2303, 261, 1856, -39, 1059, -470, 1059, -470, - -470, -470, 163, 216, 286, -470, -470, -470, -470, 1665, - -470, -470, 1059, -470, 286, 752, -470, -470, -470, 1448, - 1059, 1686, 1707, 167, 1469, 1490, 2211, 1728, 170, 1511, - 1532, 1553, 1574, 172, 1117, -470, 90, 2323, 1352, 2341, - 2359, 2359, -470, 21, -470, 926, 1059, 1059, 166, 2276, - 181, 187, 190, 926, 592, 615, 719, 836, 379, 379, - 503, 503, 503, 503, -470, 286, 531, -470, -470, 292, - -470, 535, -107, -24, 381, -470, 86, -470, 23, 199, - 200, 1059, 380, -470, -470, -470, -83, -470, 19, 19, - 417, 383, -470, 546, 19, 
252, 85, -470, 211, -48, - 548, 535, -120, 307, 252, -470, -470, 213, 1059, 1059, - -470, 2146, 1898, -470, -470, -470, 1059, 2303, -470, 1059, - 1059, -470, 1919, 429, 429, -470, -470, -470, 24, -470, - -470, -470, -470, -470, 90, -470, 217, 545, -470, -470, - 222, 223, 2359, 2359, 926, 1059, 261, 261, 261, 224, - -470, 216, 562, 229, 231, -470, -470, 565, 32, -470, - 457, -470, -470, -470, 567, 52, 572, -470, -470, 2303, - 529, 174, 383, 383, 454, 421, -470, 19, -470, 458, - -470, 252, -470, -470, 578, -470, -470, 579, -470, 566, - -134, 253, 248, 435, 593, 486, 449, 2303, 2167, -470, - 1059, 2303, 657, 1333, 1059, 1059, 257, 263, -470, -470, - -470, -470, -470, 264, 926, 260, -470, -470, 265, 267, - 801, 270, 284, 285, -470, 286, 271, 535, -23, -112, - 1908, -46, 262, 287, 494, 289, 290, 261, 291, -470, - 1059, -470, -470, -470, 19, 1059, -133, 19, -470, -470, - 295, 951, 474, 632, -470, -470, 979, 435, -470, 308, - 600, 538, -470, 2303, 1059, -470, 1059, -470, 1595, 1814, - -470, -470, -470, 309, 313, 311, 926, -470, -470, -470, - -470, -470, -470, 649, -470, 637, 501, 32, -470, 318, - 318, 318, -470, 318, -470, -470, 318, 318, 320, 318, - 318, 318, -470, 618, -470, 318, 618, 318, 323, 318, - 618, -470, -470, -470, 318, 618, 324, 318, -470, -470, - 325, 334, 535, 535, 348, -103, 32, -470, 686, -470, - 2188, -470, 2303, 1059, 350, -470, -470, -108, 690, -470, - 2303, 568, 675, 362, 1749, 363, -470, 699, 1059, 1059, - 380, 1616, 1637, -470, -470, -470, -470, -470, -470, -470, - -470, -470, -470, -470, 1117, -470, 364, 366, 520, 705, - 714, 382, 713, -470, -470, -470, -470, -470, -470, 716, - -470, -470, -470, -470, -470, -470, -470, -470, 716, -470, - -470, -470, -470, 717, -470, 274, 535, 535, 387, 389, - 535, -470, -470, 318, 390, -470, -470, 391, 2303, 535, - -470, -470, 490, 1002, 979, 979, 397, 547, 482, 1772, - 2303, 174, -470, -470, -470, -470, -470, 718, 570, -470, - -94, -203, -156, 67, -156, -156, -470, -121, -156, -156, - -156, 67, -156, 67, -156, -96, 
-156, 67, -156, 67, - 398, 67, 584, -470, 746, 386, -470, -470, 622, -118, - 412, 413, -470, -470, 414, 36, -470, 1059, 415, 548, - -470, 2303, -470, -470, 248, 738, 581, -470, 1059, 633, - 776, 626, -470, 780, -470, -470, -470, 597, 785, 792, - -470, -470, -470, -470, -470, -470, -470, -470, -470, -470, - -470, 535, -470, -470, -470, 451, -470, -470, 459, -470, - 791, -470, -470, 535, -470, -470, 797, 460, 799, -470, - -470, 67, 67, 462, 686, -470, -470, -470, -470, -470, - -470, -470 -}; +#define YYPACT_NINF -489 +static const short int yypact[] = { + 1900, -270, -112, 871, -489, 106, 10, -489, -70, -489, 589, 13, + 1873, -199, -489, -489, -489, -489, -489, -489, -489, -489, -489, -489, + -489, -489, -489, -489, 147, 38, -489, 0, 38, -489, -489, -489, + -32, 79, 9, -489, 113, 238, 113, 59, 164, 276, 426, -39, + -489, -489, -489, -489, 286, 1064, 1088, 1064, 1064, 491, 15, 305, + -19, 217, 328, 32, -489, -489, -489, 137, 321, 349, 56, 228, + 262, 68, 267, 228, -489, 97, 44, 49, 50, 51, 53, 54, + 55, 72, 73, 77, 78, 83, 1119, 1816, -157, 109, -140, -2, + 11, 214, 380, -489, 87, 88, -489, 319, 409, 432, 433, 435, + 38, 436, 101, 20, -489, -489, -489, -100, 342, 439, 287, 442, + 113, -489, -489, -489, -489, -489, 444, 341, -489, -489, -489, -489, + -489, -489, -489, -489, -489, 347, 111, 2424, 120, 1064, 813, 115, + 813, -489, -489, 1064, 1865, 8, 458, 460, 461, -489, 271, 463, + -489, 267, 321, 464, -489, -212, -489, 1064, -489, 459, -489, 467, + 457, -489, 267, 1064, 14, 1064, 1064, 688, 1064, 1064, 1119, 1064, + 1064, 1064, 1064, 1119, 1186, 132, 1064, 1064, 1064, 1064, 1064, 45, + 134, 230, 1064, 52, 1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064, + 1064, 1064, -489, -489, 267, 365, -489, -489, 321, 321, -489, 472, + 473, -489, 138, 139, 399, -489, -489, 1064, -489, 475, -489, -489, + 1119, 382, -489, 142, -489, 482, 251, 462, -489, 25, 245, -206, + 13, 487, -489, -489, 488, -489, 489, -129, 1119, 13, 855, -489, + -489, -489, 2508, 271, 2124, 19, 1064, -489, 
1064, -489, -489, -489, + 152, 267, 321, -489, -489, -489, -489, 1678, -489, -489, 1064, -489, + 321, 1657, -489, -489, -489, 1333, 1064, 1699, 1720, 159, 1354, 1450, + 2459, 1747, 161, 1471, 1492, 1518, 1541, 162, 1119, -489, 231, 2169, + 2294, 1951, 2027, 2027, -489, 24, -489, 927, 1064, 1064, 165, 2481, + 170, 174, 177, 927, 784, 1246, 744, 375, 261, 261, 474, 474, + 474, 474, -489, 321, 515, -489, -489, -489, 47, -489, -489, -489, + -489, -489, -489, -30, -489, 13, 13, 392, 357, 2529, -218, -489, + 278, -489, 523, -108, 35, 368, -489, 131, 1064, 364, 13, 245, + 101, -489, 194, -44, 529, 523, -111, 288, 245, -489, 195, 1064, + 1064, -489, 2275, 2145, -489, -489, -489, 1064, 2508, -489, 1064, 1064, + -489, 2227, 410, 410, -489, -489, -489, 233, -489, -489, -489, -489, + -489, 231, -489, 198, 525, -489, -489, 202, 203, 2027, 2027, 927, + 1064, 271, 271, 271, 205, -489, 267, -489, 357, 357, 357, 417, + 390, -489, 13, -489, 420, 52, 1064, -489, 547, 215, 211, -489, + -489, 559, 5, -489, 380, -489, -489, -489, 573, 2508, 530, 228, + 245, -489, -489, 574, -489, -489, 576, -489, 560, -134, 241, 243, + 427, 584, 478, 545, 2508, 2372, -489, 1064, 2508, 1209, 1312, 1064, + 1064, 250, 257, -489, -489, -489, -489, -489, 258, 927, 260, -489, + -489, 268, 269, 1220, 270, 275, 282, -489, 321, -489, -489, -489, + 13, 1064, -118, 13, 69, 2550, 263, 523, 39, -83, 2133, 93, + 277, 284, 471, 285, 265, 271, 289, 1064, -489, -489, -489, 291, + 724, 465, 624, -489, -489, 957, 427, -489, 293, 585, 520, -489, + 2508, 1064, -489, 1064, -489, 1562, 1842, -489, -489, -489, 296, 304, + 302, 927, -489, -489, -489, -489, -489, -489, -489, 2508, 1064, 306, + -489, -489, -110, 52, 640, -489, 625, 495, 5, -489, 307, 307, + 307, -489, 307, -489, -489, 307, 307, 308, 307, 307, 307, -489, + 606, -489, 307, 606, 307, 315, 307, 606, -489, -489, -489, 307, + 606, 320, 307, -489, -489, 322, 323, 523, 523, 324, -78, 5, + -489, 656, -489, 2393, 658, -489, 2508, 534, 647, 330, 1774, 333, + -489, 665, 
1064, 1064, 364, 1583, 1604, -489, -489, -489, -489, -489, + -489, -489, -489, -489, -489, -489, 1119, -489, 335, 336, 2508, 523, + -489, 193, 490, 673, 676, 343, 678, -489, -489, -489, -489, -489, + -489, 681, -489, -489, -489, -489, -489, -489, -489, -489, 681, -489, + -489, -489, -489, 682, -489, 240, 523, 523, 345, 348, 523, -489, + -489, 307, 356, -489, -489, 352, -489, 466, 981, 957, 957, 361, + 511, 456, 1795, 2508, 228, -489, -489, -489, -489, -489, 362, 687, + 542, -489, -76, -45, -178, 210, -178, -178, -489, -37, -178, -178, + -178, 210, -178, 210, -178, -29, -178, 210, -178, 210, 370, 210, + 558, -489, 712, 249, -489, -489, 587, -98, 376, 378, -489, -489, + 381, 71, -489, 1064, 529, -489, 2508, -489, -489, 243, 701, 535, + -489, 1064, 595, -489, 719, 577, -489, 733, -489, -489, -489, 543, + 740, 741, -489, -489, -489, -489, -489, -489, -489, -489, -489, -489, + -489, 523, -489, -489, -489, 406, -489, 408, -489, 757, -489, -489, + 523, -489, -489, 762, 437, 773, -489, -489, 210, 210, 438, 656, + -489, -489, -489, -489, -489, -489, -489}; /* YYPGOTO[NTERM-NUM]. 
*/ -static const short int yypgoto[] = -{ - -470, -470, 794, -89, -285, -470, 84, 128, -470, -470, - 249, 131, -70, -470, -348, -470, 580, -470, -132, -470, - -299, 440, -126, -470, -470, -119, -470, -470, -470, 282, - -470, -9, -163, 238, -470, -470, 498, 729, -470, 51, - -470, -434, -470, 866, -469, -470, -470, -470, 235, -170, - -347, -311, -385, -470, -470, -470, -470, -470, 108, -470, - -470, -470, 10, -470, -470, -470, -470, -392, -470, -470, - -470, 160, -352, 169, -470, -470, -116, -140, -470, -470, - -470, -470, -470, 781, 540 -}; +static const short int yypgoto[] = { + -489, -489, 735, -81, -216, -489, 36, 57, -489, -489, 166, + 42, -73, -489, -360, -489, 533, -489, -40, -489, -8, 350, + -105, -489, -489, -102, -489, -489, -489, 244, -489, -10, -174, + 154, -489, -489, 398, 644, -489, 34, -489, -488, -489, 789, + -452, -489, -489, -489, 141, 586, -380, -381, -337, -489, -489, + -489, -489, -489, 37, -489, -489, -489, 6, -489, -489, -489, + -489, -404, -489, -489, -489, 60, -398, 61, -489, -489, -146, + -131, -489, -489, -489, -489, -489, 689, 469, -489, -489}; /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule which number is the opposite. If YYTABLE_NINF, syntax error. 
*/ -#define YYTABLE_NINF -371 -static const short int yytable[] = -{ - 88, 162, 435, 441, 350, 405, 239, 328, 278, 226, - 405, 255, 750, 283, 405, 43, 44, 45, 46, 47, - 48, 218, 328, 405, 502, 583, 237, 388, 458, 49, - 50, 237, 143, 132, 254, 480, 51, 52, 344, 425, - 135, 137, 138, 139, 141, 538, 319, 264, 112, 292, - 583, 508, 422, 246, 359, 237, 247, 360, 252, 266, - 432, 293, 238, 53, 201, 317, 318, 238, 621, 445, - 406, 353, 570, 8, 735, 406, 655, 459, 177, 406, - 100, 24, 240, 202, 571, 315, 103, 330, 406, 415, - 25, 238, 203, 736, 345, 43, 44, 45, 46, 47, - 48, 584, 36, 354, 423, 241, 386, 652, 387, 49, - 50, 56, 325, 460, 365, 596, 51, 52, 442, 656, - 737, 385, 738, 232, 368, 242, 584, 461, 496, 534, - 329, 244, 391, 41, 535, 733, 436, 364, 734, 95, - 481, 651, 227, 53, 536, 267, 498, 259, 351, 462, - 482, 37, 214, 144, 357, 265, 269, 271, 272, 274, - 275, 276, 483, 279, 280, 281, 282, 177, 96, 333, - 287, 288, 289, 290, 291, 400, 145, 389, 299, 304, - 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, - 104, 56, 97, 296, 297, 581, 298, 8, 587, 294, - 105, 484, 248, 248, 503, 99, 390, 426, 205, 636, - 257, 258, 431, 640, 399, 101, 206, 739, 642, 102, - 740, 109, 751, 38, 648, 649, 334, 132, 537, 106, - 111, 469, 407, 408, -182, -64, 268, 361, 113, 362, - 227, 335, 739, -183, 120, 741, 732, 31, 134, 691, - 146, 693, 133, 367, 147, 697, 107, 699, 148, 701, - 336, 372, -370, 443, 179, 180, 181, 182, 183, 184, - 185, 149, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 475, 150, 392, 393, 489, - 437, 151, 702, 337, 177, 411, 412, 154, 710, 711, - 152, 524, 714, 492, 493, 468, 155, 471, 472, 473, - 156, 718, 722, 723, 158, 703, 684, 685, 338, 688, - 689, 690, 419, 160, 692, 159, 694, 163, 696, 164, - 698, 704, 165, 166, 140, 532, 75, 76, 77, 78, - 79, 80, 81, 82, 219, 83, 84, 85, 86, 447, - 448, 491, 87, 167, 705, 771, 772, 451, 219, 219, - 452, 453, 416, 617, 114, 209, -58, 326, 204, 168, - 624, 625, 207, 626, 169, 523, 627, 628, 170, 630, - 
631, 632, 171, 208, 172, 635, 470, 637, 173, 639, - 745, 746, 747, 748, 641, 115, 174, 644, 577, 26, - 116, 175, 210, 773, 706, 194, 195, 196, 197, 198, - 117, 211, 212, 213, 215, 776, 75, 76, 77, 78, - 79, 80, 81, 82, 216, 83, 84, 85, 86, 118, - 707, 228, 87, 229, 230, 231, 233, 616, 27, 243, - 234, 513, 235, 708, 339, 518, 519, 236, -263, 249, - 250, 524, -129, 251, 8, 253, 260, 256, 261, -129, - -129, -129, -129, -129, -129, -129, 286, -129, -129, -129, - -129, -129, -129, -129, -129, -129, -129, -129, -129, -129, - 262, 580, 295, 715, 316, -129, 582, 320, -129, -129, - 321, 322, 590, 323, 325, 331, 342, 594, 346, 347, - 348, 28, 709, 356, 363, 601, 394, 602, 375, 92, - 93, 379, -129, 384, 43, 44, 45, 46, 47, 48, - 29, 396, -129, -129, -129, -129, -129, 397, 49, 50, - 398, 30, -129, 198, 401, 51, 52, 402, 403, 414, - 417, 418, -129, 420, 31, -129, 427, 2, 428, 430, - 434, 439, 444, -129, 446, 455, -129, 464, 32, -129, - -129, 465, 53, 466, 467, 474, 476, 477, 479, -129, - 488, 3, 478, 54, 658, 415, -129, 490, -129, 495, - 55, 499, 500, 494, -129, -129, 501, 497, 506, 669, - 670, 4, 284, 502, 505, 285, 510, 509, 520, 5, - 526, 729, 572, -129, 521, 522, 527, -129, 528, 533, - 56, 529, -129, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, -129, 574, 530, 531, 573, 576, 57, - 575, 578, 6, 591, 588, 592, -129, 190, 191, 192, - 193, 194, 195, 196, 197, 198, 597, 7, 598, 599, - 613, 614, 615, 618, 721, 594, 594, 619, 622, 620, - 629, -129, 633, 638, 643, 646, -129, 179, 180, 181, - 182, 183, 184, 185, 647, 186, 187, 188, 189, 190, - 191, 192, 193, 194, 195, 196, 197, 198, 650, 653, - 659, -129, -129, 661, -129, 663, -129, -129, 662, -129, - 664, 58, 8, 667, 666, 675, 9, 676, 580, 677, - 678, 43, 44, 45, 46, 47, 48, 679, 681, 669, - 686, 719, 700, 680, 59, 49, 50, 726, 712, 717, - 713, 716, 51, 52, 121, 724, 725, 731, 730, 742, - 743, 122, 191, 192, 193, 194, 195, 196, 197, 198, - 744, -129, 749, 752, 753, 754, 757, -129, 760, 53, - 763, 514, 179, 180, 181, 182, 
183, 184, 185, 761, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 765, 766, 767, -129, -129, 769, 774, - -129, 60, 61, 768, 123, 124, 770, 503, 775, 62, - 777, 778, 779, 780, 94, 63, 64, 56, 65, -355, - -357, 66, 762, 67, 68, 69, 355, 70, -357, 71, - 125, 188, 189, 190, 191, 192, 193, 194, 195, 196, - 197, 198, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 755, 83, 84, 85, 86, 756, 671, - 487, 87, 674, 43, 44, 45, 46, 47, 48, 369, - 192, 193, 194, 195, 196, 197, 198, 49, 50, 660, - 245, 39, 457, 695, 51, 52, 43, 44, 45, 46, - 47, 48, 781, 300, 759, 225, 433, 301, 758, 0, - 49, 50, 0, 0, 0, 0, 0, 51, 52, 273, - 0, 53, 0, 0, 43, 44, 45, 46, 47, 48, - 0, 0, 0, 0, 0, 126, 127, 128, 49, 50, - 129, 0, 0, 0, 53, 51, 52, 121, 0, 43, - 44, 45, 46, 47, 48, 0, 0, 0, 0, 0, - 0, 0, 0, 49, 50, 0, 0, 0, 0, 56, - 51, 52, 53, 0, 43, 44, 45, 46, 47, 48, - 0, 0, 0, 0, 0, 0, 0, 0, 49, 50, - 0, 0, 56, 0, 0, 51, 52, 53, 0, 0, - 0, 0, 43, 44, 45, 46, 47, 48, 0, 0, - 0, 0, 0, 0, 0, 0, 49, 50, 515, 0, - 56, 0, 53, 51, 52, 43, 44, 45, 46, 47, - 48, 0, 0, 0, 0, 0, 0, 0, 1, 49, - 50, 0, 0, 0, 0, 56, 51, 52, 0, 0, - 53, 589, 75, 76, 77, 78, 79, 80, 81, 82, - 0, 83, 84, 85, 86, 0, 0, 0, 87, 302, - 56, 0, 0, 53, 0, 2, 0, 0, 0, 593, - 0, 0, 43, 44, 45, 46, 47, 48, 0, 0, - 0, 0, 0, 0, 0, 0, 49, 50, 56, 3, - 0, 0, 720, 51, 52, 0, 0, 0, 0, 0, - 370, 43, 44, 45, 46, 47, 48, 0, 0, 4, - 0, 56, 0, 0, 0, 49, 50, 5, 0, 0, - 53, 0, 51, 52, 0, 0, 0, 0, 0, 8, - 43, 44, 45, 46, 47, 48, 0, 0, 0, 0, - 0, 0, 0, 0, 49, 50, 0, 0, 0, 53, - 6, 51, 52, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 7, 0, 0, 56, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 53, 0, - 0, 0, 0, 0, 75, 76, 77, 78, 79, 80, - 81, 82, 0, 83, 84, 85, 86, 136, 0, 0, - 303, 0, 0, 0, 0, 0, 0, 75, 76, 77, - 78, 79, 80, 81, 82, 0, 83, 84, 85, 86, - 8, 0, 0, 87, 9, 0, 56, 0, 0, 0, - 0, 0, 0, 0, 0, 75, 76, 77, 78, 79, - 80, 81, 82, 0, 83, 84, 85, 86, 0, 0, - 0, 87, 0, 0, 0, 0, 0, 0, 0, 0, - 75, 76, 77, 78, 79, 80, 81, 82, 0, 83, 
- 84, 85, 86, 0, 0, 0, 176, 0, 0, 0, - 0, 0, 0, 0, 0, 75, 76, 77, 78, 79, - 80, 81, 82, 0, 83, 84, 85, 86, 0, 0, - 0, 87, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 75, 76, 77, 78, 79, 80, 81, - 82, 0, 83, 84, 85, 86, 0, 0, 0, 87, - 0, 0, 0, 0, 0, 0, 75, 76, 77, 78, - 79, 80, 81, 82, 0, 83, 84, 85, 86, 0, - 0, 0, 87, 179, 180, 181, 182, 183, 184, 185, - 0, 186, 187, 188, 189, 190, 191, 192, 193, 194, - 195, 196, 197, 198, 181, 182, 183, 184, 185, 0, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 75, 76, 77, 78, 79, 80, 81, - 82, 0, 83, 84, 85, 86, 0, 0, 0, 87, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 75, 76, 77, 78, 79, 80, 81, 82, - 0, 83, 84, 85, 86, 0, 0, 0, 87, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 75, 76, 77, 78, 79, 80, 81, 82, 0, - 83, 84, 85, 86, 0, 0, 0, 176, 179, 180, - 181, 182, 183, 184, 185, 0, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 179, - 180, 181, 182, 183, 184, 185, 0, 186, 187, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, - 179, 180, 181, 182, 183, 184, 185, 0, 186, 187, - 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, - 198, 179, 180, 181, 182, 183, 184, 185, 0, 186, - 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, - 197, 198, 179, 180, 181, 182, 183, 184, 185, 0, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 179, 180, 181, 182, 183, 184, 185, - 0, 186, 187, 188, 189, 190, 191, 192, 193, 194, - 195, 196, 197, 198, 179, 180, 181, 182, 183, 184, - 185, 0, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 179, 180, 181, 182, 183, - 184, 185, 0, 186, 187, 188, 189, 190, 191, 192, - 193, 194, 195, 196, 197, 198, 179, 180, 181, 182, - 183, 184, 185, 0, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 179, 180, 181, - 182, 183, 184, 185, 0, 186, 187, 188, 189, 190, - 191, 192, 193, 194, 195, 196, 197, 198, 0, 0, - 0, 516, 0, 0, 517, 179, 180, 181, 182, 183, - 184, 185, 0, 186, 187, 188, 189, 190, 191, 192, - 193, 194, 195, 196, 197, 198, 
179, 180, 181, 182, - 183, 184, 185, 0, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 179, 180, 181, - 182, 183, 184, 185, 0, 186, 187, 188, 189, 190, - 191, 192, 193, 194, 195, 196, 197, 198, 179, 180, - 181, 182, 183, 184, 185, 0, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 179, - 180, 181, 182, 183, 184, 185, 0, 186, 187, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, - 0, 0, 179, 180, 181, 182, 183, 184, 185, 371, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 179, 180, 181, 182, 183, 184, 185, - 376, 186, 187, 188, 189, 190, 191, 192, 193, 194, - 195, 196, 197, 198, 179, 180, 181, 182, 183, 184, - 185, 377, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 179, 180, 181, 182, 183, - 184, 185, 380, 186, 187, 188, 189, 190, 191, 192, - 193, 194, 195, 196, 197, 198, 179, 180, 181, 182, - 183, 184, 185, 381, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 604, 605, 606, - 607, 608, 0, 0, 382, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 179, 180, - 181, 182, 183, 184, 185, 383, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 179, - 180, 181, 182, 183, 184, 185, 603, 186, 187, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, - 0, 539, 540, 541, 542, 0, 0, 672, 0, 0, - 0, 543, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 673, 544, - 545, 0, 0, 0, 0, 0, 546, 0, 609, 0, - 0, 0, 0, 0, 0, 0, 0, 547, 0, 0, - 0, 0, 0, 366, 0, 548, 0, 0, 0, 0, - 0, 549, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 373, 0, 454, 0, 0, 0, - 0, 0, 550, 551, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 374, 0, 0, 0, 0, - 0, 552, 553, 0, 0, 0, 554, 555, 556, 0, - 0, 610, 611, 0, 612, 0, 284, 0, 0, 0, - 0, 0, 0, 358, 0, 0, 0, 140, 0, 0, - 0, 0, 0, 0, 0, 557, 0, 665, 0, 0, - 0, 0, 0, 0, 0, 199, 0, 0, 0, 0, - 0, 200, 0, 0, 558, 0, 559, 0, 0, 0, - 728, 0, 0, 0, 0, 450, 0, 0, 0, 0, - 0, 0, 0, 560, 0, 0, 561, 562, 563, 564, - 565, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 
566, 567, 0, - 0, 0, 0, 0, 0, 568, 179, 180, 181, 182, - 183, 184, 185, 0, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 179, 180, 181, - 182, 183, 184, 185, 0, 186, 187, 188, 189, 190, - 191, 192, 193, 194, 195, 196, 197, 198, 179, 180, - 181, 182, 183, 184, 185, 0, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 0, - 0, 179, 180, 181, 182, 183, 184, 185, 655, 186, - 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, - 197, 198, 449, 0, 0, 0, 0, 0, 0, 0, - 378, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 0, 0, 0, 512, 179, 180, 181, 182, 183, 184, - 185, 656, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 0, 179, 180, 181, 182, - 183, 184, 185, 238, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 0, 0, 0, - 0, 0, 395, 179, 180, 181, 182, 183, 184, 185, - 0, 186, 187, 188, 189, 190, 191, 192, 193, 194, - 195, 196, 197, 198, 180, 181, 182, 183, 184, 185, - 0, 186, 187, 188, 189, 190, 191, 192, 193, 194, - 195, 196, 197, 198, 182, 183, 184, 185, 0, 186, - 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, - 197, 198, -371, -371, -371, -371, 0, 186, 187, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198 -}; +#define YYTABLE_NINF -378 +static const short int yytable[] = { + 91, 214, 169, 97, 290, 454, 448, 240, 502, 295, 431, 266, + 110, 561, 95, 218, 95, 47, 48, 49, 50, 51, 52, 237, + 276, 517, 356, 267, 214, 358, 401, 53, 54, 768, 251, 431, + 139, 215, 55, 56, 431, 550, 431, 142, 144, 145, 146, 148, + 119, 550, 214, 304, 523, 753, 348, 47, 48, 49, 50, 51, + 52, 278, 327, 305, 215, 57, 107, 27, 364, 53, 54, 238, + 754, 264, 367, 432, 55, 56, 416, 40, 184, 329, 330, 9, + 150, 312, 215, 269, 270, 313, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 432, 258, 28, 57, 259, 432, 417, 432, + 641, 39, 671, 675, 372, 60, 503, 373, 111, 551, 612, 377, + 426, 208, 398, 252, 504, 551, 246, 112, 455, 41, 427, 254, + 241, 404, 332, 378, 505, 256, 102, 557, 209, 449, 233, 445, + 45, 381, 253, 672, 279, 210, 
103, 60, 458, 271, 676, 104, + 113, 354, 418, 106, 212, 277, 281, 283, 284, 286, 287, 288, + 213, 291, 292, 293, 294, 184, 370, 506, 299, 300, 301, 302, + 303, 402, 108, 114, 311, 316, 317, 318, 319, 320, 321, 322, + 323, 324, 325, 326, 413, 558, 109, 656, 359, 42, 306, 660, + 518, 9, 151, 559, 662, 368, 750, 365, 593, 345, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 594, 34, 403, 415, + 513, 152, 116, 433, 434, 482, 412, 668, 669, 471, 241, 280, + 139, 118, 769, 308, 309, 121, 310, 399, 374, 400, 375, 260, + 314, 763, 764, 765, 766, 560, 720, -184, -372, -185, 260, 755, + 380, 756, 702, 703, 488, 706, 707, 708, 385, 120, 710, 694, + 712, 122, 714, 127, 716, 721, 123, 739, 740, 456, 472, 201, + 202, 203, 204, 205, 124, 751, 450, 141, 752, 722, 405, 406, + 140, 757, 421, 539, 758, 184, 728, 729, 153, 757, 732, 125, + 759, -376, 489, 490, 491, 709, 444, 711, 723, 154, 473, 715, + 155, 717, 481, 719, 484, 485, 486, 156, 157, 420, 158, 159, + 474, 216, 78, 79, 80, 81, 82, 83, 84, 85, 441, 86, + 87, 88, 89, 96, 161, 96, 90, 437, 438, 547, 162, 475, + 96, 460, 461, -58, 355, 163, 165, 633, 166, 464, 512, 724, + 465, 466, 167, 170, 78, 79, 80, 81, 82, 83, 84, 85, + 171, 86, 87, 88, 89, 172, 173, 174, 315, 175, 176, 177, + 538, 725, 483, 199, 200, 201, 202, 203, 204, 205, -376, -376, + 211, 790, 726, 223, 178, 179, 494, 496, 497, 180, 181, 224, + 793, 788, 789, 182, 226, 227, 228, 229, 600, 47, 48, 49, + 50, 51, 52, 230, 231, -377, 232, 234, 235, 242, 243, 53, + 54, 245, 244, 247, 248, 250, 55, 56, 128, 528, 249, 255, + 539, 533, 534, 129, 632, 261, 335, 262, 263, 9, 265, 272, + 268, 727, 273, 274, 328, 298, 307, 331, 332, 57, 347, 333, + 334, 349, 350, 549, 548, 351, 352, 554, 354, 356, 360, 361, + 362, 376, 47, 48, 49, 50, 51, 52, 388, 603, 392, 397, + 205, 407, 606, 336, 53, 54, 409, 610, 130, 131, 410, 55, + 56, 411, 414, 617, 337, 618, 422, 423, 428, 60, 429, 440, + 442, -64, -377, -377, 452, 447, 457, 338, 459, 468, 477, 132, + 634, 478, 57, 479, 480, 637, 
487, 492, -131, 493, 495, 498, + 500, 499, 339, -131, -131, -131, -131, -131, -131, -131, 501, -131, + -131, -131, -131, -131, -131, -131, -131, -131, -131, -131, -131, -131, + 510, 514, 511, 515, 516, -131, 520, 521, -131, -131, 517, 340, + 524, 525, 60, 535, 47, 48, 49, 50, 51, 52, 536, 537, + 541, 556, 597, 599, 686, 687, 53, 54, -131, 542, 543, 544, + 341, 55, 56, 746, 545, 595, -131, -131, -131, -131, -131, 546, + 596, 607, 598, 608, -131, 601, 604, 613, 615, 614, 133, 134, + 135, 629, -131, 136, 57, -131, 630, 631, 638, 639, 635, 642, + 649, -131, 653, 58, -131, -131, 640, 658, -131, -131, 59, 673, + 663, 678, 666, 667, 670, 679, -131, 680, 681, 684, 738, 610, + 610, -131, 683, -131, 692, 693, 696, 697, 695, -131, -131, 699, + 698, 704, 730, 718, 60, 731, 735, 47, 48, 49, 50, 51, + 52, 734, 736, 741, -131, 742, 743, 747, -131, 53, 54, 748, + 61, -131, 749, 760, 55, 56, 285, 761, 762, 770, 767, 771, + -131, 777, 772, 780, 778, 603, 782, 47, 48, 49, 50, 51, + 52, -131, 147, 686, 783, 342, 784, 57, 785, 53, 54, 786, + 791, 787, 518, 101, 55, 56, 78, 79, 80, 81, 82, 83, + 84, 85, -131, 86, 87, 88, 89, -131, 792, 794, 90, 198, + 199, 200, 201, 202, 203, 204, 205, 57, 796, 774, 795, 797, + 62, 779, 688, 369, 691, 470, 509, 60, -131, -131, 773, -131, + 257, -131, -131, 43, -131, 775, 636, 713, 239, 776, 0, 63, + 605, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 78, + 79, 80, 81, 82, 83, 84, 85, 60, 86, 87, 88, 89, + 798, 446, 0, 90, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 0, 0, 0, 0, -131, 0, 0, 0, + 0, 0, -131, 0, 0, 0, 47, 48, 49, 50, 51, 52, + 0, 0, 0, 0, 0, 0, 64, 65, 53, 54, 0, 0, + 0, 0, 66, 55, 56, 128, -131, -131, 67, 68, -131, 69, + -356, -358, 70, 0, 71, 72, 73, 0, 74, -358, 75, 0, + 0, 0, 0, 0, 0, 0, 57, 0, 0, 0, 0, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 0, 86, 87, + 88, 89, 0, 0, 0, 90, 47, 48, 49, 50, 51, 52, + 0, 0, 0, 0, 29, 0, 0, 0, 53, 54, 0, 0, + 0, 0, 0, 55, 56, 0, 60, 0, 0, 0, 0, 0, + 47, 48, 49, 50, 51, 52, 0, 0, 0, 
0, 0, 0, + 0, 0, 53, 54, 0, 0, 57, 0, 30, 55, 56, 0, + 47, 48, 49, 50, 51, 52, -265, 0, 0, 0, 0, 0, + 0, 0, 53, 54, 0, 0, 0, 0, 0, 55, 56, 0, + 57, 0, 0, 0, 78, 79, 80, 81, 82, 83, 84, 85, + 0, 86, 87, 88, 89, 0, 60, 0, 90, 0, 0, 0, + 57, 0, 0, 0, 0, 609, 0, 0, 0, 0, 0, 31, + 0, 0, 0, 0, 78, 79, 80, 81, 82, 83, 84, 85, + 60, 86, 87, 88, 89, 737, 32, 0, 90, 0, 0, 47, + 48, 49, 50, 51, 52, 33, 0, 0, 0, 0, 0, 0, + 60, 53, 54, 0, 0, 0, 34, 0, 55, 56, 0, 47, + 48, 49, 50, 51, 52, 0, 0, 0, 35, 0, 0, 0, + 0, 53, 54, 0, 0, 0, 0, 0, 55, 56, 0, 57, + 0, 0, 0, 0, 0, 9, 47, 48, 49, 50, 51, 52, + 0, 0, 0, 0, 0, 0, 0, 0, 53, 54, 0, 57, + 0, 0, 0, 55, 56, 0, 0, 0, 0, 644, 645, 0, + 646, 0, 0, 647, 648, 0, 650, 651, 652, 0, 0, 60, + 655, 0, 657, 0, 659, 0, 57, 0, 0, 661, 0, 0, + 664, 0, 0, 78, 79, 80, 81, 82, 83, 84, 85, 143, + 86, 87, 88, 89, 0, 0, 0, 90, 186, 187, 188, 189, + 190, 191, 192, 0, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 0, 60, 186, 187, 188, 189, 190, + 191, 192, 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 78, 79, 80, 81, 82, 83, 84, 85, 733, + 86, 87, 88, 89, 0, 0, 0, 183, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 0, 0, 0, 0, 78, 79, 80, + 81, 82, 83, 84, 85, 0, 86, 87, 88, 89, 0, 0, + 0, 90, 0, 0, 0, 0, 0, 0, 0, 78, 79, 80, + 81, 82, 83, 84, 85, 529, 86, 87, 88, 89, 0, 0, + 0, 90, 186, 187, 188, 189, 190, 191, 192, 0, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 186, + 187, 188, 189, 190, 191, 192, 0, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 186, 187, 188, 189, + 190, 191, 192, 0, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 0, 0, 0, 78, 79, 80, 81, + 82, 83, 84, 85, 0, 86, 87, 88, 89, 0, 0, 0, + 90, 0, 0, 0, 0, 0, 0, 0, 78, 79, 80, 81, + 82, 83, 84, 85, 0, 86, 87, 88, 89, 0, 0, 0, + 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 78, 79, 80, 81, 82, 83, 84, 85, 0, + 86, 87, 88, 89, 
0, 0, 0, 183, 186, 187, 188, 189, + 190, 191, 192, 0, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 186, 187, 188, 189, 190, 191, 192, + 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 186, 187, 188, 189, 190, 191, 192, 0, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 0, + 296, 0, 0, 297, 186, 187, 188, 189, 190, 191, 192, 0, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 0, 530, 186, 187, 188, 189, 190, 191, 192, 0, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 186, 187, 188, 189, 190, 191, 192, 0, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 186, 187, 188, + 189, 190, 191, 192, 0, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 186, 187, 188, 189, 190, 191, + 192, 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 531, 0, 0, 532, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 186, + 187, 188, 189, 190, 191, 192, 384, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 186, 187, 188, 189, + 190, 191, 192, 389, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 186, 187, 188, 189, 190, 191, 192, + 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 186, 187, 188, 189, 190, 191, 192, 0, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 0, + 0, 0, 0, 0, 0, 186, 187, 188, 189, 190, 191, 192, + 382, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 0, 0, 0, 0, 0, 0, 186, 187, 188, 189, + 190, 191, 192, 390, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 186, 187, 188, 189, 190, 191, 192, + 393, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 186, 187, 188, 189, 190, 191, 192, 394, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 0, + 0, 0, 0, 0, 186, 187, 188, 189, 190, 191, 192, 395, + 193, 194, 195, 196, 197, 198, 199, 200, 
201, 202, 203, 204, + 205, 99, 100, 186, 187, 188, 189, 190, 191, 192, 396, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 0, 0, 0, 0, 0, 1, 0, 619, 0, 0, 0, 0, + 0, 0, 0, 2, 0, 0, 0, 620, 621, 622, 623, 624, + 0, 0, 0, 0, 689, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3, 0, 0, 2, 0, 0, 0, 0, 0, + 0, 690, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, + 5, 0, 0, 0, 0, 0, 3, 6, 189, 190, 191, 192, + 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 4, 5, 0, 0, 0, 0, 0, 0, 6, 0, + 0, 0, 0, 383, 0, 7, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, + 379, 625, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 386, 0, 8, + -378, -378, -378, -378, 0, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 387, 0, 0, 0, 0, 0, + 0, 0, 0, 9, 0, 0, 0, 10, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, 0, + 0, 0, 626, 627, 0, 628, 9, 0, 0, 0, 10, 0, + 0, 0, 0, 0, 0, 11, 0, 0, 147, 0, 0, 0, + 682, 0, 0, 0, 0, 0, 0, 206, 0, 0, 0, 0, + 0, 207, 0, 0, 0, 0, 0, 0, 11, 745, 186, 187, + 188, 189, 190, 191, 192, 0, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 186, 187, 188, 189, 190, + 191, 192, 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 562, 563, 564, 565, 187, 188, 189, 190, + 191, 192, 566, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 0, 0, 0, 0, 567, 568, 0, 0, + 0, 0, 0, 569, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 570, 0, 0, 0, 0, 0, 0, 0, 571, 0, + 0, 0, 0, 0, 572, 186, 187, 188, 189, 190, 191, 192, + 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 573, 574, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 575, 576, 0, + 0, 0, 577, 578, 579, 186, 187, 188, 189, 190, 191, 192, + 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 188, 189, 190, 191, 192, 580, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 0, 0, 0, + 0, 0, 581, 0, 582, 0, 467, 0, 0, 0, 0, 0, + 0, 0, 371, 0, 0, 0, 0, 0, 0, 583, 0, 0, + 584, 
585, 586, 587, 588, 0, 0, 0, 0, 0, 0, 463, + 0, 0, 0, 0, 0, 0, 0, 462, 0, 589, 590, 0, + 0, 0, 0, 0, 0, 591, 186, 187, 188, 189, 190, 191, + 192, 0, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 186, 187, 188, 189, 190, 191, 192, 0, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 0, 0, 0, 214, 0, 0, 0, 0, 0, 675, 186, 187, + 188, 189, 190, 191, 192, 0, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 0, 0, 0, 0, 0, + 0, 0, 0, 215, 0, 0, 0, 0, 527, 186, 187, 188, + 189, 190, 191, 192, 676, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 0, 186, 187, 188, 189, 190, + 191, 192, 391, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 0, 0, 0, 0, 0, 408, 186, 187, + 188, 189, 190, 191, 192, 0, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 186, 187, 188, 189, 190, + 191, 192, 0, 193, 194, 425, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 186, 187, 188, 189, 190, 191, 192, 0, + 193, 194, 555, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205}; /* YYCONFLP[YYPACT[STATE-NUM]] -- Pointer into YYCONFL of start of list of conflicting reductions corresponding to action entry for state STATE-NUM in yytable. 0 means no conflicts. The list in yyconfl is terminated by a rule number of 0. 
*/ -static const unsigned char yyconflp[] = -{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 129, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 9, 0, 0, 0, 0, 0, 0, 11, - 13, 15, 17, 19, 21, 23, 0, 25, 27, 29, - 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, - 0, 0, 0, 0, 0, 51, 0, 0, 53, 55, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 57, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 59, 61, 63, 65, 67, 0, 0, 0, - 0, 0, 69, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 71, 0, 0, 73, 0, 0, 0, 0, - 0, 0, 0, 75, 0, 0, 77, 0, 0, 79, - 81, 0, 0, 0, 0, 0, 0, 0, 0, 83, - 0, 0, 0, 0, 0, 0, 85, 0, 87, 7, - 0, 0, 0, 0, 89, 91, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, - 0, 0, 0, 93, 0, 0, 0, 95, 0, 0, - 0, 0, 97, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 99, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 101, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 103, 0, 0, 0, 0, 105, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 107, 109, 0, 111, 0, 113, 115, 0, 117, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 119, 0, 0, 0, 0, 0, 121, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 123, 125, 0, 0, - 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 -}; +static const unsigned char yyconflp[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 131, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 9, 7, 0, 0, 0, 0, 0, 11, 13, 15, 17, 19, + 21, 23, 0, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, + 0, 0, 0, 0, 0, 51, 0, 0, 53, 55, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59, 61, 63, 65, 67, 0, + 0, 0, 0, 0, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71, 0, + 0, 73, 0, 0, 0, 0, 0, 0, 0, 75, 0, 0, 77, 79, 0, 0, + 81, 83, 0, 0, 0, 0, 0, 0, 0, 0, 85, 0, 0, 0, 0, 0, + 0, 87, 0, 89, 0, 0, 0, 0, 0, 91, 
93, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, 0, 0, + 97, 0, 0, 0, 0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 103, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 105, 0, 0, 0, 0, 107, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 109, 111, 0, 113, 0, 115, 117, 0, 119, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 121, 0, 0, 0, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 125, 127, 0, 0, 129, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0}; /* YYCONFL[I] -- lists of 
conflicting rule numbers, each terminated by 0, pointed into by YYCONFLP. */ -static const short int yyconfl[] = -{ - 0, 116, 0, 370, 0, 116, 0, 60, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 130, - 0, 130, 0, 130, 0, 130, 0, 130, 0, 74, - 0 -}; - -static const short int yycheck[] = -{ - 9, 71, 50, 351, 196, 117, 132, 3, 171, 107, - 117, 151, 130, 176, 117, 3, 4, 5, 6, 7, - 8, 3, 3, 117, 158, 158, 3, 6, 4, 17, - 18, 3, 69, 42, 150, 3, 24, 25, 234, 338, - 49, 50, 51, 52, 53, 479, 209, 163, 38, 6, - 158, 443, 135, 93, 93, 3, 96, 96, 147, 47, - 345, 18, 39, 51, 277, 205, 206, 39, 537, 354, - 182, 234, 118, 193, 230, 182, 40, 53, 87, 182, - 29, 337, 3, 296, 130, 201, 3, 219, 182, 3, - 213, 39, 305, 249, 226, 3, 4, 5, 6, 7, - 8, 234, 69, 235, 187, 26, 16, 576, 18, 17, - 18, 99, 26, 89, 254, 507, 24, 25, 238, 83, - 53, 284, 55, 113, 264, 134, 234, 103, 427, 477, - 219, 140, 295, 213, 157, 338, 184, 253, 341, 337, - 108, 575, 338, 51, 167, 133, 431, 156, 340, 125, - 118, 118, 101, 190, 243, 164, 165, 166, 167, 168, - 169, 170, 130, 172, 173, 174, 175, 176, 3, 63, - 179, 180, 181, 182, 183, 315, 213, 156, 187, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, - 107, 99, 116, 13, 14, 494, 16, 193, 497, 156, - 117, 169, 242, 242, 338, 158, 295, 339, 299, 556, - 298, 299, 344, 560, 303, 213, 307, 338, 565, 118, - 341, 116, 340, 190, 572, 573, 120, 236, 340, 146, - 3, 394, 339, 340, 337, 129, 224, 246, 213, 248, - 338, 135, 338, 337, 3, 341, 680, 214, 9, 634, - 3, 636, 339, 262, 340, 640, 173, 642, 107, 644, - 154, 270, 234, 352, 10, 11, 12, 13, 14, 15, - 
16, 3, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 401, 299, 296, 297, 415, - 338, 196, 18, 187, 303, 319, 320, 3, 646, 647, - 14, 464, 650, 422, 423, 394, 301, 396, 397, 398, - 136, 659, 664, 665, 104, 41, 627, 628, 212, 630, - 631, 632, 331, 107, 635, 299, 637, 277, 639, 340, - 641, 57, 340, 340, 242, 475, 324, 325, 326, 327, - 328, 329, 330, 331, 340, 333, 334, 335, 336, 358, - 359, 421, 340, 340, 80, 740, 741, 366, 340, 340, - 369, 370, 339, 526, 81, 107, 338, 339, 298, 340, - 540, 541, 337, 543, 340, 464, 546, 547, 340, 549, - 550, 551, 340, 337, 340, 555, 395, 557, 340, 559, - 4, 5, 6, 7, 564, 112, 340, 567, 487, 69, - 117, 340, 18, 751, 130, 26, 27, 28, 29, 30, - 127, 3, 3, 3, 3, 763, 324, 325, 326, 327, - 328, 329, 330, 331, 339, 333, 334, 335, 336, 146, - 156, 99, 340, 3, 158, 3, 3, 526, 108, 340, - 107, 450, 107, 169, 338, 454, 455, 338, 118, 3, - 3, 614, 3, 3, 193, 3, 8, 4, 3, 10, - 11, 12, 13, 14, 15, 16, 341, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 14, 490, 340, 653, 107, 36, 495, 99, 39, 40, - 340, 3, 501, 234, 26, 243, 39, 506, 3, 3, - 3, 171, 228, 3, 341, 514, 340, 516, 341, 0, - 1, 341, 63, 341, 3, 4, 5, 6, 7, 8, - 190, 340, 73, 74, 75, 76, 77, 340, 17, 18, - 340, 201, 83, 30, 3, 24, 25, 245, 3, 158, - 341, 341, 93, 163, 214, 96, 129, 38, 165, 3, - 339, 3, 245, 104, 341, 126, 107, 340, 228, 110, - 111, 16, 51, 341, 341, 341, 4, 338, 3, 120, - 3, 62, 341, 62, 583, 3, 127, 48, 129, 158, - 69, 3, 3, 129, 135, 136, 20, 129, 340, 598, - 599, 82, 338, 158, 341, 341, 110, 4, 341, 90, - 340, 671, 340, 154, 341, 341, 341, 158, 341, 338, - 99, 341, 163, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 174, 130, 341, 341, 340, 338, 118, - 341, 340, 123, 159, 339, 3, 187, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 338, 138, 48, 111, - 341, 338, 341, 4, 663, 664, 665, 20, 340, 158, - 340, 212, 44, 340, 340, 340, 217, 10, 11, 12, - 13, 14, 15, 16, 340, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, 340, 3, - 340, 242, 243, 3, 245, 20, 247, 
248, 130, 250, - 338, 190, 193, 4, 341, 341, 197, 341, 717, 189, - 5, 3, 4, 5, 6, 7, 8, 3, 5, 728, - 4, 231, 5, 341, 213, 17, 18, 245, 341, 338, - 341, 341, 24, 25, 26, 338, 189, 167, 20, 341, - 156, 33, 23, 24, 25, 26, 27, 28, 29, 30, - 4, 302, 130, 341, 341, 341, 341, 308, 20, 51, - 127, 104, 10, 11, 12, 13, 14, 15, 16, 188, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 7, 158, 5, 337, 338, 3, 338, - 341, 280, 281, 196, 86, 87, 4, 338, 7, 288, - 3, 341, 3, 341, 10, 294, 295, 99, 297, 298, - 299, 300, 728, 302, 303, 304, 236, 306, 307, 308, - 112, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 321, 322, 323, 324, 325, 326, 327, 328, - 329, 330, 331, 715, 333, 334, 335, 336, 717, 600, - 410, 340, 614, 3, 4, 5, 6, 7, 8, 107, - 24, 25, 26, 27, 28, 29, 30, 17, 18, 587, - 141, 5, 374, 638, 24, 25, 3, 4, 5, 6, - 7, 8, 774, 33, 724, 104, 346, 37, 719, -1, - 17, 18, -1, -1, -1, -1, -1, 24, 25, 26, - -1, 51, -1, -1, 3, 4, 5, 6, 7, 8, - -1, -1, -1, -1, -1, 207, 208, 209, 17, 18, - 212, -1, -1, -1, 51, 24, 25, 26, -1, 3, - 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, - -1, -1, -1, 17, 18, -1, -1, -1, -1, 99, - 24, 25, 51, -1, 3, 4, 5, 6, 7, 8, - -1, -1, -1, -1, -1, -1, -1, -1, 17, 18, - -1, -1, 99, -1, -1, 24, 25, 51, -1, -1, - -1, -1, 3, 4, 5, 6, 7, 8, -1, -1, - -1, -1, -1, -1, -1, -1, 17, 18, 341, -1, - 99, -1, 51, 24, 25, 3, 4, 5, 6, 7, - 8, -1, -1, -1, -1, -1, -1, -1, 1, 17, - 18, -1, -1, -1, -1, 99, 24, 25, -1, -1, - 51, 80, 324, 325, 326, 327, 328, 329, 330, 331, - -1, 333, 334, 335, 336, -1, -1, -1, 340, 199, - 99, -1, -1, 51, -1, 38, -1, -1, -1, 80, - -1, -1, 3, 4, 5, 6, 7, 8, -1, -1, - -1, -1, -1, -1, -1, -1, 17, 18, 99, 62, - -1, -1, 80, 24, 25, -1, -1, -1, -1, -1, - 338, 3, 4, 5, 6, 7, 8, -1, -1, 82, - -1, 99, -1, -1, -1, 17, 18, 90, -1, -1, - 51, -1, 24, 25, -1, -1, -1, -1, -1, 193, - 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, - -1, -1, -1, -1, 17, 18, -1, -1, -1, 51, - 123, 24, 25, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 138, -1, -1, 99, -1, - -1, -1, -1, -1, 
-1, -1, -1, -1, 51, -1, - -1, -1, -1, -1, 324, 325, 326, 327, 328, 329, - 330, 331, -1, 333, 334, 335, 336, 99, -1, -1, - 340, -1, -1, -1, -1, -1, -1, 324, 325, 326, - 327, 328, 329, 330, 331, -1, 333, 334, 335, 336, - 193, -1, -1, 340, 197, -1, 99, -1, -1, -1, - -1, -1, -1, -1, -1, 324, 325, 326, 327, 328, - 329, 330, 331, -1, 333, 334, 335, 336, -1, -1, - -1, 340, -1, -1, -1, -1, -1, -1, -1, -1, - 324, 325, 326, 327, 328, 329, 330, 331, -1, 333, - 334, 335, 336, -1, -1, -1, 340, -1, -1, -1, - -1, -1, -1, -1, -1, 324, 325, 326, 327, 328, - 329, 330, 331, -1, 333, 334, 335, 336, -1, -1, - -1, 340, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 324, 325, 326, 327, 328, 329, 330, - 331, -1, 333, 334, 335, 336, -1, -1, -1, 340, - -1, -1, -1, -1, -1, -1, 324, 325, 326, 327, - 328, 329, 330, 331, -1, 333, 334, 335, 336, -1, - -1, -1, 340, 10, 11, 12, 13, 14, 15, 16, - -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 12, 13, 14, 15, 16, -1, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 324, 325, 326, 327, 328, 329, 330, - 331, -1, 333, 334, 335, 336, -1, -1, -1, 340, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 324, 325, 326, 327, 328, 329, 330, 331, - -1, 333, 334, 335, 336, -1, -1, -1, 340, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 324, 325, 326, 327, 328, 329, 330, 331, -1, - 333, 334, 335, 336, -1, -1, -1, 340, 10, 11, - 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, - 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 10, 11, 12, 13, 14, 15, 16, -1, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 10, 11, 12, 13, 14, 15, 16, -1, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, - -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 10, 11, 12, 13, 14, 15, - 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 10, 11, 12, 13, 
14, - 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, - 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, - 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, -1, -1, - -1, 338, -1, -1, 341, 10, 11, 12, 13, 14, - 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, - 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, - 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, - 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, - 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - -1, -1, 10, 11, 12, 13, 14, 15, 16, 341, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, - 341, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 10, 11, 12, 13, 14, 15, - 16, 341, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 10, 11, 12, 13, 14, - 15, 16, 341, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, - 14, 15, 16, 341, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 73, 74, 75, - 76, 77, -1, -1, 341, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 10, 11, - 12, 13, 14, 15, 16, 341, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, - 11, 12, 13, 14, 15, 16, 341, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - -1, 43, 44, 45, 46, -1, -1, 341, -1, -1, - -1, 53, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 341, 71, - 72, -1, -1, -1, -1, -1, 78, -1, 174, -1, - -1, -1, -1, -1, -1, -1, -1, 89, -1, -1, - -1, -1, -1, 338, -1, 97, -1, -1, -1, -1, - -1, 103, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 338, -1, 107, -1, -1, -1, - -1, -1, 124, 125, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 338, -1, -1, -1, -1, - -1, 143, 144, -1, -1, -1, 148, 149, 150, -1, - -1, 247, 248, -1, 250, -1, 338, -1, -1, -1, - -1, -1, -1, 217, -1, -1, -1, 
242, -1, -1, - -1, -1, -1, -1, -1, 177, -1, 338, -1, -1, - -1, -1, -1, -1, -1, 302, -1, -1, -1, -1, - -1, 308, -1, -1, 196, -1, 198, -1, -1, -1, - 338, -1, -1, -1, -1, 217, -1, -1, -1, -1, - -1, -1, -1, 215, -1, -1, 218, 219, 220, 221, - 222, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 239, 240, -1, - -1, -1, -1, -1, -1, 247, 10, 11, 12, 13, - 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, - 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, - 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, -1, - -1, 10, 11, 12, 13, 14, 15, 16, 40, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 96, -1, -1, -1, -1, -1, -1, -1, - 39, -1, -1, -1, -1, -1, -1, 3, -1, -1, - -1, -1, -1, 96, 10, 11, 12, 13, 14, 15, - 16, 83, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, -1, 10, 11, 12, 13, - 14, 15, 16, 39, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, -1, -1, -1, - -1, -1, 36, 10, 11, 12, 13, 14, 15, 16, - -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 11, 12, 13, 14, 15, 16, - -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 13, 14, 15, 16, -1, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 13, 14, 15, 16, -1, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 -}; +static const short int yyconfl[] = { + 0, 118, 0, 372, 0, 118, 0, 60, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, + 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, + 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, + 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, + 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, + 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, + 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, 0, 132, + 0, 132, 0, 132, 0, 76, 0}; + +static const short int yycheck[] = { + 10, 3, 75, 11, 178, 365, 50, 107, 3, 
183, 118, 157, 3, 501, 3, + 96, 3, 3, 4, 5, 6, 7, 8, 3, 170, 159, 244, 158, 3, 235, + 6, 17, 18, 131, 139, 118, 46, 39, 24, 25, 118, 159, 118, 53, 54, + 55, 56, 57, 42, 159, 3, 6, 456, 231, 228, 3, 4, 5, 6, 7, + 8, 47, 208, 18, 39, 51, 32, 337, 197, 17, 18, 111, 250, 154, 248, + 183, 24, 25, 108, 69, 90, 212, 213, 194, 69, 33, 39, 299, 300, 37, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 183, 93, 214, 51, 96, + 183, 136, 183, 560, 3, 598, 40, 93, 99, 109, 96, 107, 235, 522, 265, + 338, 278, 296, 3, 119, 235, 120, 118, 239, 119, 346, 141, 338, 307, 3, + 266, 131, 147, 337, 499, 297, 185, 108, 359, 214, 276, 26, 599, 134, 306, + 3, 99, 368, 163, 83, 117, 147, 26, 188, 159, 300, 171, 172, 173, 174, + 175, 176, 177, 308, 179, 180, 181, 182, 183, 255, 170, 186, 187, 188, 189, + 190, 157, 214, 174, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 327, 158, 119, 579, 240, 191, 157, 583, 338, 194, 191, 168, 588, 249, + 698, 340, 119, 223, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 131, + 215, 307, 332, 444, 214, 117, 339, 340, 407, 315, 595, 596, 4, 338, 225, + 250, 3, 340, 13, 14, 81, 16, 16, 258, 18, 260, 243, 200, 4, 5, + 6, 7, 340, 18, 337, 235, 337, 243, 53, 274, 55, 647, 648, 414, 650, + 651, 652, 282, 214, 655, 635, 657, 113, 659, 3, 661, 41, 118, 681, 682, + 366, 53, 26, 27, 28, 29, 30, 128, 338, 338, 9, 341, 57, 308, 309, + 339, 338, 342, 477, 341, 315, 666, 667, 3, 338, 670, 147, 341, 244, 416, + 417, 418, 654, 358, 656, 80, 340, 89, 660, 107, 662, 407, 664, 409, 410, + 411, 3, 300, 341, 197, 14, 103, 339, 324, 325, 326, 327, 328, 329, 330, + 331, 356, 333, 334, 335, 336, 340, 3, 340, 340, 320, 321, 488, 302, 126, + 340, 371, 372, 338, 339, 137, 104, 541, 300, 379, 443, 131, 382, 383, 107, + 278, 324, 325, 326, 327, 328, 329, 330, 331, 340, 333, 334, 335, 336, 340, + 340, 340, 340, 340, 340, 340, 477, 157, 408, 24, 25, 26, 27, 28, 29, + 30, 337, 338, 299, 769, 170, 197, 340, 340, 422, 425, 426, 340, 340, 39, + 780, 758, 759, 340, 337, 337, 107, 18, 509, 
3, 4, 5, 6, 7, 8, + 3, 3, 244, 3, 3, 339, 99, 3, 17, 18, 3, 159, 3, 107, 338, + 24, 25, 26, 463, 107, 340, 630, 467, 468, 33, 541, 3, 63, 3, 3, + 194, 3, 8, 4, 229, 3, 14, 107, 341, 340, 3, 3, 51, 3, 341, + 341, 99, 340, 493, 492, 3, 235, 495, 26, 244, 3, 3, 3, 341, 3, + 4, 5, 6, 7, 8, 341, 511, 341, 341, 30, 340, 516, 108, 17, 18, + 340, 521, 86, 87, 340, 24, 25, 340, 3, 529, 121, 531, 130, 166, 246, + 99, 3, 159, 164, 130, 337, 338, 3, 339, 246, 136, 341, 127, 340, 113, + 550, 16, 51, 341, 341, 555, 341, 130, 3, 159, 130, 4, 341, 338, 155, + 10, 11, 12, 13, 14, 15, 16, 3, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 3, 3, 48, 3, 20, 36, 341, 340, 39, + 40, 159, 188, 4, 111, 99, 341, 3, 4, 5, 6, 7, 8, 341, 341, + 340, 338, 131, 338, 614, 615, 17, 18, 63, 341, 341, 341, 213, 24, 25, + 688, 341, 340, 73, 74, 75, 76, 77, 341, 340, 160, 341, 3, 83, 340, + 339, 338, 112, 48, 208, 209, 210, 341, 93, 213, 51, 96, 338, 341, 4, + 20, 340, 340, 340, 104, 44, 62, 107, 108, 159, 340, 111, 112, 69, 3, + 340, 3, 340, 340, 340, 131, 121, 20, 338, 4, 680, 681, 682, 128, 341, + 130, 341, 341, 5, 3, 190, 136, 137, 5, 341, 4, 341, 5, 99, 341, + 338, 3, 4, 5, 6, 7, 8, 341, 232, 338, 155, 190, 246, 341, 159, + 17, 18, 20, 119, 164, 168, 341, 24, 25, 26, 157, 4, 341, 131, 341, + 175, 20, 341, 128, 189, 735, 7, 3, 4, 5, 6, 7, 8, 188, 243, + 745, 159, 338, 5, 51, 197, 17, 18, 3, 338, 4, 338, 12, 24, 25, + 324, 325, 326, 327, 328, 329, 330, 331, 213, 333, 334, 335, 336, 218, 7, + 3, 340, 23, 24, 25, 26, 27, 28, 29, 30, 51, 3, 735, 341, 341, + 191, 745, 616, 250, 630, 387, 436, 99, 243, 244, 733, 246, 148, 248, 249, + 6, 251, 736, 554, 658, 111, 741, -1, 214, 80, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 324, 325, 326, 327, 328, 329, 330, 331, 99, 333, + 334, 335, 336, 791, 360, -1, 340, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, -1, -1, -1, -1, 303, -1, -1, -1, -1, -1, 309, + -1, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, + 281, 282, 17, 18, -1, -1, -1, -1, 289, 24, 
25, 26, 337, 338, 295, + 296, 341, 298, 299, 300, 301, -1, 303, 304, 305, -1, 307, 308, 309, -1, + -1, -1, -1, -1, -1, -1, 51, -1, -1, -1, -1, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, -1, 333, 334, 335, 336, -1, -1, -1, 340, + 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, 69, -1, -1, -1, 17, + 18, -1, -1, -1, -1, -1, 24, 25, -1, 99, -1, -1, -1, -1, -1, + 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, 17, + 18, -1, -1, 51, -1, 109, 24, 25, -1, 3, 4, 5, 6, 7, 8, + 119, -1, -1, -1, -1, -1, -1, -1, 17, 18, -1, -1, -1, -1, -1, + 24, 25, -1, 51, -1, -1, -1, 324, 325, 326, 327, 328, 329, 330, 331, + -1, 333, 334, 335, 336, -1, 99, -1, 340, -1, -1, -1, 51, -1, -1, + -1, -1, 80, -1, -1, -1, -1, -1, 172, -1, -1, -1, -1, 324, 325, + 326, 327, 328, 329, 330, 331, 99, 333, 334, 335, 336, 80, 191, -1, 340, + -1, -1, 3, 4, 5, 6, 7, 8, 202, -1, -1, -1, -1, -1, -1, + 99, 17, 18, -1, -1, -1, 215, -1, 24, 25, -1, 3, 4, 5, 6, + 7, 8, -1, -1, -1, 229, -1, -1, -1, -1, 17, 18, -1, -1, -1, + -1, -1, 24, 25, -1, 51, -1, -1, -1, -1, -1, 194, 3, 4, 5, + 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, 17, 18, -1, 51, + -1, -1, -1, 24, 25, -1, -1, -1, -1, 563, 564, -1, 566, -1, -1, + 569, 570, -1, 572, 573, 574, -1, -1, 99, 578, -1, 580, -1, 582, -1, + 51, -1, -1, 587, -1, -1, 590, -1, -1, 324, 325, 326, 327, 328, 329, + 330, 331, 99, 333, 334, 335, 336, -1, -1, -1, 340, 10, 11, 12, 13, + 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, -1, 99, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 324, 325, 326, 327, 328, 329, 330, 331, 673, + 333, 334, 335, 336, -1, -1, -1, 340, 22, 23, 24, 25, 26, 27, 28, + 29, 30, -1, -1, -1, -1, 324, 325, 326, 327, 328, 329, 330, 331, -1, + 333, 334, 335, 336, -1, -1, -1, 340, -1, -1, -1, -1, -1, -1, -1, + 324, 325, 326, 327, 328, 329, 330, 331, 104, 333, 334, 335, 336, -1, -1, + -1, 340, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 
28, 29, 30, 10, 11, 12, 13, 14, 15, 16, + -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, + 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, -1, -1, -1, 324, 325, 326, 327, 328, 329, 330, + 331, -1, 333, 334, 335, 336, -1, -1, -1, 340, -1, -1, -1, -1, -1, + -1, -1, 324, 325, 326, 327, 328, 329, 330, 331, -1, 333, 334, 335, 336, + -1, -1, -1, 340, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 324, 325, 326, 327, 328, 329, 330, 331, -1, 333, 334, 335, + 336, -1, -1, -1, 340, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, + 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, -1, 338, -1, -1, 341, 10, 11, + 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, -1, 341, 10, 11, 12, 13, 14, 15, 16, -1, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, + 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, 14, 15, + 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 338, -1, -1, 341, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 10, 11, 12, 13, 14, 15, 16, 341, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, + 341, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, + 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1, -1, -1, -1, + -1, -1, 10, 11, 12, 13, 14, 15, 16, 107, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, -1, -1, -1, -1, -1, -1, 10, + 11, 12, 13, 14, 15, 16, 341, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, 
341, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, + 14, 15, 16, 341, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, 16, 341, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0, 1, + 10, 11, 12, 13, 14, 15, 16, 341, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, -1, -1, -1, -1, -1, 1, -1, 341, -1, + -1, -1, -1, -1, -1, -1, 38, -1, -1, -1, 73, 74, 75, 76, 77, + -1, -1, -1, -1, 341, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 62, -1, -1, 38, -1, -1, -1, -1, -1, -1, 341, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 82, 83, -1, -1, -1, -1, -1, 62, 90, 13, + 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 82, 83, -1, -1, -1, -1, -1, -1, 90, -1, -1, -1, -1, + 338, -1, 124, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 139, -1, -1, -1, 338, 175, -1, -1, -1, -1, -1, -1, 124, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 338, -1, 139, + 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 338, -1, -1, -1, -1, -1, -1, -1, -1, 194, -1, -1, + -1, 198, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 338, -1, -1, -1, -1, 248, 249, -1, 251, 194, -1, -1, -1, 198, -1, + -1, -1, -1, -1, -1, 232, -1, -1, 243, -1, -1, -1, 338, -1, -1, + -1, -1, -1, -1, 303, -1, -1, -1, -1, -1, 309, -1, -1, -1, -1, + -1, -1, 232, 338, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, 14, + 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 43, 44, 45, 46, 11, 12, 13, 14, 15, 16, 53, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1, -1, -1, -1, 71, + 72, -1, -1, -1, -1, -1, 78, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 89, -1, -1, -1, -1, -1, -1, -1, 97, -1, -1, -1, -1, + -1, 103, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 125, 126, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 144, 145, -1, + -1, -1, 149, 150, 151, 10, 11, 
12, 13, 14, 15, 16, -1, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 12, 13, 14, 15, + 16, 178, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + -1, -1, -1, -1, -1, 197, -1, 199, -1, 107, -1, -1, -1, -1, -1, + -1, -1, 218, -1, -1, -1, -1, -1, -1, 216, -1, -1, 219, 220, 221, + 222, 223, -1, -1, -1, -1, -1, -1, 218, -1, -1, -1, -1, -1, -1, + -1, 96, -1, 240, 241, -1, -1, -1, -1, -1, -1, 248, 10, 11, 12, + 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, -1, -1, -1, 3, -1, -1, + -1, -1, -1, 40, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -1, -1, -1, -1, -1, + -1, -1, -1, 39, -1, -1, -1, -1, 96, 10, 11, 12, 13, 14, 15, + 16, 83, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + -1, 10, 11, 12, 13, 14, 15, 16, 39, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, -1, -1, -1, -1, -1, 36, 10, 11, + 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 10, 11, 12, 13, 14, 15, 16, -1, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 10, 11, 12, 13, 14, + 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30}; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. 
*/ -static const unsigned short int yystos[] = -{ - 0, 1, 38, 62, 82, 90, 123, 138, 193, 197, - 343, 344, 345, 380, 382, 395, 396, 401, 402, 403, - 405, 408, 416, 423, 337, 213, 69, 108, 171, 190, - 201, 214, 228, 385, 397, 424, 69, 118, 190, 385, - 410, 213, 357, 3, 4, 5, 6, 7, 8, 17, - 18, 24, 25, 51, 62, 69, 99, 118, 190, 213, - 280, 281, 288, 294, 295, 297, 300, 302, 303, 304, - 306, 308, 321, 322, 323, 324, 325, 326, 327, 328, - 329, 330, 331, 333, 334, 335, 336, 340, 373, 417, - 421, 422, 0, 1, 344, 337, 3, 116, 381, 158, - 381, 213, 118, 3, 107, 117, 146, 173, 425, 116, - 404, 3, 404, 213, 81, 112, 117, 127, 146, 411, - 3, 26, 33, 86, 87, 112, 207, 208, 209, 212, - 358, 359, 373, 339, 9, 373, 99, 373, 373, 373, - 242, 373, 379, 69, 190, 213, 3, 340, 107, 3, - 299, 196, 14, 419, 3, 301, 136, 354, 104, 299, - 107, 418, 354, 277, 340, 340, 340, 340, 340, 340, - 340, 340, 340, 340, 340, 340, 340, 373, 374, 10, - 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 302, - 308, 277, 296, 305, 298, 299, 307, 337, 337, 107, - 18, 3, 3, 3, 381, 3, 339, 426, 3, 340, - 360, 361, 362, 365, 372, 425, 107, 338, 99, 3, - 158, 3, 404, 3, 107, 107, 338, 3, 39, 364, - 3, 26, 373, 340, 373, 379, 93, 96, 242, 3, - 3, 3, 345, 3, 418, 419, 4, 298, 299, 373, - 8, 3, 14, 420, 418, 373, 47, 133, 224, 373, - 376, 373, 373, 26, 373, 373, 373, 373, 374, 373, - 373, 373, 373, 374, 338, 341, 341, 373, 373, 373, - 373, 373, 6, 18, 156, 340, 13, 14, 16, 373, - 33, 37, 199, 340, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 418, 107, 419, 419, 374, - 99, 340, 3, 234, 398, 26, 339, 364, 3, 345, - 360, 243, 346, 63, 120, 135, 154, 187, 212, 338, - 366, 368, 39, 363, 234, 360, 3, 3, 3, 406, - 196, 340, 412, 374, 360, 358, 3, 345, 217, 93, - 96, 373, 373, 341, 418, 419, 338, 373, 419, 107, - 338, 341, 373, 338, 338, 341, 341, 341, 39, 341, - 341, 341, 341, 341, 341, 374, 16, 18, 6, 156, - 345, 374, 373, 373, 340, 36, 340, 340, 340, 345, - 419, 3, 245, 3, 356, 
117, 182, 339, 340, 383, - 384, 319, 320, 399, 158, 3, 339, 341, 341, 373, - 163, 352, 135, 187, 369, 362, 360, 129, 165, 367, - 3, 360, 346, 426, 339, 50, 184, 338, 407, 3, - 415, 356, 238, 345, 245, 346, 341, 373, 373, 96, - 217, 373, 373, 373, 107, 126, 378, 378, 4, 53, - 89, 103, 125, 377, 340, 16, 341, 341, 345, 374, - 373, 345, 345, 345, 341, 418, 4, 338, 341, 3, - 3, 108, 118, 130, 169, 386, 387, 363, 3, 364, - 48, 354, 367, 367, 129, 158, 362, 129, 346, 3, - 3, 20, 158, 338, 409, 341, 340, 413, 409, 4, - 110, 347, 96, 373, 104, 341, 338, 341, 373, 373, - 341, 341, 341, 345, 374, 375, 340, 341, 341, 341, - 341, 341, 419, 338, 356, 157, 167, 340, 383, 43, - 44, 45, 46, 53, 71, 72, 78, 89, 97, 103, - 124, 125, 143, 144, 148, 149, 150, 177, 196, 198, - 215, 218, 219, 220, 221, 222, 239, 240, 247, 389, - 118, 130, 340, 340, 130, 341, 338, 345, 340, 353, - 373, 362, 373, 158, 234, 370, 371, 362, 339, 80, - 373, 159, 3, 80, 373, 414, 409, 338, 48, 111, - 351, 373, 373, 341, 73, 74, 75, 76, 77, 174, - 247, 248, 250, 341, 338, 341, 345, 374, 4, 20, - 158, 386, 340, 391, 391, 391, 391, 391, 391, 340, - 391, 391, 391, 44, 392, 391, 392, 391, 340, 391, - 392, 391, 392, 340, 391, 388, 340, 340, 356, 356, - 340, 383, 386, 3, 400, 40, 83, 349, 373, 340, - 371, 3, 130, 20, 338, 338, 341, 4, 348, 373, - 373, 352, 341, 341, 375, 341, 341, 189, 5, 3, - 341, 5, 393, 394, 393, 393, 4, 390, 393, 393, - 393, 394, 393, 394, 393, 390, 393, 394, 393, 394, - 5, 394, 18, 41, 57, 80, 130, 156, 169, 228, - 356, 356, 341, 341, 356, 391, 341, 338, 356, 231, - 80, 373, 414, 414, 338, 189, 245, 350, 338, 354, - 20, 167, 383, 338, 341, 230, 249, 53, 55, 338, - 341, 341, 341, 156, 4, 4, 5, 6, 7, 130, - 130, 340, 341, 341, 341, 349, 353, 341, 415, 413, - 20, 188, 348, 127, 355, 7, 158, 5, 196, 3, - 4, 394, 394, 356, 338, 7, 356, 3, 341, 3, - 341, 400 -}; +static const unsigned short int yystos[] = { + 0, 1, 38, 62, 82, 83, 90, 124, 139, 194, 198, 232, 343, 344, 345, + 380, 382, 395, 396, 401, 
402, 403, 405, 408, 416, 423, 427, 337, 214, 69, + 109, 172, 191, 202, 215, 229, 385, 397, 424, 3, 69, 119, 191, 385, 410, + 214, 357, 3, 4, 5, 6, 7, 8, 17, 18, 24, 25, 51, 62, 69, + 99, 119, 191, 214, 281, 282, 289, 295, 296, 298, 301, 303, 304, 305, 307, + 309, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 333, 334, 335, 336, + 340, 373, 417, 421, 422, 3, 340, 362, 372, 0, 1, 344, 337, 3, 117, + 381, 159, 381, 214, 119, 3, 107, 118, 147, 174, 425, 117, 404, 3, 404, + 214, 81, 113, 118, 128, 147, 411, 3, 26, 33, 86, 87, 113, 208, 209, + 210, 213, 358, 359, 373, 339, 9, 373, 99, 373, 373, 373, 243, 373, 379, + 69, 191, 214, 3, 340, 107, 3, 300, 197, 14, 419, 3, 302, 137, 354, + 104, 300, 107, 418, 354, 278, 340, 340, 340, 340, 340, 340, 340, 340, 340, + 340, 340, 340, 340, 373, 374, 10, 11, 12, 13, 14, 15, 16, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 303, 309, 278, 297, + 306, 299, 300, 308, 3, 39, 339, 364, 345, 360, 361, 362, 365, 197, 39, + 363, 337, 337, 107, 18, 3, 3, 3, 381, 3, 339, 426, 3, 360, 425, + 107, 338, 99, 3, 159, 3, 404, 3, 107, 107, 338, 364, 3, 26, 373, + 340, 373, 379, 93, 96, 243, 3, 3, 3, 345, 3, 418, 419, 4, 299, + 300, 373, 8, 3, 14, 420, 418, 373, 47, 134, 225, 373, 376, 373, 373, + 26, 373, 373, 373, 373, 374, 373, 373, 373, 373, 374, 338, 341, 341, 373, + 373, 373, 373, 373, 6, 18, 157, 340, 13, 14, 16, 373, 33, 37, 200, + 340, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 418, 107, 419, + 419, 3, 3, 341, 341, 63, 108, 121, 136, 155, 188, 213, 338, 366, 368, + 373, 428, 3, 374, 99, 340, 3, 235, 398, 26, 339, 244, 346, 235, 360, + 3, 3, 3, 406, 197, 340, 412, 374, 360, 358, 345, 218, 93, 96, 373, + 373, 341, 418, 419, 338, 373, 419, 107, 338, 341, 373, 338, 338, 341, 341, + 341, 39, 341, 341, 341, 341, 341, 341, 374, 16, 18, 6, 157, 345, 374, + 373, 373, 340, 36, 340, 340, 340, 345, 419, 3, 364, 108, 136, 188, 369, + 362, 360, 130, 166, 367, 20, 338, 346, 246, 3, 356, 118, 183, 339, 340, + 383, 384, 320, 321, 399, 
159, 373, 164, 352, 360, 346, 426, 339, 50, 185, + 338, 407, 3, 415, 356, 239, 345, 246, 346, 341, 373, 373, 96, 218, 373, + 373, 373, 107, 127, 378, 378, 4, 53, 89, 103, 126, 377, 340, 16, 341, + 341, 345, 374, 373, 345, 345, 345, 341, 418, 367, 367, 367, 130, 159, 362, + 130, 373, 373, 4, 338, 341, 3, 3, 109, 119, 131, 170, 386, 387, 363, + 3, 48, 354, 346, 3, 3, 20, 159, 338, 409, 341, 340, 413, 409, 4, + 111, 347, 96, 373, 104, 341, 338, 341, 373, 373, 341, 341, 341, 345, 374, + 375, 340, 341, 341, 341, 341, 341, 419, 362, 373, 159, 235, 370, 371, 362, + 20, 338, 356, 158, 168, 340, 383, 43, 44, 45, 46, 53, 71, 72, 78, + 89, 97, 103, 125, 126, 144, 145, 149, 150, 151, 178, 197, 199, 216, 219, + 220, 221, 222, 223, 240, 241, 248, 389, 119, 131, 340, 340, 131, 341, 338, + 345, 340, 353, 373, 339, 80, 373, 160, 3, 80, 373, 414, 409, 338, 48, + 112, 351, 373, 373, 341, 73, 74, 75, 76, 77, 175, 248, 249, 251, 341, + 338, 341, 345, 374, 373, 340, 371, 373, 4, 20, 159, 386, 340, 391, 391, + 391, 391, 391, 391, 340, 391, 391, 391, 44, 392, 391, 392, 391, 340, 391, + 392, 391, 392, 340, 391, 388, 340, 340, 356, 356, 340, 383, 386, 3, 400, + 40, 83, 349, 3, 131, 20, 338, 338, 341, 4, 348, 373, 373, 352, 341, + 341, 375, 341, 341, 356, 190, 5, 3, 341, 5, 393, 394, 393, 393, 4, + 390, 393, 393, 393, 394, 393, 394, 393, 390, 393, 394, 393, 394, 5, 394, + 18, 41, 57, 80, 131, 157, 170, 229, 356, 356, 341, 341, 356, 391, 341, + 338, 232, 80, 373, 414, 414, 338, 190, 246, 350, 338, 354, 341, 20, 168, + 383, 338, 341, 231, 250, 53, 55, 338, 341, 341, 341, 157, 4, 4, 5, + 6, 7, 131, 131, 340, 341, 341, 341, 349, 353, 415, 413, 20, 189, 348, + 128, 355, 7, 159, 5, 197, 3, 4, 394, 394, 356, 338, 7, 356, 3, + 341, 3, 341, 400}; /* Error token number */ #define YYTERROR 1 - /* This macro is provided for backward compatibility. 
*/ #ifndef YY_LOCATION_PRINT -# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +#define YY_LOCATION_PRINT(File, Loc) ((void)0) #endif - /* YYLEX -- calling `yylex' with the right arguments. */ #ifdef YYLEX_PARAM -# define YYLEX yylex (&yylval, YYLEX_PARAM) +#define YYLEX yylex(&yylval, YYLEX_PARAM) #else -# define YYLEX yylex (&yylval) +#define YYLEX yylex(&yylval) #endif #undef yynerrs @@ -1863,81 +1566,75 @@ static const unsigned short int yystos[] = #undef yylloc #define yylloc (yystackp->yyloc) - static const int YYEOF = 0; static const int YYEMPTY = -2; typedef enum { yyok, yyaccept, yyabort, yyerr } YYRESULTTAG; -#define YYCHK(YYE) \ - do { YYRESULTTAG yyflag = YYE; if (yyflag != yyok) return yyflag; } \ - while (YYID (0)) +#define YYCHK(YYE) \ + do { \ + YYRESULTTAG yyflag = YYE; \ + if (yyflag != yyok) return yyflag; \ + } while (YYID(0)) #if YYDEBUG -# ifndef YYFPRINTF -# define YYFPRINTF fprintf -# endif - -# define YYDPRINTF(Args) \ -do { \ - if (yydebug) \ - YYFPRINTF Args; \ -} while (YYID (0)) +#ifndef YYFPRINTF +#define YYFPRINTF fprintf +#endif +#define YYDPRINTF(Args) \ + do { \ + if (yydebug) YYFPRINTF Args; \ + } while (YYID(0)) /*--------------------------------. | Print this symbol on YYOUTPUT. 
| `--------------------------------*/ /*ARGSUSED*/ -static void -yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, struct ParseResult* result) -{ +static void yy_symbol_value_print(FILE *yyoutput, int yytype, + YYSTYPE const *const yyvaluep, + struct ParseResult *result) { FILE *yyo = yyoutput; - YYUSE (yyo); - if (!yyvaluep) - return; - YYUSE (result); -# ifdef YYPRINT - if (yytype < YYNTOKENS) - YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); -# else - YYUSE (yyoutput); -# endif - switch (yytype) - { - default: - break; - } + YYUSE(yyo); + if (!yyvaluep) return; + YYUSE(result); +#ifdef YYPRINT + if (yytype < YYNTOKENS) YYPRINT(yyoutput, yytoknum[yytype], *yyvaluep); +#else + YYUSE(yyoutput); +#endif + switch (yytype) { + default: + break; + } } - /*--------------------------------. | Print this symbol on YYOUTPUT. | `--------------------------------*/ -static void -yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, struct ParseResult* result) -{ +static void yy_symbol_print(FILE *yyoutput, int yytype, + YYSTYPE const *const yyvaluep, + struct ParseResult *result) { if (yytype < YYNTOKENS) - YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); + YYFPRINTF(yyoutput, "token %s (", yytname[yytype]); else - YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]); + YYFPRINTF(yyoutput, "nterm %s (", yytname[yytype]); - yy_symbol_value_print (yyoutput, yytype, yyvaluep, result); - YYFPRINTF (yyoutput, ")"); + yy_symbol_value_print(yyoutput, yytype, yyvaluep, result); + YYFPRINTF(yyoutput, ")"); } -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ -do { \ - if (yydebug) \ - { \ - YYFPRINTF (stderr, "%s ", Title); \ - yy_symbol_print (stderr, Type, Value, result); \ - YYFPRINTF (stderr, "\n"); \ - } \ -} while (YYID (0)) +#define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ + do { \ + if (yydebug) { \ + YYFPRINTF(stderr, "%s ", Title); \ + yy_symbol_print(stderr, Type, Value, result); \ + YYFPRINTF(stderr, 
"\n"); \ + } \ + } while (YYID(0)) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ @@ -1945,14 +1642,14 @@ int yydebug; #else /* !YYDEBUG */ -# define YYDPRINTF(Args) -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) +#define YYDPRINTF(Args) +#define YY_SYMBOL_PRINT(Title, Type, Value, Location) #endif /* !YYDEBUG */ /* YYINITDEPTH -- initial size of the parser's stacks. */ #ifndef YYINITDEPTH -# define YYINITDEPTH 200 +#define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only @@ -1963,7 +1660,7 @@ int yydebug; evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH -# define YYMAXDEPTH 10000 +#define YYMAXDEPTH 10000 #endif /* Minimum number of free items on the stack allowed after an @@ -1974,52 +1671,45 @@ int yydebug; #define YYHEADROOM 2 #ifndef YYSTACKEXPANDABLE -# if (! defined __cplusplus \ - || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)) -# define YYSTACKEXPANDABLE 1 -# else -# define YYSTACKEXPANDABLE 0 -# endif +#if (!defined __cplusplus || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)) +#define YYSTACKEXPANDABLE 1 +#else +#define YYSTACKEXPANDABLE 0 +#endif #endif #if YYSTACKEXPANDABLE -# define YY_RESERVE_GLRSTACK(Yystack) \ - do { \ - if (Yystack->yyspaceLeft < YYHEADROOM) \ - yyexpandGLRStack (Yystack); \ - } while (YYID (0)) +#define YY_RESERVE_GLRSTACK(Yystack) \ + do { \ + if (Yystack->yyspaceLeft < YYHEADROOM) yyexpandGLRStack(Yystack); \ + } while (YYID(0)) #else -# define YY_RESERVE_GLRSTACK(Yystack) \ - do { \ - if (Yystack->yyspaceLeft < YYHEADROOM) \ - yyMemoryExhausted (Yystack); \ - } while (YYID (0)) +#define YY_RESERVE_GLRSTACK(Yystack) \ + do { \ + if (Yystack->yyspaceLeft < YYHEADROOM) yyMemoryExhausted(Yystack); \ + } while (YYID(0)) #endif - #if YYERROR_VERBOSE -# ifndef yystpcpy -# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE -# define yystpcpy stpcpy -# else +#ifndef yystpcpy 
+#if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE +#define yystpcpy stpcpy +#else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ -static char * -yystpcpy (char *yydest, const char *yysrc) -{ +static char *yystpcpy(char *yydest, const char *yysrc) { char *yyd = yydest; const char *yys = yysrc; - while ((*yyd++ = *yys++) != '\0') - continue; + while ((*yyd++ = *yys++) != '\0') continue; return yyd - 1; } -# endif -# endif +#endif +#endif -# ifndef yytnamerr +#ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary quotes and backslashes, so that it's suitable for yyerror. The heuristic is that double-quoting is unnecessary unless the string @@ -2027,45 +1717,37 @@ yystpcpy (char *yydest, const char *yysrc) backslash-backslash). YYSTR is taken from yytname. If YYRES is null, do not copy; instead, return the length of what the result would have been. */ -static size_t -yytnamerr (char *yyres, const char *yystr) -{ - if (*yystr == '"') - { - size_t yyn = 0; - char const *yyp = yystr; - - for (;;) - switch (*++yyp) - { - case '\'': - case ',': - goto do_not_strip_quotes; - - case '\\': - if (*++yyp != '\\') - goto do_not_strip_quotes; - /* Fall through. */ - default: - if (yyres) - yyres[yyn] = *yyp; - yyn++; - break; - - case '"': - if (yyres) - yyres[yyn] = '\0'; - return yyn; - } - do_not_strip_quotes: ; - } +static size_t yytnamerr(char *yyres, const char *yystr) { + if (*yystr == '"') { + size_t yyn = 0; + char const *yyp = yystr; + + for (;;) switch (*++yyp) { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') goto do_not_strip_quotes; + /* Fall through. */ + default: + if (yyres) yyres[yyn] = *yyp; + yyn++; + break; + + case '"': + if (yyres) yyres[yyn] = '\0'; + return yyn; + } + do_not_strip_quotes: + ; + } - if (! 
yyres) - return strlen (yystr); + if (!yyres) return strlen(yystr); - return yystpcpy (yyres, yystr) - yyres; + return yystpcpy(yyres, yystr) - yyres; } -# endif +#endif #endif /* !YYERROR_VERBOSE */ @@ -2096,26 +1778,26 @@ struct yyGLRState { /** Number of corresponding LALR(1) machine state. */ yyStateNum yylrState; /** Preceding state in this stack */ - yyGLRState* yypred; + yyGLRState *yypred; /** Source position of the first token produced by my symbol */ size_t yyposn; union { /** First in a chain of alternative reductions producing the * non-terminal corresponding to this state, threaded through * yynext. */ - yySemanticOption* yyfirstVal; + yySemanticOption *yyfirstVal; /** Semantic value for this state. */ YYSTYPE yysval; } yysemantics; }; struct yyGLRStateSet { - yyGLRState** yystates; + yyGLRState **yystates; /** During nondeterministic operation, yylookaheadNeeds tracks which * stacks have actually needed the current lookahead. During deterministic * operation, yylookaheadNeeds[0] is not maintained since it would merely * duplicate yychar != YYEMPTY. */ - yybool* yylookaheadNeeds; + yybool *yylookaheadNeeds; size_t yysize, yycapacity; }; @@ -2125,13 +1807,13 @@ struct yySemanticOption { /** Rule number for this reduction */ yyRuleNum yyrule; /** The last RHS state in the list of states to be reduced. */ - yyGLRState* yystate; + yyGLRState *yystate; /** The lookahead for this reduction. */ int yyrawchar; YYSTYPE yyval; /** Next sibling in chain of options. To facilitate merging, * options are chained in decreasing order by address. */ - yySemanticOption* yynext; + yySemanticOption *yynext; }; /** Type of the items in the GLR stack. 
The yyisState field @@ -2144,49 +1826,41 @@ union yyGLRStackItem { struct yyGLRStack { int yyerrState; - int yyerrcnt; int yyrawchar; YYSTYPE yyval; YYJMP_BUF yyexception_buffer; - yyGLRStackItem* yyitems; - yyGLRStackItem* yynextFree; + yyGLRStackItem *yyitems; + yyGLRStackItem *yynextFree; size_t yyspaceLeft; - yyGLRState* yysplitPoint; - yyGLRState* yylastDeleted; + yyGLRState *yysplitPoint; + yyGLRState *yylastDeleted; yyGLRStateSet yytops; }; #if YYSTACKEXPANDABLE -static void yyexpandGLRStack (yyGLRStack* yystackp); +static void yyexpandGLRStack(yyGLRStack *yystackp); #endif -static void yyFail (yyGLRStack* yystackp, struct ParseResult* result, const char* yymsg) - __attribute__ ((__noreturn__)); -static void -yyFail (yyGLRStack* yystackp, struct ParseResult* result, const char* yymsg) -{ - if (yymsg != YY_NULL) - yyerror (result, yymsg); - YYLONGJMP (yystackp->yyexception_buffer, 1); +static void yyFail(yyGLRStack *yystackp, struct ParseResult *result, + const char *yymsg) __attribute__((__noreturn__)); +static void yyFail(yyGLRStack *yystackp, struct ParseResult *result, + const char *yymsg) { + if (yymsg != YY_NULL) yyerror(result, yymsg); + YYLONGJMP(yystackp->yyexception_buffer, 1); } -static void yyMemoryExhausted (yyGLRStack* yystackp) - __attribute__ ((__noreturn__)); -static void -yyMemoryExhausted (yyGLRStack* yystackp) -{ - YYLONGJMP (yystackp->yyexception_buffer, 2); +static void yyMemoryExhausted(yyGLRStack *yystackp) + __attribute__((__noreturn__)); +static void yyMemoryExhausted(yyGLRStack *yystackp) { + YYLONGJMP(yystackp->yyexception_buffer, 2); } #if YYDEBUG || YYERROR_VERBOSE /** A printable representation of TOKEN. */ -static inline const char* -yytokenName (yySymbol yytoken) -{ - if (yytoken == YYEMPTY) - return ""; +static inline const char *yytokenName(yySymbol yytoken) { + if (yytoken == YYEMPTY) return ""; return yytname[yytoken]; } @@ -2195,34 +1869,29 @@ yytokenName (yySymbol yytoken) /** Fill in YYVSP[YYLOW1 .. 
YYLOW0-1] from the chain of states starting * at YYVSP[YYLOW0].yystate.yypred. Leaves YYVSP[YYLOW1].yystate.yypred * containing the pointer to the next state in the chain. */ -static void yyfillin (yyGLRStackItem *, int, int) __attribute__ ((__unused__)); -static void -yyfillin (yyGLRStackItem *yyvsp, int yylow0, int yylow1) -{ +static void yyfillin(yyGLRStackItem *, int, int) __attribute__((__unused__)); +static void yyfillin(yyGLRStackItem *yyvsp, int yylow0, int yylow1) { int i; yyGLRState *s = yyvsp[yylow0].yystate.yypred; - for (i = yylow0-1; i >= yylow1; i -= 1) - { - YYASSERT (s->yyresolved); - yyvsp[i].yystate.yyresolved = yytrue; - yyvsp[i].yystate.yysemantics.yysval = s->yysemantics.yysval; - s = yyvsp[i].yystate.yypred = s->yypred; - } + for (i = yylow0 - 1; i >= yylow1; i -= 1) { + YYASSERT(s->yyresolved); + yyvsp[i].yystate.yyresolved = yytrue; + yyvsp[i].yystate.yysemantics.yysval = s->yysemantics.yysval; + s = yyvsp[i].yystate.yypred = s->yypred; + } } /* Do nothing if YYNORMAL or if *YYLOW <= YYLOW1. Otherwise, fill in * YYVSP[YYLOW1 .. *YYLOW-1] as in yyfillin and set *YYLOW = YYLOW1. * For convenience, always return YYLOW1. */ -static inline int yyfill (yyGLRStackItem *, int *, int, yybool) - __attribute__ ((__unused__)); -static inline int -yyfill (yyGLRStackItem *yyvsp, int *yylow, int yylow1, yybool yynormal) -{ - if (!yynormal && yylow1 < *yylow) - { - yyfillin (yyvsp, *yylow, yylow1); - *yylow = yylow1; - } +static inline int yyfill(yyGLRStackItem *, int *, int, yybool) + __attribute__((__unused__)); +static inline int yyfill(yyGLRStackItem *yyvsp, int *yylow, int yylow1, + yybool yynormal) { + if (!yynormal && yylow1 < *yylow) { + yyfillin(yyvsp, *yylow, yylow1); + *yylow = yylow1; + } return yylow1; } @@ -2231,2383 +1900,3972 @@ yyfill (yyGLRStackItem *yyvsp, int *yylow, int yylow1, yybool yynormal) * value ($$), and yylocp points to place for location information * (@$). 
Returns yyok for normal return, yyaccept for YYACCEPT, * yyerr for YYERROR, yyabort for YYABORT. */ -/*ARGSUSED*/ static YYRESULTTAG -yyuserAction (yyRuleNum yyn, int yyrhslen, yyGLRStackItem* yyvsp, - yyGLRStack* yystackp, - YYSTYPE* yyvalp, struct ParseResult* result) -{ - yybool yynormal __attribute__ ((__unused__)) = - (yystackp->yysplitPoint == YY_NULL); +/*ARGSUSED*/ static YYRESULTTAG yyuserAction(yyRuleNum yyn, int yyrhslen, + yyGLRStackItem *yyvsp, + yyGLRStack *yystackp, + YYSTYPE *yyvalp, + struct ParseResult *result) { + yybool yynormal __attribute__((__unused__)) = + (yystackp->yysplitPoint == YY_NULL); int yylow; - YYUSE (result); -# undef yyerrok -# define yyerrok (yystackp->yyerrState = 0) -# undef YYACCEPT -# define YYACCEPT return yyaccept -# undef YYABORT -# define YYABORT return yyabort -# undef YYERROR -# define YYERROR return yyerrok, yyerr -# undef YYRECOVERING -# define YYRECOVERING() (yystackp->yyerrState != 0) -# undef yyclearin -# define yyclearin (yychar = YYEMPTY) -# undef YYFILL -# define YYFILL(N) yyfill (yyvsp, &yylow, N, yynormal) -# undef YYBACKUP -# define YYBACKUP(Token, Value) \ - return yyerror (result, YY_("syntax error: cannot back up")), \ - yyerrok, yyerr + YYUSE(result); +#undef yyerrok +#define yyerrok (yystackp->yyerrState = 0) +#undef YYACCEPT +#define YYACCEPT return yyaccept +#undef YYABORT +#define YYABORT return yyabort +#undef YYERROR +#define YYERROR return yyerrok, yyerr +#undef YYRECOVERING +#define YYRECOVERING() (yystackp->yyerrState != 0) +#undef yyclearin +#define yyclearin (yychar = YYEMPTY) +#undef YYFILL +#define YYFILL(N) yyfill(yyvsp, &yylow, N, yynormal) +#undef YYBACKUP +#define YYBACKUP(Token, Value) \ + return yyerror(result, YY_("syntax error: cannot back up")), yyerrok, yyerr yylow = 1; if (yyrhslen == 0) *yyvalp = yyval_default; else - *yyvalp = yyvsp[YYFILL (1-yyrhslen)].yystate.yysemantics.yysval; - switch (yyn) + *yyvalp = yyvsp[YYFILL(1 - yyrhslen)].yystate.yysemantics.yysval; + switch (yyn) 
{ + case 2: +/* Line 868 of glr.c */ +#line 443 "sql.ypp" { - case 2: + printf("> \n"); + ((*yyvalp).ast_node) = new AstStmtList( + AST_STMT_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.ast_node), + NULL); + if (result->error_number == 0) { + result->ast = ((*yyvalp).ast_node); + } else { + result->ast = NULL; + } + + } break; + + case 3: /* Line 868 of glr.c */ -#line 436 "sql.ypp" - { - printf("> \n"); - ((*yyvalp).ast_node)=new AstStmtList(AST_STMT_LIST,(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node),NULL); - if (result->error_number == 0) - { - result->ast = ((*yyvalp).ast_node); - } - else - { - result->ast = NULL; - } - - } - break; - - case 3: -/* Line 868 of glr.c */ -#line 450 "sql.ypp" - { - printf(">> \n"); - ((*yyvalp).ast_node)=new AstStmtList(AST_STMT_LIST,(((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node)); - if (result->error_number == 0) - { - result->ast = ((*yyvalp).ast_node); - } - else - { - result->ast = NULL; - } - } - break; - - case 4: -/* Line 868 of glr.c */ -#line 463 "sql.ypp" - { - printf(">> \n"); - result->ast = NULL; - result->error_number++; - //yyerror(result,"First statement discarded, input new statement"); - yyclearin; - yyerrok; - } - break; - - case 5: -/* Line 868 of glr.c */ -#line 472 "sql.ypp" - { - printf(">> \n"); - result->ast = NULL; - result->error_number++; - yyclearin; - yyerrok; - } - break; - - case 6: -/* Line 868 of glr.c */ -#line 483 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);} - break; - - case 7: -/* Line 868 of glr.c */ -#line 488 "sql.ypp" - { - ((*yyvalp).ast_node)=new AstSelectStmt(AST_SELECT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.intval),(((yyGLRStackItem const 
*)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node),NULL,NULL,NULL,NULL,NULL,NULL,NULL); - } - break; - - case 8: -/* Line 868 of glr.c */ -#line 493 "sql.ypp" - { - ((*yyvalp).ast_node)=new AstSelectStmt(AST_SELECT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (11))].yystate.yysemantics.yysval.intval),(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (11))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (11))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (11))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (11))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((8) - (11))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (11))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((10) - (11))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((11) - (11))].yystate.yysemantics.yysval.ast_node)); - } - break; - - case 9: -/* Line 868 of glr.c */ -#line 498 "sql.ypp" - { ((*yyvalp).ast_node) = NULL;} - break; - - case 10: -/* Line 868 of glr.c */ -#line 499 "sql.ypp" - { ((*yyvalp).ast_node) = new AstWhereClause(AST_WHERE_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node));} - break; - - case 11: -/* Line 868 of glr.c */ -#line 502 "sql.ypp" - { ((*yyvalp).ast_node) = NULL;} - break; - - case 12: -/* Line 868 of glr.c */ -#line 503 "sql.ypp" - { ((*yyvalp).ast_node) = new AstGroupByClause(AST_GROUPBY_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.intval));} - break; - - case 13: +#line 457 "sql.ypp" + { + printf(">> \n"); + ((*yyvalp).ast_node) = new AstStmtList( + AST_STMT_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + 
.yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + if (result->error_number == 0) { + result->ast = ((*yyvalp).ast_node); + } else { + result->ast = NULL; + } + } break; + + case 4: /* Line 868 of glr.c */ -#line 508 "sql.ypp" - { ((*yyvalp).ast_node)=new AstGroupByList(AST_GROUPBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 470 "sql.ypp" + { + printf(">> \n"); + result->ast = NULL; + result->error_number++; + // yyerror(result,"First statement discarded, input new statement"); + yyclearin; + yyerrok; + } break; - case 14: + case 5: /* Line 868 of glr.c */ -#line 509 "sql.ypp" - { ((*yyvalp).ast_node)=new AstGroupByList(AST_GROUPBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 479 "sql.ypp" + { + printf(">> \n"); + result->ast = NULL; + result->error_number++; + yyclearin; + yyerrok; + } break; - case 15: + case 6: /* Line 868 of glr.c */ -#line 511 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 490 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 16: + case 7: /* Line 868 of glr.c */ -#line 512 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 495 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstSelectStmt( + AST_SELECT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL, NULL, NULL, NULL, NULL, NULL); + } break; - case 17: + case 8: /* Line 868 of glr.c */ -#line 513 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 500 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstSelectStmt( 
+ AST_SELECT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (11))] + .yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((8) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((9) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((10) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((11) - (11))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 9: +/* Line 868 of glr.c */ +#line 505 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 18: + case 10: /* Line 868 of glr.c */ -#line 516 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 506 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstWhereClause( + AST_WHERE_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.ast_node)); + } break; - case 19: + case 11: /* Line 868 of glr.c */ -#line 517 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 509 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 20: + case 12: /* Line 868 of glr.c */ -#line 520 "sql.ypp" - { ((*yyvalp).ast_node)=NULL; } - break; +#line 510 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstGroupByClause( + AST_GROUPBY_CLAUSE, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (4))].yystate.yysemantics.yysval.intval)); + } break; - case 21: + case 13: /* Line 868 of glr.c */ -#line 521 "sql.ypp" - { ((*yyvalp).ast_node)=new 
AstHavingClause(AST_HAVING_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 515 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstGroupByList( + AST_GROUPBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 22: + case 14: /* Line 868 of glr.c */ -#line 524 "sql.ypp" - { ((*yyvalp).ast_node)=NULL; } - break; +#line 516 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstGroupByList( + AST_GROUPBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 23: + case 15: /* Line 868 of glr.c */ -#line 525 "sql.ypp" - { ((*yyvalp).ast_node)=new AstOrderByClause(AST_ORDERBY_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 518 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 24: + case 16: /* Line 868 of glr.c */ -#line 528 "sql.ypp" - { ((*yyvalp).ast_node)=new AstOrderByList(AST_ORDERBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.intval), NULL);} - break; +#line 519 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 25: + case 17: /* Line 868 of glr.c */ -#line 529 "sql.ypp" - { ((*yyvalp).ast_node)=new AstOrderByList(AST_ORDERBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (4))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (4))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 520 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 26: + case 18: /* Line 868 of glr.c */ -#line 531 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; 
} - break; +#line 523 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 27: + case 19: /* Line 868 of glr.c */ -#line 532 "sql.ypp" - { ((*yyvalp).ast_node)=new AstLimitClause(AST_LIMIT_CLAUSE, NULL,(((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node));} - break; +#line 524 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 28: + case 20: /* Line 868 of glr.c */ -#line 533 "sql.ypp" - { ((*yyvalp).ast_node)=new AstLimitClause(AST_LIMIT_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (4))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.ast_node));} - break; +#line 527 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 29: + case 21: /* Line 868 of glr.c */ -#line 536 "sql.ypp" - { ((*yyvalp).ast_node)=NULL; } - break; +#line 528 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstHavingClause( + AST_HAVING_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.ast_node)); + } break; - case 30: + case 22: /* Line 868 of glr.c */ -#line 537 "sql.ypp" - { ((*yyvalp).ast_node)=NULL;} - break; +#line 531 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 31: + case 23: /* Line 868 of glr.c */ -#line 541 "sql.ypp" - { ((*yyvalp).ast_node) = new AstColumn(AST_COLUMN, string("NULL"), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval)));} - break; +#line 532 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstOrderByClause( + AST_ORDERBY_CLAUSE, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 32: + case 24: /* Line 868 of glr.c */ -#line 542 "sql.ypp" - { ((*yyvalp).ast_node) = new AstColumn(AST_COLUMN, string("NULL"), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - 
(3))].yystate.yysemantics.yysval.ast_node));} - break; +#line 535 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstOrderByList( + AST_ORDERBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.intval), + NULL); + } break; - case 33: + case 25: /* Line 868 of glr.c */ -#line 545 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 536 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstOrderByList( + AST_ORDERBY_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (4))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (4))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 26: +/* Line 868 of glr.c */ +#line 538 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 34: + case 27: /* Line 868 of glr.c */ -#line 546 "sql.ypp" - { if(((*yyvalp).intval) & 1) yyerror(result,"duplicate ALL option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 1; } - break; +#line 539 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstLimitClause( + AST_LIMIT_CLAUSE, NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.ast_node)); + } break; - case 35: + case 28: /* Line 868 of glr.c */ -#line 547 "sql.ypp" - { if(((*yyvalp).intval) & 2) yyerror(result,"duplicate DISTINCT option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 2; } - break; +#line 540 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstLimitClause( + AST_LIMIT_CLAUSE, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (4))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 36: + case 29: /* Line 
868 of glr.c */ -#line 548 "sql.ypp" - { if(((*yyvalp).intval) & 4) yyerror(result,"duplicate DISTINCTROW option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 4; } - break; +#line 543 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 37: + case 30: /* Line 868 of glr.c */ -#line 549 "sql.ypp" - { if(((*yyvalp).intval) & 8) yyerror(result,"duplicate HIGH_PRIORITY option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 8; } - break; +#line 544 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 38: + case 31: /* Line 868 of glr.c */ -#line 550 "sql.ypp" - { if(((*yyvalp).intval) & 16) yyerror(result,"duplicate STRAIGHT_JOIN option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 16; } - break; +#line 548 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstColumn( + AST_COLUMN, string("NULL"), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval))); + } break; - case 39: + case 32: /* Line 868 of glr.c */ -#line 551 "sql.ypp" - { if(((*yyvalp).intval) & 32) yyerror(result,"duplicate SQL_SMALL_RESULT option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 32; } - break; +#line 549 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstColumn( + AST_COLUMN, string("NULL"), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 40: + case 33: /* Line 868 of glr.c */ #line 552 "sql.ypp" - { if(((*yyvalp).intval) & 64) yyerror(result,"duplicate SQL_BIG_RESULT option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 64; } - break; + { + 
((*yyvalp).intval) = 0; + } break; - case 41: + case 34: /* Line 868 of glr.c */ #line 553 "sql.ypp" - { if(((*yyvalp).intval) & 128) yyerror(result,"duplicate SQL_CALC_FOUND_ROWS option"); ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 128; } - break; + { + if (((*yyvalp).intval) & 1) yyerror(result, "duplicate ALL option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 1; + } break; - case 42: + case 35: /* Line 868 of glr.c */ -#line 557 "sql.ypp" - { ((*yyvalp).ast_node) = new AstSelectList(AST_SELECT_LIST, 0,(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node),NULL);} - break; +#line 554 "sql.ypp" + { + if (((*yyvalp).intval) & 2) yyerror(result, "duplicate DISTINCT option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 2; + } break; - case 43: + case 36: /* Line 868 of glr.c */ -#line 558 "sql.ypp" - { ((*yyvalp).ast_node) = new AstSelectList(AST_SELECT_LIST, 0,(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node));} - break; +#line 555 "sql.ypp" + { + if (((*yyvalp).intval) & 4) + yyerror(result, "duplicate DISTINCTROW option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 4; + } break; - case 44: + case 37: /* Line 868 of glr.c */ -#line 559 "sql.ypp" - { ((*yyvalp).ast_node) = new AstSelectList(AST_SELECT_LIST, 1,NULL,NULL);} - break; +#line 556 "sql.ypp" + { + if (((*yyvalp).intval) & 8) + yyerror(result, "duplicate HIGH_PRIORITY option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 8; + } break; - case 45: + case 38: /* Line 868 of glr.c */ -#line 
563 "sql.ypp" - {((*yyvalp).ast_node) = new AstSelectExpr(AST_SELECT_EXPR, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.strval)),(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node));} - break; +#line 557 "sql.ypp" + { + if (((*yyvalp).intval) & 16) + yyerror(result, "duplicate STRAIGHT_JOIN option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 16; + } break; - case 46: + case 39: /* Line 868 of glr.c */ -#line 566 "sql.ypp" - { ((*yyvalp).ast_node) = new AstFromList(AST_FROM_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 558 "sql.ypp" + { + if (((*yyvalp).intval) & 32) + yyerror(result, "duplicate SQL_SMALL_RESULT option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 32; + } break; - case 47: + case 40: /* Line 868 of glr.c */ -#line 567 "sql.ypp" - { ((*yyvalp).ast_node) = new AstFromList(AST_FROM_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node));} - break; +#line 559 "sql.ypp" + { + if (((*yyvalp).intval) & 64) + yyerror(result, "duplicate SQL_BIG_RESULT option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 64; + } break; - case 48: + case 41: /* Line 868 of glr.c */ -#line 571 "sql.ypp" - { ((*yyvalp).ast_node)=(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node); } - break; +#line 560 "sql.ypp" + { + if (((*yyvalp).intval) & 128) + yyerror(result, "duplicate SQL_CALC_FOUND_ROWS option"); + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 128; + } break; - 
case 49: + case 42: /* Line 868 of glr.c */ -#line 572 "sql.ypp" - { ((*yyvalp).ast_node)=(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node); } - break; +#line 564 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstSelectList( + AST_SELECT_LIST, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (1))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 50: + case 43: /* Line 868 of glr.c */ -#line 577 "sql.ypp" - { ((*yyvalp).ast_node) = new AstTable(AST_TABLE, string("NULL"),string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.strval)),string((((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.strval)));} - break; +#line 565 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstSelectList( + AST_SELECT_LIST, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 51: + case 44: /* Line 868 of glr.c */ -#line 579 "sql.ypp" - { ((*yyvalp).ast_node) = new AstTable(AST_TABLE, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (4))].yystate.yysemantics.yysval.strval)),string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.strval)),string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval))); } - break; +#line 566 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstSelectList(AST_SELECT_LIST, 1, NULL, NULL); + } break; - case 52: + case 45: /* Line 868 of glr.c */ -#line 580 "sql.ypp" - { ((*yyvalp).ast_node) = new AstSubquery(AST_SUBQUERY, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)),(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 570 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstSelectExpr( + AST_SELECT_EXPR, + string((((yyGLRStackItem 
const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (2))].yystate.yysemantics.yysval.ast_node)); + } break; - case 53: + case 46: /* Line 868 of glr.c */ -#line 581 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node); } - break; +#line 573 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstFromList( + AST_FROM_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 56: + case 47: /* Line 868 of glr.c */ -#line 588 "sql.ypp" - { ((*yyvalp).strval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.strval); } - break; +#line 574 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstFromList( + AST_FROM_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 57: + case 48: /* Line 868 of glr.c */ -#line 589 "sql.ypp" - { ((*yyvalp).strval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval); } - break; +#line 578 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 58: + case 49: /* Line 868 of glr.c */ -#line 590 "sql.ypp" - { ((*yyvalp).strval) = "NULL"; } - break; +#line 579 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 59: + case 50: /* Line 868 of glr.c */ -#line 604 "sql.ypp" - { ((*yyvalp).ast_node) = new AstJoin(AST_JOIN, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (5))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL 
((4) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node));} - break; +#line 584 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstTable( + AST_TABLE, string("NULL"), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.strval))); + } break; - case 60: + case 51: /* Line 868 of glr.c */ -#line 605 "sql.ypp" - { ((*yyvalp).ast_node) = new AstJoin(AST_JOIN, -1, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), NULL);} - break; +#line 586 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstTable( + AST_TABLE, string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (4))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (4))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + .yystate.yysemantics.yysval.strval))); + } break; + + case 52: +/* Line 868 of glr.c */ +#line 587 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstSubquery( + AST_SUBQUERY, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 61: + case 53: /* Line 868 of glr.c */ -#line 606 "sql.ypp" - { ((*yyvalp).ast_node) = new AstJoin(AST_JOIN, -1, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node));} - break; +#line 588 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - 
(3))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 62: + case 56: /* Line 868 of glr.c */ -#line 607 "sql.ypp" - { ((*yyvalp).ast_node) = new AstJoin(AST_JOIN, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (6))].yystate.yysemantics.yysval.intval) + (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (6))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (6))].yystate.yysemantics.yysval.ast_node));} - break; +#line 595 "sql.ypp" + { + ((*yyvalp).strval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.strval); + } break; - case 63: + case 57: /* Line 868 of glr.c */ -#line 608 "sql.ypp" - { ((*yyvalp).ast_node) = new AstJoin(AST_JOIN, 32 + (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node), NULL);} - break; +#line 596 "sql.ypp" + { + ((*yyvalp).strval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval); + } break; - case 64: + case 58: /* Line 868 of glr.c */ -#line 611 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 597 "sql.ypp" + { + ((*yyvalp).strval) = "NULL"; + } break; - case 65: + case 59: /* Line 868 of glr.c */ #line 612 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; - - case 66: + { + ((*yyvalp).ast_node) = new AstJoin( + AST_JOIN, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (5))] + .yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const 
*) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 60: /* Line 868 of glr.c */ #line 613 "sql.ypp" - { ((*yyvalp).intval) = 2; } - break; + { + ((*yyvalp).ast_node) = new AstJoin( + AST_JOIN, -1, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 67: + case 61: +/* Line 868 of glr.c */ +#line 614 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstJoin( + AST_JOIN, -1, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (5))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 62: +/* Line 868 of glr.c */ +#line 615 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstJoin( + AST_JOIN, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (6))].yystate.yysemantics.yysval.intval) + + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (6))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (6))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 63: /* Line 868 of glr.c */ #line 616 "sql.ypp" - { ((*yyvalp).intval) = 4; } - break; + { + ((*yyvalp).ast_node) = new AstJoin( + AST_JOIN, 32 + (((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (5))] + .yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 64: +/* Line 868 of glr.c */ +#line 619 "sql.ypp" + { + 
((*yyvalp).intval) = 0; + } break; - case 68: + case 65: /* Line 868 of glr.c */ -#line 617 "sql.ypp" - { ((*yyvalp).intval) = 4; } - break; +#line 620 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 69: + case 66: /* Line 868 of glr.c */ #line 621 "sql.ypp" - { ((*yyvalp).intval) = 8; } - break; + { + ((*yyvalp).intval) = 2; + } break; - case 70: + case 67: /* Line 868 of glr.c */ -#line 622 "sql.ypp" - { ((*yyvalp).intval) = 16; } - break; +#line 624 "sql.ypp" + { + ((*yyvalp).intval) = 4; + } break; - case 71: + case 68: /* Line 868 of glr.c */ -#line 626 "sql.ypp" - { ((*yyvalp).intval) = 8 + (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.intval); } - break; +#line 625 "sql.ypp" + { + ((*yyvalp).intval) = 4; + } break; - case 72: + case 69: /* Line 868 of glr.c */ -#line 627 "sql.ypp" - { ((*yyvalp).intval) = 16 + (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.intval); } - break; +#line 629 "sql.ypp" + { + ((*yyvalp).intval) = 8; + } break; - case 73: + case 70: /* Line 868 of glr.c */ -#line 628 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 630 "sql.ypp" + { + ((*yyvalp).intval) = 16; + } break; - case 74: + case 71: /* Line 868 of glr.c */ #line 631 "sql.ypp" - {((*yyvalp).ast_node) = NULL;} - break; - - case 75: -/* Line 868 of glr.c */ -#line 632 "sql.ypp" - {((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);} - break; - - case 76: -/* Line 868 of glr.c */ -#line 640 "sql.ypp" - {((*yyvalp).ast_node) = new AstJoinCondition(AST_JOIN_CONDITION, "ON", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 77: -/* Line 868 of glr.c */ -#line 641 "sql.ypp" - {((*yyvalp).ast_node) = new AstJoinCondition(AST_JOIN_CONDITION, "USING", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 78: -/* Line 868 of 
glr.c */ -#line 646 "sql.ypp" - { ((*yyvalp).ast_node)=(((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node); } - break; + { + ((*yyvalp).intval) = 64; + } break; - case 79: + case 72: /* Line 868 of glr.c */ -#line 653 "sql.ypp" - { ((*yyvalp).ast_node) = new AstColumn(AST_COLUMN, string("NULL"),string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval)));} - break; +#line 635 "sql.ypp" + { + ((*yyvalp).intval) = + 8 + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.intval); + } break; - case 80: + case 73: /* Line 868 of glr.c */ -#line 654 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "CONST",string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval))); } - break; +#line 636 "sql.ypp" + { + ((*yyvalp).intval) = + 16 + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.intval); + } break; - case 81: + case 74: /* Line 868 of glr.c */ -#line 655 "sql.ypp" - { ((*yyvalp).ast_node) = new AstColumn(AST_COLUMN, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)),string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval))); } - break; +#line 637 "sql.ypp" + { + ((*yyvalp).intval) = + 64 + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.intval); + } break; - case 82: + case 75: /* Line 868 of glr.c */ -#line 656 "sql.ypp" - { ((*yyvalp).ast_node) = new AstColumn(AST_COLUMN_ALL, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)),string("*"));} - break; +#line 638 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 83: + case 76: /* Line 868 of glr.c */ -#line 657 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "CONST_STRING", string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - 
(1))].yystate.yysemantics.yysval.strval))); } - break; +#line 641 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 84: + case 77: /* Line 868 of glr.c */ -#line 658 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "CONST_INT", string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval))); } - break; +#line 642 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 85: + case 78: /* Line 868 of glr.c */ -#line 659 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "CONST_DOUBLE", string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval))); } - break; +#line 650 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstJoinCondition( + AST_JOIN_CONDITION, "ON", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.ast_node)); + } break; - case 86: + case 79: /* Line 868 of glr.c */ -#line 660 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "CONST_BOOL", string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval))); } - break; +#line 651 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstJoinCondition( + AST_JOIN_CONDITION, "USING", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 87: + case 80: /* Line 868 of glr.c */ -#line 662 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "+", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 656 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 88: + case 81: /* Line 868 of glr.c */ #line 663 "sql.ypp" 
- { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "-", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstColumn( + AST_COLUMN, string("NULL"), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval))); + } break; - case 89: + case 82: /* Line 868 of glr.c */ #line 664 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "*", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprConst( + AST_EXPR_CONST, "CONST", + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval))); + } break; - case 90: + case 83: /* Line 868 of glr.c */ #line 665 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "/", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstColumn( + AST_COLUMN, string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval))); + } break; - case 91: + case 84: /* Line 868 of glr.c */ #line 666 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "MOD", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstColumn( + AST_COLUMN_ALL, + 
string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + string("*")); + } break; - case 92: + case 85: /* Line 868 of glr.c */ #line 667 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "%", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprConst( + AST_EXPR_CONST, "CONST_STRING", + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval))); + } break; + + case 86: +/* Line 868 of glr.c */ +#line 668 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprConst( + AST_EXPR_CONST, "CONST_INT", + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval))); + } break; - case 93: + case 87: /* Line 868 of glr.c */ #line 669 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "-", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprConst( + AST_EXPR_CONST, "CONST_DOUBLE", + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval))); + } break; - case 94: + case 88: /* Line 868 of glr.c */ #line 670 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "+", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprConst( + AST_EXPR_CONST, "CONST_BOOL", + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval))); + } break; - case 95: + case 89: /* Line 868 of glr.c */ #line 672 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_BOOL_BINARY, "AND", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), 
(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "+", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 96: + case 90: /* Line 868 of glr.c */ #line 673 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_BOOL_BINARY, "OR", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "-", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 97: + case 91: /* Line 868 of glr.c */ #line 674 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_BOOL_BINARY, "XOR", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "*", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 98: + case 92: /* Line 868 of glr.c */ -#line 676 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.subtok), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - 
(3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 675 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "/", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 99: + case 93: /* Line 868 of glr.c */ -#line 677 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "SUBQUERY", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (5))].yystate.yysemantics.yysval.subtok), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 676 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "MOD", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 100: + case 94: /* Line 868 of glr.c */ -#line 678 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "ANY", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (6))].yystate.yysemantics.yysval.subtok), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 677 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "%", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 101: + case 95: /* Line 868 of glr.c */ #line 679 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "SOME", (((yyGLRStackItem const 
*)yyvsp)[YYFILL ((2) - (6))].yystate.yysemantics.yysval.subtok), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "-", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.ast_node)); + } break; - case 102: + case 96: /* Line 868 of glr.c */ #line 680 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "ALL", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (6))].yystate.yysemantics.yysval.subtok), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "+", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.ast_node)); + } break; - case 103: + case 97: /* Line 868 of glr.c */ #line 682 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "|", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_BOOL_BINARY, "AND", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 104: + case 98: /* Line 868 of glr.c */ #line 683 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "&", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + 
((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_BOOL_BINARY, "OR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 105: + case 99: /* Line 868 of glr.c */ #line 684 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "^", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_BOOL_BINARY, "XOR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 106: + case 100: /* Line 868 of glr.c */ -#line 685 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, SHIFT==1?"LSHIFT":"RSHIFT",(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 107: +#line 686 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.subtok), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 101: /* Line 868 of glr.c */ #line 687 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "!", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 108: + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "SUBQUERY", + 
(((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (5))].yystate.yysemantics.yysval.subtok), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 102: /* Line 868 of glr.c */ #line 688 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "NOT", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 109: + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "ANY", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (6))].yystate.yysemantics.yysval.subtok), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 103: /* Line 868 of glr.c */ #line 689 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 110: + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "SOME", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (6))].yystate.yysemantics.yysval.subtok), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 104: /* Line 868 of glr.c */ #line 690 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node); } - break; + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "ALL", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (6))].yystate.yysemantics.yysval.subtok), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 105: +/* Line 868 of 
glr.c */ +#line 692 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "|", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 111: + case 106: /* Line 868 of glr.c */ #line 693 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "IS_NULL", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "&", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 112: + case 107: /* Line 868 of glr.c */ #line 694 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "IS_NOT_NULL", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "^", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 113: + case 108: /* Line 868 of glr.c */ #line 695 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "IS_BOOL", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, SHIFT == 1 ? 
"LSHIFT" : "RSHIFT", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 109: +/* Line 868 of glr.c */ +#line 697 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "!", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.ast_node)); + } break; - case 114: + case 110: /* Line 868 of glr.c */ -#line 696 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "IS_NOT_BOOL", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 698 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "NOT", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (2))].yystate.yysemantics.yysval.ast_node)); + } break; - case 115: + case 111: /* Line 868 of glr.c */ #line 699 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "BETWEEN_AND", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 116: + case 112: /* Line 868 of glr.c */ -#line 702 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprList(AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node), NULL);} - break; +#line 700 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 117: + case 113: /* Line 868 of glr.c */ #line 703 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprList(AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), 
(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "IS_NULL", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 118: + case 114: /* Line 868 of glr.c */ #line 704 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "IS_NOT_NULL", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 119: + case 115: /* Line 868 of glr.c */ -#line 707 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprList(AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 705 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "IS_BOOL", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 120: + case 116: /* Line 868 of glr.c */ -#line 708 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprList(AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 706 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "IS_NOT_BOOL", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 121: + case 117: /* Line 868 of glr.c */ -#line 711 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "EXPR_IN_LIST", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 
122: +#line 709 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "BETWEEN_AND", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 118: /* Line 868 of glr.c */ #line 712 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "LIST_IN_LIST", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (7))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (7))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprList( + AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 123: + case 119: /* Line 868 of glr.c */ #line 713 "sql.ypp" - { AstNode* tmp_node= new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "EXPR_IN_LIST", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node)); - ((*yyvalp).ast_node)=new AstExprUnary(AST_EXPR_UNARY, "NOT", tmp_node); } - break; + { + ((*yyvalp).ast_node) = new AstExprList( + AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 124: + case 120: /* Line 868 of glr.c */ -#line 715 "sql.ypp" - { AstNode* tmp_node= new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "LIST_IN_LIST", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (8))].yystate.yysemantics.yysval.ast_node)); - ((*yyvalp).ast_node)=new AstExprUnary(AST_EXPR_UNARY, "NOT", 
tmp_node); } - break; +#line 714 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 125: + case 121: /* Line 868 of glr.c */ #line 717 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "EXPR_IN_SELECT", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprList( + AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 126: + case 122: /* Line 868 of glr.c */ #line 718 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "LIST_IN_SELECT", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (7))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (7))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 127: -/* Line 868 of glr.c */ -#line 719 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "EXPR_NOT_IN_SELECT", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprList( + AST_EXPR_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 128: + case 123: /* Line 868 of glr.c */ -#line 720 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCmpBinary(AST_EXPR_CMP_BINARY, "LIST_NOT_IN_SELECT", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - 
(8))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 721 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "EXPR_IN_LIST", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 129: + case 124: /* Line 868 of glr.c */ #line 722 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "EXSIST", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "LIST_IN_LIST", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (7))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (7))].yystate.yysemantics.yysval.ast_node)); + } break; - case 130: + case 125: /* Line 868 of glr.c */ #line 723 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "NOT_EXSIST", (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 131: -/* Line 868 of glr.c */ -#line 733 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "COUNT_ALL", NULL); } - break; - - case 132: -/* Line 868 of glr.c */ -#line 734 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "COUNT", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; + { + AstNode *tmp_node = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "EXPR_IN_LIST", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node)); + ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "NOT", tmp_node); + } break; + + case 126: +/* Line 868 of glr.c */ +#line 725 "sql.ypp" + { + AstNode *tmp_node = new 
AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "LIST_IN_LIST", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (8))].yystate.yysemantics.yysval.ast_node)); + ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "NOT", tmp_node); + } break; + + case 127: +/* Line 868 of glr.c */ +#line 727 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "EXPR_IN_SELECT", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 133: + case 128: /* Line 868 of glr.c */ -#line 735 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "SUM", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 728 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "LIST_IN_SELECT", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (7))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (7))].yystate.yysemantics.yysval.ast_node)); + } break; - case 134: + case 129: /* Line 868 of glr.c */ -#line 736 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "AVG", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 729 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "EXPR_NOT_IN_SELECT", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node)); + } break; - case 135: + case 130: /* Line 868 of glr.c */ -#line 737 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "MIN", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - 
(4))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 730 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCmpBinary( + AST_EXPR_CMP_BINARY, "LIST_NOT_IN_SELECT", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (8))].yystate.yysemantics.yysval.ast_node)); + } break; - case 136: + case 131: /* Line 868 of glr.c */ -#line 738 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprUnary(AST_EXPR_UNARY, "MAX", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 732 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "EXSIST", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 137: + case 132: /* Line 868 of glr.c */ -#line 742 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "SUBSTRING_EXPR_EXPR", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 733 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "NOT_EXSIST", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 138: + case 133: /* Line 868 of glr.c */ #line 743 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "SUBSTRING_EXPR_FROM_EXPR", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node), NULL);} - break; + { + ((*yyvalp).ast_node) = + new AstExprUnary(AST_EXPR_UNARY, "COUNT_ALL", NULL); + } break; - case 139: + case 134: /* Line 868 of glr.c */ #line 744 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "SUBSTRING_EXPR_EXPR_EXPR", (((yyGLRStackItem const 
*)yyvsp)[YYFILL ((3) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (8))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "COUNT", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 140: + case 135: /* Line 868 of glr.c */ #line 745 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "SUBSTRING_EXPR_FROM_EXPR_FOR_EXPR", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (8))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "SUM", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 141: + case 136: /* Line 868 of glr.c */ #line 746 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "TRIM_BOTH", NULL,(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "AVG", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 142: + case 137: /* Line 868 of glr.c */ #line 747 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (7))].yystate.yysemantics.yysval.strval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (7))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (7))].yystate.yysemantics.yysval.ast_node), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "MIN", + 
(((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 143: + case 138: /* Line 868 of glr.c */ #line 748 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "UPPER", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; - - case 144: -/* Line 868 of glr.c */ -#line 749 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "CAST", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprUnary( + AST_EXPR_UNARY, "MAX", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 145: + case 139: /* Line 868 of glr.c */ -#line 750 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "COALESCE", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; +#line 752 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "SUBSTRING_EXPR_EXPR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 146: + case 140: /* Line 868 of glr.c */ #line 753 "sql.ypp" - { ((*yyvalp).strval)="TRIM_LEADING"; } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "SUBSTRING_EXPR_FROM_EXPR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 147: + case 141: /* Line 868 of glr.c */ #line 754 "sql.ypp" - { ((*yyvalp).strval)="TRIM_TRAILING"; } - break; - - case 148: + { + 
((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "SUBSTRING_EXPR_EXPR_EXPR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (8))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 142: /* Line 868 of glr.c */ #line 755 "sql.ypp" - { ((*yyvalp).strval)="TRIM_BOTH"; } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "SUBSTRING_EXPR_FROM_EXPR_FOR_EXPR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (8))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 143: +/* Line 868 of glr.c */ +#line 756 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "TRIM_BOTH", NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 149: + case 144: +/* Line 868 of glr.c */ +#line 757 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, (((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (7))] + .yystate.yysemantics.yysval.strval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (7))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (7))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 145: /* Line 868 of glr.c */ #line 758 "sql.ypp" - { ((*yyvalp).ast_node) = NULL;} - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "UPPER", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 150: + case 146: /* Line 868 of glr.c */ #line 759 "sql.ypp" - {((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, 
"INT", NULL);} - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "CAST", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 151: + case 147: /* Line 868 of glr.c */ #line 760 "sql.ypp" - {((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "STRING", NULL);} - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "COALESCE", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; + + case 148: +/* Line 868 of glr.c */ +#line 763 "sql.ypp" + { + ((*yyvalp).strval) = "TRIM_LEADING"; + } break; - case 152: + case 149: /* Line 868 of glr.c */ -#line 761 "sql.ypp" - {((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "DOUBLE", NULL);} - break; +#line 764 "sql.ypp" + { + ((*yyvalp).strval) = "TRIM_TRAILING"; + } break; - case 153: + case 150: /* Line 868 of glr.c */ -#line 762 "sql.ypp" - {((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "FLOAT", NULL);} - break; +#line 765 "sql.ypp" + { + ((*yyvalp).strval) = "TRIM_BOTH"; + } break; - case 154: + case 151: /* Line 868 of glr.c */ -#line 763 "sql.ypp" - {((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "CHAR", NULL);} - break; +#line 768 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 155: + case 152: /* Line 868 of glr.c */ -#line 766 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "DATE_ADD", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 769 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "INT", NULL); + } break; - case 156: + case 153: /* Line 868 of glr.c */ -#line 767 "sql.ypp" - { ((*yyvalp).ast_node) = new 
AstExprFunc(AST_EXPR_FUNC, "DATE_SUB", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 770 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "STRING", NULL); + } break; - case 157: + case 154: /* Line 868 of glr.c */ #line 771 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_HOUR", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "DOUBLE", NULL); + } break; - case 158: + case 155: /* Line 868 of glr.c */ #line 772 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_MICROSECOND", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "FLOAT", NULL); + } break; - case 159: + case 156: /* Line 868 of glr.c */ #line 773 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_MINUTE", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; - - case 160: -/* Line 868 of glr.c */ -#line 774 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_SECOND", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; - - case 161: -/* Line 868 of glr.c */ -#line 775 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_DAY", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprConst(AST_EXPR_CONST, "CHAR", NULL); + } break; - case 162: + case 157: /* Line 868 of glr.c */ #line 776 "sql.ypp" - { ((*yyvalp).ast_node) = new 
AstExprFunc(AST_EXPR_FUNC, "INTERVAL_MONTH", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "DATE_ADD", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 163: + case 158: /* Line 868 of glr.c */ #line 777 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_YEAR", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; - - case 164: -/* Line 868 of glr.c */ -#line 778 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_WEEK", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "DATE_SUB", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 159: +/* Line 868 of glr.c */ +#line 781 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_HOUR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 165: + case 160: /* Line 868 of glr.c */ -#line 779 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "INTERVAL_QUARTER", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; +#line 782 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_MICROSECOND", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 
166: + case 161: /* Line 868 of glr.c */ #line 783 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "CASE1", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (4))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_MINUTE", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 167: + case 162: /* Line 868 of glr.c */ #line 784 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "CASE1_ELSE", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (6))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_SECOND", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 168: + case 163: /* Line 868 of glr.c */ #line 785 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "CASE2", NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_DAY", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 169: + case 164: /* Line 868 of glr.c */ #line 786 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "CASE2_ELSE", NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + 
AST_EXPR_FUNC, "INTERVAL_MONTH", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 170: + case 165: /* Line 868 of glr.c */ -#line 789 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "WHEN", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (4))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 787 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_YEAR", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; + + case 166: +/* Line 868 of glr.c */ +#line 788 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_WEEK", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 171: + case 167: /* Line 868 of glr.c */ -#line 790 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprFunc(AST_EXPR_FUNC, "WHEN", (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 789 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "INTERVAL_QUARTER", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; - case 172: + case 168: /* Line 868 of glr.c */ #line 793 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "LIKE", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new 
AstExprFunc( + AST_EXPR_FUNC, "CASE1", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (4))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 173: + case 169: /* Line 868 of glr.c */ #line 794 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "NOT_LIKE", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (4))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "CASE1_ELSE", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (6))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 170: +/* Line 868 of glr.c */ +#line 795 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "CASE2", NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 174: + case 171: /* Line 868 of glr.c */ -#line 797 "sql.ypp" - { ((*yyvalp).ast_node) = new AstExprCalBinary(AST_EXPR_CAL_BINARY, "REGEXP", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 796 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "CASE2_ELSE", NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 175: + case 172: /* Line 868 of glr.c */ -#line 798 "sql.ypp" - { ((*yyvalp).ast_node) = new 
AstExprCalBinary(AST_EXPR_CAL_BINARY, "NOT_REGEXP", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (4))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 799 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "WHEN", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (4))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (4))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 173: +/* Line 868 of glr.c */ +#line 800 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprFunc( + AST_EXPR_FUNC, "WHEN", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 174: +/* Line 868 of glr.c */ +#line 803 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "LIKE", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 176: + case 175: /* Line 868 of glr.c */ #line 804 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node); } - break; - - case 177: -/* Line 868 of glr.c */ -#line 808 "sql.ypp" - { string temp = ((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval) == NULL)?"":string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = new AstCreateDatabase(AST_CREATE_DATABASE, 1, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.intval), temp); } - break; + { + ((*yyvalp).ast_node) = 
new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "NOT_LIKE", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (4))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 178: + case 176: /* Line 868 of glr.c */ -#line 809 "sql.ypp" - { string temp = ((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval) == NULL)?"":string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = new AstCreateDatabase(AST_CREATE_SCHEMA, 2, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.intval), temp); } - break; +#line 807 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "REGEXP", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 179: + case 177: /* Line 868 of glr.c */ -#line 812 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 808 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstExprCalBinary( + AST_EXPR_CAL_BINARY, "NOT_REGEXP", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (4))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 180: + case 178: /* Line 868 of glr.c */ -#line 813 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 814 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 181: + case 179: /* Line 868 of glr.c */ #line 818 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);} - break; - - case 182: + { + string temp = + ((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + 
.yystate.yysemantics.yysval.strval) == NULL) + ? "" + : string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + .yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = new AstCreateDatabase( + AST_CREATE_DATABASE, 1, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.intval), + temp); + } break; + + case 180: +/* Line 868 of glr.c */ +#line 819 "sql.ypp" + { + string temp = + ((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + .yystate.yysemantics.yysval.strval) == NULL) + ? "" + : string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + .yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = new AstCreateDatabase( + AST_CREATE_SCHEMA, 2, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.intval), + temp); + } break; + + case 181: /* Line 868 of glr.c */ #line 822 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateTable(AST_CREATE_TABLE_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (8))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (8))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (8))].yystate.yysemantics.yysval.strval)), "", (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (8))].yystate.yysemantics.yysval.ast_node), NULL); } - break; - - case 183: -/* Line 868 of glr.c */ -#line 826 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateTable(AST_CREATE_TABLE_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (10))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (10))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (10))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (10))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (10))].yystate.yysemantics.yysval.ast_node), NULL); } - break; - - case 184: -/* Line 868 of glr.c */ -#line 831 
"sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateTable(AST_CREATE_TABLE_LIST_SEL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (9))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (9))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (9))].yystate.yysemantics.yysval.strval)), "", (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (9))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (9))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 185: -/* Line 868 of glr.c */ -#line 835 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateTable(AST_CREATE_TABLE_SEL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (6))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (6))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.strval)), "", NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (6))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 186: -/* Line 868 of glr.c */ -#line 840 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateTable(AST_CREATE_TABLE_LIST_SEL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (11))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (11))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (11))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (11))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (11))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((11) - (11))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 187: -/* Line 868 of glr.c */ -#line 844 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateTable(AST_CREATE_TABLE_SEL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (8))].yystate.yysemantics.yysval.intval), 
(((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (8))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (8))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (8))].yystate.yysemantics.yysval.strval)), NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((8) - (8))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 188: -/* Line 868 of glr.c */ -#line 848 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateSelect(AST_CREATE_SEL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.intval), 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 189: -/* Line 868 of glr.c */ -#line 851 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; + { + ((*yyvalp).intval) = 0; + } break; - case 190: + case 182: /* Line 868 of glr.c */ -#line 852 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 823 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 191: + case 183: /* Line 868 of glr.c */ -#line 853 "sql.ypp" - { ((*yyvalp).intval) = 2; } - break; +#line 828 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 192: + case 184: /* Line 868 of glr.c */ -#line 856 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 832 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateTable( + AST_CREATE_TABLE_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (8))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (8))].yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (8))] + .yystate.yysemantics.yysval.strval)), + "", (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (8))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 185: +/* Line 868 of glr.c */ +#line 836 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateTable( 
+ AST_CREATE_TABLE_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (10))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (10))].yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (10))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (10))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((9) - (10))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 186: +/* Line 868 of glr.c */ +#line 841 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateTable( + AST_CREATE_TABLE_LIST_SEL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (9))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (9))].yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (9))] + .yystate.yysemantics.yysval.strval)), + "", (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (9))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((9) - (9))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 187: +/* Line 868 of glr.c */ +#line 845 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateTable( + AST_CREATE_TABLE_SEL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (6))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (6))].yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (6))] + .yystate.yysemantics.yysval.strval)), + "", NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL((6) - (6))] + .yystate.yysemantics.yysval.ast_node)); + } break; + + case 188: +/* Line 868 of glr.c */ +#line 850 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateTable( + AST_CREATE_TABLE_LIST_SEL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (11))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - 
(11))].yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (11))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (11))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((9) - (11))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((11) - (11))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 189: +/* Line 868 of glr.c */ +#line 854 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateTable( + AST_CREATE_TABLE_SEL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (8))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (8))].yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (8))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (8))] + .yystate.yysemantics.yysval.strval)), + NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL((8) - (8))] + .yystate.yysemantics.yysval.ast_node)); + } break; + + case 190: +/* Line 868 of glr.c */ +#line 858 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateSelect( + AST_CREATE_SEL, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.intval), + 0, (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 193: + case 191: /* Line 868 of glr.c */ -#line 857 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 861 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 194: + case 192: /* Line 868 of glr.c */ -#line 860 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateColList(AST_CREATE_COL_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 862 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 195: + case 193: /* Line 868 of glr.c */ -#line 861 "sql.ypp" - { ((*yyvalp).ast_node) = new 
AstCreateColList(AST_CREATE_COL_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 863 "sql.ypp" + { + ((*yyvalp).intval) = 2; + } break; - case 196: + case 194: /* Line 868 of glr.c */ #line 866 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateDef( AST_CREATE_DEF_NAME, 1, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), NULL); } - break; + { + ((*yyvalp).intval) = 0; + } break; - case 197: + case 195: /* Line 868 of glr.c */ #line 867 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateDef( AST_CREATE_DEF_PR_KEY, 2, "", NULL, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 198: -/* Line 868 of glr.c */ -#line 868 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateDef( AST_CREATE_DEF_KEY, 3, "", NULL, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 199: -/* Line 868 of glr.c */ -#line 869 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateDef( AST_CREATE_DEF_INDEX, 4, "", NULL, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).intval) = 1; + } break; - case 200: + case 196: /* Line 868 of glr.c */ #line 870 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateDef( AST_CREATE_DEF_FTEXT_INDEX, 5, "", NULL, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = new AstCreateColList( + AST_CREATE_COL_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (1))].yystate.yysemantics.yysval.ast_node), + 
NULL); + } break; - case 201: + case 197: /* Line 868 of glr.c */ #line 871 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateDef( AST_CREATE_DEF_FTEXT_KEY, 6, "", NULL, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 202: -/* Line 868 of glr.c */ -#line 873 "sql.ypp" - { ((*yyvalp).ast_node) = new AstColumnAtts(AST_COLUMN_ATTS, 0, 0, 0, "", NULL); } - break; - - case 203: -/* Line 868 of glr.c */ -#line 874 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 01; ((*yyvalp).ast_node) = temp; } - break; - - case 204: -/* Line 868 of glr.c */ -#line 875 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 02; ((*yyvalp).ast_node) = temp; } - break; + { + ((*yyvalp).ast_node) = new AstCreateColList( + AST_CREATE_COL_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 205: + case 198: /* Line 868 of glr.c */ #line 876 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 04; temp->default_string_ = ((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)==NULL)?"":string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = temp; } - break; - - case 206: + { + ((*yyvalp).ast_node) = new AstCreateDef( + AST_CREATE_DEF_NAME, 1, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem 
const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 199: /* Line 868 of glr.c */ #line 877 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 010;temp->int_num_ = atoi((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = temp; } - break; + { + ((*yyvalp).ast_node) = new AstCreateDef( + AST_CREATE_DEF_PR_KEY, 2, "", NULL, NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 207: + case 200: /* Line 868 of glr.c */ #line 878 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 020; temp->double_num_ = atof((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = temp; } - break; + { + ((*yyvalp).ast_node) = new AstCreateDef( + AST_CREATE_DEF_KEY, 3, "", NULL, NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 208: + case 201: /* Line 868 of glr.c */ #line 879 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 040; temp->int_num_ = atoi((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = temp; } - break; + { + ((*yyvalp).ast_node) = new AstCreateDef( + AST_CREATE_DEF_INDEX, 4, "", NULL, NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.ast_node)); + } break; - case 209: + case 202: /* Line 868 of glr.c */ #line 880 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - 
(2))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 0100; ((*yyvalp).ast_node) = temp; } - break; + { + ((*yyvalp).ast_node) = new AstCreateDef( + AST_CREATE_DEF_FTEXT_INDEX, 5, "", NULL, NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 210: + case 203: /* Line 868 of glr.c */ #line 881 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 0200; ((*yyvalp).ast_node) = temp; } - break; - - case 211: -/* Line 868 of glr.c */ -#line 882 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 0400; ((*yyvalp).ast_node) = temp; } - break; + { + ((*yyvalp).ast_node) = new AstCreateDef( + AST_CREATE_DEF_FTEXT_KEY, 6, "", NULL, NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 212: + case 204: /* Line 868 of glr.c */ #line 883 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 01000; ((*yyvalp).ast_node) = temp; } - break; + { + ((*yyvalp).ast_node) = + new AstColumnAtts(AST_COLUMN_ATTS, 0, 0, 0, "", NULL); + } break; - case 213: + case 205: /* Line 868 of glr.c */ #line 884 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 02000; ((*yyvalp).ast_node) = temp; } - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 01; + ((*yyvalp).ast_node) = temp; + } break; - case 214: + case 206: /* Line 868 of glr.c */ #line 885 "sql.ypp" - { AstColumnAtts* temp = static_cast((((yyGLRStackItem const 
*)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node));temp->datatype_ |= 04000; temp->col_list_ = (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.ast_node); ((*yyvalp).ast_node) = temp;} - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (2))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 02; + ((*yyvalp).ast_node) = temp; + } break; - case 215: + case 207: +/* Line 868 of glr.c */ +#line 886 "sql.ypp" + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 04; + temp->default_string_ = + ((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval) == NULL) + ? "" + : string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = temp; + } break; + + case 208: +/* Line 868 of glr.c */ +#line 887 "sql.ypp" + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 010; + temp->int_num_ = atoi((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = temp; + } break; + + case 209: +/* Line 868 of glr.c */ +#line 888 "sql.ypp" + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 020; + temp->double_num_ = + atof((((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = temp; + } break; + + case 210: /* Line 868 of glr.c */ #line 889 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 1, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node), 0, NULL, 0, NULL); } - break; + { + AstColumnAtts *temp = 
static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 040; + temp->int_num_ = atoi((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = temp; + } break; - case 216: + case 211: /* Line 868 of glr.c */ #line 890 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 2, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (2))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 0100; + ((*yyvalp).ast_node) = temp; + } break; - case 217: + case 212: /* Line 868 of glr.c */ #line 891 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 3, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 0200; + ((*yyvalp).ast_node) = temp; + } break; - case 218: + case 213: /* Line 868 of glr.c */ #line 892 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 4, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 0400; + ((*yyvalp).ast_node) = temp; + } break; - case 219: + case 214: /* Line 868 of glr.c */ #line 893 "sql.ypp" - { 
((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 5, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (2))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 01000; + ((*yyvalp).ast_node) = temp; + } break; - case 220: + case 215: /* Line 868 of glr.c */ #line 894 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 6, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 02000; + ((*yyvalp).ast_node) = temp; + } break; - case 221: + case 216: /* Line 868 of glr.c */ #line 895 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 7, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; - - case 222: -/* Line 868 of glr.c */ -#line 896 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 8, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; - - case 223: -/* Line 868 of glr.c */ -#line 897 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 9, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); 
} - break; - - case 224: -/* Line 868 of glr.c */ -#line 898 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 10, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; + { + AstColumnAtts *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node)); + temp->datatype_ |= 04000; + temp->col_list_ = (((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (5))] + .yystate.yysemantics.yysval.ast_node); + ((*yyvalp).ast_node) = temp; + } break; - case 225: + case 217: /* Line 868 of glr.c */ #line 899 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 11, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 1, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.ast_node), + 0, NULL, 0, NULL); + } break; - case 226: + case 218: /* Line 868 of glr.c */ #line 900 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 12, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 2, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 227: + case 219: /* Line 868 of glr.c */ #line 901 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 13, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 3, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + 
yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 228: + case 220: /* Line 868 of glr.c */ #line 902 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 14, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 4, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 229: + case 221: /* Line 868 of glr.c */ #line 903 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 15, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 5, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 230: + case 222: /* Line 868 of glr.c */ #line 904 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 16, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 6, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 231: + case 223: /* Line 868 of glr.c */ #line 905 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 17, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 7, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - 
(3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 232: + case 224: /* Line 868 of glr.c */ #line 906 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 18, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 8, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 233: + case 225: /* Line 868 of glr.c */ #line 907 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 19, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.ast_node), 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 9, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 234: + case 226: /* Line 868 of glr.c */ #line 908 "sql.ypp" - { AstNode* temp = new AstOptLength(AST_OPT_LENGTH,atoi((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.strval)),NULL); - ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 20, temp, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 10, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; + + case 227: +/* Line 868 of glr.c */ +#line 909 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 11, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - 
(3))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL, 0, NULL); + } break; - case 235: + case 228: /* Line 868 of glr.c */ #line 910 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 21, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 12, NULL, 0, NULL, 0, NULL); + } break; - case 236: + case 229: /* Line 868 of glr.c */ #line 911 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 22, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 13, NULL, 0, NULL, 0, NULL); + } break; - case 237: + case 230: /* Line 868 of glr.c */ #line 912 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 23, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 14, NULL, 0, NULL, 0, NULL); + } break; - case 238: + case 231: /* Line 868 of glr.c */ #line 913 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 24, NULL, 0, NULL, 0, NULL); } - break; + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 15, NULL, 0, NULL, 0, NULL); + } break; - case 239: + case 232: /* Line 868 of glr.c */ #line 914 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 25, NULL, 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.intval), NULL); } - break; + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 16, NULL, 0, NULL, 0, NULL); + } break; - case 240: + case 233: /* Line 868 of glr.c */ #line 915 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 26, NULL, 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - 
(3))].yystate.yysemantics.yysval.intval), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 17, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + 0, (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + 0, NULL); + } break; - case 241: + case 234: /* Line 868 of glr.c */ #line 916 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 27, NULL, 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.intval), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 18, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node), + 0, (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + 0, NULL); + } break; - case 242: + case 235: /* Line 868 of glr.c */ #line 917 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 28, NULL, 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.intval), NULL); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 19, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.ast_node), + 0, NULL, 0, NULL); + } break; - case 243: + case 236: /* Line 868 of glr.c */ #line 918 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 29, NULL, 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node), 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 244: -/* Line 868 of glr.c */ -#line 919 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDataType (AST_DATA_TYPE, 30, NULL, 0, (((yyGLRStackItem const *)yyvsp)[YYFILL 
((5) - (5))].yystate.yysemantics.yysval.ast_node), 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; + { + AstNode *temp = new AstOptLength( + AST_OPT_LENGTH, + atoi((((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.strval)), + NULL); + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 20, temp, 0, NULL, 0, NULL); + } break; + + case 237: +/* Line 868 of glr.c */ +#line 920 "sql.ypp" + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 21, NULL, 0, NULL, 0, NULL); + } break; - case 245: + case 238: /* Line 868 of glr.c */ #line 921 "sql.ypp" - { ((*yyvalp).ast_node) = new AstEnumList( AST_ENUM, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval)), NULL); } - break; + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 22, NULL, 0, NULL, 0, NULL); + } break; - case 246: + case 239: /* Line 868 of glr.c */ #line 922 "sql.ypp" - { ((*yyvalp).ast_node) = new AstEnumList( AST_ENUM_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 23, NULL, 0, NULL, 0, NULL); + } break; + + case 240: +/* Line 868 of glr.c */ +#line 923 "sql.ypp" + { + ((*yyvalp).ast_node) = + new AstDataType(AST_DATA_TYPE, 24, NULL, 0, NULL, 0, NULL); + } break; + + case 241: +/* Line 868 of glr.c */ +#line 924 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 25, NULL, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.intval), + NULL); + } break; - case 247: + case 242: /* Line 868 of glr.c */ #line 925 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = 
new AstDataType( + AST_DATA_TYPE, 26, NULL, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.intval), + NULL); + } break; - case 248: + case 243: /* Line 868 of glr.c */ #line 926 "sql.ypp" - { ((*yyvalp).ast_node) = new AstOptLength (AST_OPT_LENGTH, atoi((((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.strval)), 0); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 27, NULL, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.intval), + NULL); + } break; - case 249: + case 244: /* Line 868 of glr.c */ #line 927 "sql.ypp" - { ((*yyvalp).ast_node) = new AstOptLength (AST_OPT_LENGTH, atoi((((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (5))].yystate.yysemantics.yysval.strval)), atoi((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.strval))); } - break; + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 28, NULL, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.intval), + NULL); + } break; + + case 245: +/* Line 868 of glr.c */ +#line 928 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 29, NULL, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node), + 0, (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 250: + case 246: /* Line 868 of glr.c */ -#line 930 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 929 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDataType( + AST_DATA_TYPE, 30, NULL, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - 
(5))].yystate.yysemantics.yysval.ast_node), + 0, (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 251: + case 247: /* Line 868 of glr.c */ #line 931 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; + { + ((*yyvalp).ast_node) = new AstEnumList( + AST_ENUM, string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval)), + NULL); + } break; - case 252: + case 248: /* Line 868 of glr.c */ -#line 934 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 932 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstEnumList( + AST_ENUM_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 253: + case 249: /* Line 868 of glr.c */ #line 935 "sql.ypp" - { ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 01; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 254: + case 250: /* Line 868 of glr.c */ #line 936 "sql.ypp" - { ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 02; } - break; + { + ((*yyvalp).ast_node) = new AstOptLength( + AST_OPT_LENGTH, + atoi((((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.strval)), + 0); + } break; - case 255: + case 251: /* Line 868 of glr.c */ -#line 939 "sql.ypp" - { ((*yyvalp).ast_node) =new AstOptCsc(AST_OPT_CSC, 0, "", "");} - break; +#line 937 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstOptLength( + AST_OPT_LENGTH, + atoi((((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (5))].yystate.yysemantics.yysval.strval)), + atoi((((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.strval))); + } break; - case 256: + case 252: /* Line 868 of glr.c */ -#line 941 "sql.ypp" - { AstOptCsc* 
temp=static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (4))].yystate.yysemantics.yysval.ast_node));temp->data_type_ |= 01; temp->str1_ = string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = temp; } - break; +#line 940 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 257: + case 253: /* Line 868 of glr.c */ -#line 943 "sql.ypp" - { AstOptCsc* temp=static_cast((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node));temp->data_type_ |= 02; temp->str2_ = string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)); ((*yyvalp).ast_node) = temp; } - break; +#line 941 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 258: + case 254: /* Line 868 of glr.c */ -#line 947 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);/* puts("SQL parser: This is a create_projection statement");*/ } - break; +#line 944 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 259: + case 255: /* Line 868 of glr.c */ -#line 951 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateProjection(AST_CREATE_PROJECTION, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (10))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (10))].yystate.yysemantics.yysval.ast_node), 1, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((10) - (10))].yystate.yysemantics.yysval.strval))); } - break; +#line 945 "sql.ypp" + { + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 01; + } break; - case 260: + case 256: /* Line 868 of glr.c */ -#line 953 "sql.ypp" - { - if ((((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (13))].yystate.yysemantics.yysval.subtok) != 4) { yyerror(result,"please give a specific number"); } - else { ((*yyvalp).ast_node) = new 
AstCreateProjection(AST_CREATE_PROJECTION_NUM, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (13))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (13))].yystate.yysemantics.yysval.ast_node), atoi((((yyGLRStackItem const *)yyvsp)[YYFILL ((10) - (13))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((13) - (13))].yystate.yysemantics.yysval.strval))); } - } - break; +#line 946 "sql.ypp" + { + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 02; + } break; - case 261: + case 257: /* Line 868 of glr.c */ -#line 960 "sql.ypp" - { ((*yyvalp).ast_node)=(((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);} - break; +#line 949 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstOptCsc(AST_OPT_CSC, 0, "", ""); + } break; - case 262: + case 258: /* Line 868 of glr.c */ -#line 965 "sql.ypp" - { ((*yyvalp).ast_node) = new AstCreateIndex(AST_CREATE_INDEX, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (10))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (10))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (10))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (10))].yystate.yysemantics.yysval.strval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (10))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 951 "sql.ypp" + { + AstOptCsc *temp = static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (4))].yystate.yysemantics.yysval.ast_node)); + temp->data_type_ |= 01; + temp->str1_ = string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + .yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = temp; + } break; - case 263: + case 259: /* Line 868 of glr.c */ -#line 968 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 953 "sql.ypp" + { + AstOptCsc *temp = 
static_cast( + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + temp->data_type_ |= 02; + temp->str2_ = string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)); + ((*yyvalp).ast_node) = temp; + } break; + + case 260: +/* Line 868 of glr.c */ +#line 957 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval + .ast_node); /* puts("SQL parser: This is a + create_projection + statement");*/ + } break; - case 264: + case 261: /* Line 868 of glr.c */ -#line 969 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 961 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstCreateProjection( + AST_CREATE_PROJECTION, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (10))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (10))].yystate.yysemantics.yysval.ast_node), + 1, string((((yyGLRStackItem const *)yyvsp)[YYFILL((10) - (10))] + .yystate.yysemantics.yysval.strval))); + } break; + + case 262: +/* Line 868 of glr.c */ +#line 963 "sql.ypp" + { + if ((((yyGLRStackItem const *)yyvsp)[YYFILL((9) - (13))] + .yystate.yysemantics.yysval.subtok) != 4) { + yyerror(result, "please give a specific number"); + } else { + ((*yyvalp).ast_node) = new AstCreateProjection( + AST_CREATE_PROJECTION_NUM, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (13))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (13))].yystate.yysemantics.yysval.ast_node), + atoi((((yyGLRStackItem const *)yyvsp)[YYFILL((10) - (13))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((13) - (13))] + .yystate.yysemantics.yysval.strval))); + } + } break; - case 265: + case 263: /* Line 868 of glr.c */ #line 970 "sql.ypp" - { ((*yyvalp).intval) = 2; } - break; + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + 
.yystate.yysemantics.yysval.ast_node); + } break; - case 266: -/* Line 868 of glr.c */ -#line 971 "sql.ypp" - { ((*yyvalp).intval) = 3; } - break; - - case 267: -/* Line 868 of glr.c */ -#line 974 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; - - case 268: + case 264: /* Line 868 of glr.c */ #line 975 "sql.ypp" - { ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.intval); } - break; - - case 269: + { + ((*yyvalp).ast_node) = new AstCreateIndex( + AST_CREATE_INDEX, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (10))] + .yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (10))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (10))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (10))].yystate.yysemantics.yysval.strval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((9) - (10))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 265: /* Line 868 of glr.c */ #line 978 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; + { + ((*yyvalp).intval) = 0; + } break; - case 270: + case 266: /* Line 868 of glr.c */ #line 979 "sql.ypp" - { ((*yyvalp).intval) = 2; } - break; - - case 271: -/* Line 868 of glr.c */ -#line 982 "sql.ypp" - { ((*yyvalp).ast_node) = new AstIndexColList(AST_INDEX_COL, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.intval), NULL); } - break; - - case 272: -/* Line 868 of glr.c */ -#line 983 "sql.ypp" - { ((*yyvalp).ast_node) = new AstIndexColList(AST_INDEX_COL_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (5))].yystate.yysemantics.yysval.ast_node), 
(((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 273: -/* Line 868 of glr.c */ -#line 987 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);} - break; + { + ((*yyvalp).intval) = 1; + } break; - case 274: + case 267: /* Line 868 of glr.c */ -#line 991 "sql.ypp" +#line 980 "sql.ypp" { - if ((((yyGLRStackItem const *)yyvsp)[YYFILL ((11) - (12))].yystate.yysemantics.yysval.subtok) != 4) { yyerror(result,"please give a specific number"); } - else {((*yyvalp).ast_node) = new AstLoadTable(AST_LOAD_TABLE, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (12))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (12))].yystate.yysemantics.yysval.ast_node), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (12))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (12))].yystate.yysemantics.yysval.strval)), atof((((yyGLRStackItem const *)yyvsp)[YYFILL ((12) - (12))].yystate.yysemantics.yysval.strval)), 0);} - } - break; + ((*yyvalp).intval) = 2; + } break; - case 275: + case 268: /* Line 868 of glr.c */ -#line 995 "sql.ypp" - { ((*yyvalp).ast_node) = new AstLoadTable(AST_LOAD_TABLE, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (9))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (9))].yystate.yysemantics.yysval.ast_node), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (9))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (9))].yystate.yysemantics.yysval.strval)), 1.0, 0);} - break; +#line 981 "sql.ypp" + { + ((*yyvalp).intval) = 3; + } break; - case 276: + case 269: /* Line 868 of glr.c */ -#line 996 "sql.ypp" - { ((*yyvalp).ast_node) = new AstLoadTable(AST_LOAD_TABLE, 
string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (9))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (9))].yystate.yysemantics.yysval.ast_node), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (9))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (9))].yystate.yysemantics.yysval.strval)), 1.0, 1);} - break; +#line 984 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 277: + case 270: /* Line 868 of glr.c */ -#line 998 "sql.ypp" - { if ((((yyGLRStackItem const *)yyvsp)[YYFILL ((11) - (12))].yystate.yysemantics.yysval.subtok) != 4) { yyerror(result,"please give a specific number"); } - else {((*yyvalp).ast_node) = new AstLoadTable(AST_LOAD_TABLE, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (12))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (12))].yystate.yysemantics.yysval.ast_node), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (12))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((9) - (12))].yystate.yysemantics.yysval.strval)), atof((((yyGLRStackItem const *)yyvsp)[YYFILL ((12) - (12))].yystate.yysemantics.yysval.strval)), 1);} - } - break; +#line 985 "sql.ypp" + { + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.intval); + } break; - case 278: + case 271: /* Line 868 of glr.c */ -#line 1004 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node); /*output($$, 1); puts("SQL parser: This is a drop_index statement");*/ } - break; +#line 988 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 279: + case 272: /* Line 868 of glr.c */ -#line 1007 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDropIndex(AST_DROP_INDEX, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const 
*)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.strval))); } - break; +#line 989 "sql.ypp" + { + ((*yyvalp).intval) = 2; + } break; - case 280: + case 273: /* Line 868 of glr.c */ -#line 1011 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);/* output($$, 1); puts("SQL parser: This is a drop_database statement");*/ } - break; +#line 992 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstIndexColList( + AST_INDEX_COL, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.intval), + NULL); + } break; + + case 274: +/* Line 868 of glr.c */ +#line 993 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstIndexColList( + AST_INDEX_COL_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (5))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (5))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 275: +/* Line 868 of glr.c */ +#line 997 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 281: + case 276: /* Line 868 of glr.c */ -#line 1015 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDropDatabase(AST_DROP_DB, 1, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval))); } - break; +#line 1001 "sql.ypp" + { + if ((((yyGLRStackItem const *)yyvsp)[YYFILL((11) - (12))] + .yystate.yysemantics.yysval.subtok) != 4) { + 
yyerror(result, "please give a specific number"); + } else { + ((*yyvalp).ast_node) = new AstLoadTable( + AST_LOAD_TABLE, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (12))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (12))].yystate.yysemantics.yysval.ast_node), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (12))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((9) - (12))] + .yystate.yysemantics.yysval.strval)), + atof((((yyGLRStackItem const *)yyvsp)[YYFILL((12) - (12))] + .yystate.yysemantics.yysval.strval)), + 1); + } + } break; - case 282: + case 277: /* Line 868 of glr.c */ -#line 1016 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDropDatabase(AST_DROP_SCHEMA, 2, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (4))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (4))].yystate.yysemantics.yysval.strval))); } - break; +#line 1005 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstLoadTable( + AST_LOAD_TABLE, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (9))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (9))].yystate.yysemantics.yysval.ast_node), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (9))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((9) - (9))] + .yystate.yysemantics.yysval.strval)), + 1.0, 1); + } break; + + case 278: +/* Line 868 of glr.c */ +#line 1006 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstLoadTable( + AST_LOAD_TABLE, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (9))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (9))].yystate.yysemantics.yysval.ast_node), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (9))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((9) - (9))] + 
.yystate.yysemantics.yysval.strval)), + 1.0, 2); + } break; + + case 279: +/* Line 868 of glr.c */ +#line 1008 "sql.ypp" + { + if ((((yyGLRStackItem const *)yyvsp)[YYFILL((11) - (12))] + .yystate.yysemantics.yysval.subtok) != 4) { + yyerror(result, "please give a specific number"); + } else { + ((*yyvalp).ast_node) = new AstLoadTable( + AST_LOAD_TABLE, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (12))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (12))].yystate.yysemantics.yysval.ast_node), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (12))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((9) - (12))] + .yystate.yysemantics.yysval.strval)), + atof((((yyGLRStackItem const *)yyvsp)[YYFILL((12) - (12))] + .yystate.yysemantics.yysval.strval)), + 2); + } + } break; - case 283: + case 280: /* Line 868 of glr.c */ -#line 1019 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 1014 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval + .ast_node); /*output($$, 1); puts("SQL + parser: This is a drop_index + statement");*/ + } break; - case 284: + case 281: /* Line 868 of glr.c */ -#line 1020 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 1017 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDropIndex( + AST_DROP_INDEX, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (5))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (5))] + .yystate.yysemantics.yysval.strval))); + } break; - case 285: + case 282: /* Line 868 of glr.c */ -#line 1024 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node); /*output($$, 1); puts("SQL parser: This is a drop_table statement"); */} - break; +#line 1021 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - 
(1))] + .yystate.yysemantics.yysval + .ast_node); /* output($$, 1); puts("SQL + parser: This is a + drop_database statement");*/ + } break; - case 286: + case 283: /* Line 868 of glr.c */ -#line 1028 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDropTable(AST_DROP_TABLE, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (6))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (6))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (6))].yystate.yysemantics.yysval.intval), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (6))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 1025 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDropDatabase( + AST_DROP_DB, 1, (((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (4))] + .yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + .yystate.yysemantics.yysval.strval))); + } break; - case 287: + case 284: /* Line 868 of glr.c */ -#line 1031 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDropTableList(AST_DROP_TABLE_LIST, "", string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.strval)), NULL); } - break; +#line 1026 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDropDatabase( + AST_DROP_SCHEMA, 2, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (4))].yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (4))] + .yystate.yysemantics.yysval.strval))); + } break; - case 288: + case 285: /* Line 868 of glr.c */ -#line 1032 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDropTableList(AST_DROP_TABLE_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)), NULL); } - break; +#line 1029 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 289: + case 286: /* Line 868 of glr.c */ -#line 1033 "sql.ypp" - { 
((*yyvalp).ast_node) = new AstDropTableList(AST_DROP_TABLE_LIST, "", string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 1030 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 290: + case 287: /* Line 868 of glr.c */ #line 1034 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDropTableList(AST_DROP_TABLE_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.strval)), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 291: -/* Line 868 of glr.c */ -#line 1037 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval + .ast_node); /*output($$, 1); puts("SQL + parser: This is a drop_table + statement"); */ + } break; - case 292: + case 288: /* Line 868 of glr.c */ #line 1038 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; + { + ((*yyvalp).ast_node) = new AstDropTable( + AST_DROP_TABLE, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (6))] + .yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (6))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (6))].yystate.yysemantics.yysval.intval), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (6))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 289: +/* Line 868 of glr.c */ +#line 1041 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDropTableList( + AST_DROP_TABLE_LIST, "", + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.strval)), + NULL); + } break; - case 293: + case 290: /* Line 868 of glr.c */ -#line 1039 "sql.ypp" - { ((*yyvalp).intval) = 2; } - 
break; +#line 1042 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDropTableList( + AST_DROP_TABLE_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)), + NULL); + } break; + + case 291: +/* Line 868 of glr.c */ +#line 1043 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDropTableList( + AST_DROP_TABLE_LIST, "", + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (3))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 294: + case 292: /* Line 868 of glr.c */ -#line 1045 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node);} - break; +#line 1044 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDropTableList( + AST_DROP_TABLE_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (5))] + .yystate.yysemantics.yysval.strval)), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (5))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 293: +/* Line 868 of glr.c */ +#line 1047 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 295: + case 294: /* Line 868 of glr.c */ -#line 1051 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertStmt(AST_INSERT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (8))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (8))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (8))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((8) - (8))].yystate.yysemantics.yysval.ast_node), NULL, NULL); } - break; +#line 
1048 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 296: + case 295: /* Line 868 of glr.c */ -#line 1054 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; +#line 1049 "sql.ypp" + { + ((*yyvalp).intval) = 2; + } break; - case 297: + case 296: /* Line 868 of glr.c */ #line 1055 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node); } - break; - - case 298: -/* Line 868 of glr.c */ -#line 1058 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; - - case 299: -/* Line 868 of glr.c */ -#line 1059 "sql.ypp" - { ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 01 ; } - break; - - case 300: -/* Line 868 of glr.c */ -#line 1060 "sql.ypp" - { ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 02 ; } - break; + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 301: + case 297: /* Line 868 of glr.c */ #line 1061 "sql.ypp" - { ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 04 ; } - break; + { + ((*yyvalp).ast_node) = new AstInsertStmt( + AST_INSERT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (8))] + .yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (8))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((7) - (8))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((8) - (8))].yystate.yysemantics.yysval.ast_node), + NULL, NULL); + } break; + + case 298: +/* Line 868 of glr.c */ +#line 1064 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 302: + case 299: /* Line 868 of glr.c */ -#line 1062 "sql.ypp" - { 
((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.intval) | 010 ; } - break; +#line 1065 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (5))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 305: + case 300: /* Line 868 of glr.c */ #line 1068 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).intval) = 0; + } break; - case 306: + case 301: /* Line 868 of glr.c */ #line 1069 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node); } - break; + { + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 01; + } break; - case 307: + case 302: /* Line 868 of glr.c */ -#line 1072 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertValList(AST_INSERT_VALUE_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 1070 "sql.ypp" + { + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 02; + } break; - case 308: + case 303: /* Line 868 of glr.c */ -#line 1073 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertValList(AST_INSERT_VALUE_LIST, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 1071 "sql.ypp" + { + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 04; + } break; - case 309: + case 304: /* Line 868 of glr.c */ -#line 1077 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertVals(AST_INSERT_VALUE,0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 1072 "sql.ypp" + { + ((*yyvalp).intval) = (((yyGLRStackItem const 
*)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.intval) | + 010; + } break; - case 310: + case 307: /* Line 868 of glr.c */ #line 1078 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertVals(AST_INSERT_VALUE,1, NULL, NULL); } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 311: + case 308: /* Line 868 of glr.c */ #line 1079 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertVals(AST_INSERT_VALUE,0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; - - case 312: -/* Line 868 of glr.c */ -#line 1080 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertVals(AST_INSERT_VALUE,1, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node)); } - break; + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (3))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 313: + case 309: /* Line 868 of glr.c */ -#line 1084 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertStmt(AST_INSERT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (7))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (7))].yystate.yysemantics.yysval.strval)), NULL, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (7))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (7))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 1082 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstInsertValList( + AST_INSERT_VALUE_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 314: + case 310: /* Line 868 of glr.c */ -#line 1088 "sql.ypp" - { ((*yyvalp).ast_node) = new AstInsertStmt(AST_INSERT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (7))].yystate.yysemantics.yysval.intval), string((((yyGLRStackItem const *)yyvsp)[YYFILL 
((4) - (7))].yystate.yysemantics.yysval.strval)), (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (7))].yystate.yysemantics.yysval.ast_node), NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((7) - (7))].yystate.yysemantics.yysval.ast_node), NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (7))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 1083 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstInsertValList( + AST_INSERT_VALUE_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; - case 315: + case 311: /* Line 868 of glr.c */ -#line 1092 "sql.ypp" - { - if((((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.subtok) != 4) yyerror(result,"bad insert assignment to %s", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)); - else ((*yyvalp).ast_node) = new AstInsertAssignList(AST_INSERT_ASSIGN_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)), 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (3))].yystate.yysemantics.yysval.ast_node), NULL); } - break; +#line 1087 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstInsertVals( + AST_INSERT_VALUE, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (1))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; - case 316: + case 312: /* Line 868 of glr.c */ -#line 1095 "sql.ypp" - { - if ((((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (3))].yystate.yysemantics.yysval.subtok) != 4) yyerror(result,"bad insert assignment to %s", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)); - else ((*yyvalp).ast_node) = new AstInsertAssignList(AST_INSERT_ASSIGN_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (3))].yystate.yysemantics.yysval.strval)), 1, NULL, NULL); } - break; +#line 1088 "sql.ypp" + { + 
((*yyvalp).ast_node) = new AstInsertVals(AST_INSERT_VALUE, 1, NULL, NULL); + } break; - case 317: + case 313: /* Line 868 of glr.c */ -#line 1098 "sql.ypp" - { - if ((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.subtok) != 4) yyerror(result,"bad insert assignment to %s", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node)); - else ((*yyvalp).ast_node) = new AstInsertAssignList(AST_INSERT_ASSIGN_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.strval)), 0, (((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 1089 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstInsertVals( + AST_INSERT_VALUE, 0, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 318: + case 314: /* Line 868 of glr.c */ -#line 1101 "sql.ypp" - { - if ((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.subtok) != 4) yyerror(result,"bad insert assignment to %s", (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node)); - else ((*yyvalp).ast_node) = new AstInsertAssignList(AST_INSERT_ASSIGN_LIST, string((((yyGLRStackItem const *)yyvsp)[YYFILL ((3) - (5))].yystate.yysemantics.yysval.strval)), 1, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (5))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 1090 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstInsertVals( + AST_INSERT_VALUE, 1, NULL, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node)); + } break; - case 319: + case 315: /* Line 868 of glr.c */ -#line 1106 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const 
*)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node); /*output($$, 1); puts("SQL parser: This is a show statement");*/ } - break; - - case 320: +#line 1094 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstInsertStmt( + AST_INSERT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (7))] + .yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (7))] + .yystate.yysemantics.yysval.strval)), + NULL, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (7))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (7))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 316: /* Line 868 of glr.c */ -#line 1109 "sql.ypp" - { ((*yyvalp).ast_node) = new AstShowStmt(AST_SHOW_STMT,1, (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (5))].yystate.yysemantics.yysval.intval), ((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.intval)==NULL)?"":string((((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (5))].yystate.yysemantics.yysval.intval)), ((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.strval)==NULL)?"":string((((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (5))].yystate.yysemantics.yysval.strval))); } - break; +#line 1098 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstInsertStmt( + AST_INSERT_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (7))] + .yystate.yysemantics.yysval.intval), + string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (7))] + .yystate.yysemantics.yysval.strval)), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (7))].yystate.yysemantics.yysval.ast_node), + NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL((7) - (7))] + .yystate.yysemantics.yysval.ast_node), + NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL((6) - (7))] + .yystate.yysemantics.yysval.ast_node)); + } break; + + case 317: +/* Line 868 of glr.c */ +#line 1102 "sql.ypp" + { + if ((((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - 
(3))].yystate.yysemantics.yysval.subtok) != 4) + yyerror(result, "bad insert assignment to %s", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.strval)); + else + ((*yyvalp).ast_node) = new AstInsertAssignList( + AST_INSERT_ASSIGN_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + 0, (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 318: +/* Line 868 of glr.c */ +#line 1105 "sql.ypp" + { + if ((((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (3))].yystate.yysemantics.yysval.subtok) != 4) + yyerror(result, "bad insert assignment to %s", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.strval)); + else + ((*yyvalp).ast_node) = new AstInsertAssignList( + AST_INSERT_ASSIGN_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (3))] + .yystate.yysemantics.yysval.strval)), + 1, NULL, NULL); + } break; - case 321: + case 319: /* Line 868 of glr.c */ -#line 1110 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 322: +#line 1108 "sql.ypp" + { + if ((((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.subtok) != 4) + yyerror(result, "bad insert assignment to %s", + (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (5))] + .yystate.yysemantics.yysval.ast_node)); + else + ((*yyvalp).ast_node) = new AstInsertAssignList( + AST_INSERT_ASSIGN_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (5))] + .yystate.yysemantics.yysval.strval)), + 0, (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 320: /* Line 868 of glr.c */ #line 1111 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 323: -/* Line 868 of glr.c */ -#line 1112 "sql.ypp" - { ((*yyvalp).ast_node) = 
NULL; } - break; - - case 324: -/* Line 868 of glr.c */ -#line 1113 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 325: -/* Line 868 of glr.c */ -#line 1114 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 326: -/* Line 868 of glr.c */ -#line 1115 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + if ((((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - (5))].yystate.yysemantics.yysval.subtok) != 4) + yyerror(result, "bad insert assignment to %s", + (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (5))] + .yystate.yysemantics.yysval.ast_node)); + else + ((*yyvalp).ast_node) = new AstInsertAssignList( + AST_INSERT_ASSIGN_LIST, + string((((yyGLRStackItem const *)yyvsp)[YYFILL((3) - (5))] + .yystate.yysemantics.yysval.strval)), + 1, NULL, (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (5))] + .yystate.yysemantics.yysval.ast_node)); + } break; - case 327: + case 321: /* Line 868 of glr.c */ #line 1116 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 328: -/* Line 868 of glr.c */ -#line 1117 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 329: -/* Line 868 of glr.c */ -#line 1118 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval + .ast_node); /*output($$, 1); puts("SQL + parser: This is a show + statement");*/ + } break; - case 330: + case 322: /* Line 868 of glr.c */ #line 1119 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; - - case 331: + { + ((*yyvalp).ast_node) = new AstShowStmt( + AST_SHOW_STMT, 1, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (5))] + .yystate.yysemantics.yysval.intval), + ((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (5))] + .yystate.yysemantics.yysval.intval) == NULL) + ? 
"" + : string((((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (5))] + .yystate.yysemantics.yysval.intval)), + ((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (5))] + .yystate.yysemantics.yysval.strval) == NULL) + ? "" + : string((((yyGLRStackItem const *)yyvsp)[YYFILL((5) - (5))] + .yystate.yysemantics.yysval.strval))); + } break; + + case 323: /* Line 868 of glr.c */ #line 1120 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 332: + case 324: /* Line 868 of glr.c */ #line 1121 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 333: + case 325: /* Line 868 of glr.c */ #line 1122 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 334: + case 326: /* Line 868 of glr.c */ #line 1123 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 335: + case 327: /* Line 868 of glr.c */ #line 1124 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 336: + case 328: /* Line 868 of glr.c */ #line 1125 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 337: + case 329: /* Line 868 of glr.c */ #line 1126 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 338: + case 330: /* Line 868 of glr.c */ #line 1127 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 339: + case 331: /* Line 868 of glr.c */ #line 1128 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 340: + case 332: /* Line 868 of glr.c */ #line 1129 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 341: + case 333: /* Line 868 of glr.c */ #line 1130 "sql.ypp" - { 
((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 342: + case 334: /* Line 868 of glr.c */ #line 1131 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 343: + case 335: /* Line 868 of glr.c */ #line 1132 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 344: + case 336: /* Line 868 of glr.c */ #line 1133 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 345: + case 337: /* Line 868 of glr.c */ #line 1134 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 346: + case 338: /* Line 868 of glr.c */ #line 1135 "sql.ypp" - { ((*yyvalp).ast_node) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; + + case 339: +/* Line 868 of glr.c */ +#line 1136 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 347: + case 340: /* Line 868 of glr.c */ #line 1137 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 348: + case 341: /* Line 868 of glr.c */ #line 1138 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; + + case 342: +/* Line 868 of glr.c */ +#line 1139 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; + + case 343: +/* Line 868 of glr.c */ +#line 1140 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 349: + case 344: /* Line 868 of glr.c */ #line 1141 "sql.ypp" - { ((*yyvalp).intval) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 350: + case 345: /* Line 868 of glr.c */ #line 1142 "sql.ypp" - { ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.strval); } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; + + case 346: +/* Line 868 of glr.c */ +#line 1143 "sql.ypp" + { + ((*yyvalp).ast_node) = 
NULL; + } break; + + case 347: +/* Line 868 of glr.c */ +#line 1144 "sql.ypp" + { + ((*yyvalp).ast_node) = NULL; + } break; - case 351: + case 348: /* Line 868 of glr.c */ #line 1145 "sql.ypp" - { ((*yyvalp).strval) = NULL; } - break; + { + ((*yyvalp).ast_node) = NULL; + } break; - case 352: + case 349: /* Line 868 of glr.c */ -#line 1146 "sql.ypp" - { ((*yyvalp).strval) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (2))].yystate.yysemantics.yysval.strval); } - break; +#line 1147 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 357: + case 350: /* Line 868 of glr.c */ -#line 1156 "sql.ypp" - { ((*yyvalp).intval) = 0; } - break; +#line 1151 "sql.ypp" + { + ((*yyvalp).intval) = NULL; + } break; - case 358: + case 351: /* Line 868 of glr.c */ -#line 1157 "sql.ypp" - { ((*yyvalp).intval) = 1; } - break; +#line 1152 "sql.ypp" + { + ((*yyvalp).intval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.strval); + } break; - case 359: + case 352: /* Line 868 of glr.c */ -#line 1158 "sql.ypp" - { ((*yyvalp).intval) = 2; } - break; +#line 1155 "sql.ypp" + { + ((*yyvalp).strval) = NULL; + } break; - case 360: + case 353: /* Line 868 of glr.c */ -#line 1161 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (1))].yystate.yysemantics.yysval.ast_node); } - break; +#line 1156 "sql.ypp" + { + ((*yyvalp).strval) = (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.strval); + } break; - case 361: + case 358: /* Line 868 of glr.c */ -#line 1165 "sql.ypp" - { ((*yyvalp).ast_node) = new AstDeleteStmt(AST_DELETE_STMT,(((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (7))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((5) - (7))].yystate.yysemantics.yysval.ast_node),(((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (7))].yystate.yysemantics.yysval.ast_node) );} - break; +#line 1166 "sql.ypp" + { + ((*yyvalp).intval) = 0; + } break; - case 362: + case 359: 
/* Line 868 of glr.c */ -#line 1169 "sql.ypp" - { emit("DELETEMULTI %d %d %d", (((yyGLRStackItem const *)yyvsp)[YYFILL ((2) - (7))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((4) - (7))].yystate.yysemantics.yysval.ast_node), (((yyGLRStackItem const *)yyvsp)[YYFILL ((6) - (7))].yystate.yysemantics.yysval.ast_node)); } - break; +#line 1167 "sql.ypp" + { + ((*yyvalp).intval) = 1; + } break; - case 363: + case 360: /* Line 868 of glr.c */ -#line 1174 "sql.ypp" - {} - break; +#line 1168 "sql.ypp" + { + ((*yyvalp).intval) = 2; + } break; + + case 361: +/* Line 868 of glr.c */ +#line 1171 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstDescStmt( + AST_DESC_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((2) - (2))] + .yystate.yysemantics.yysval.strval)); + } break; - case 364: + case 362: /* Line 868 of glr.c */ -#line 1177 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node) + 01; } - break; +#line 1174 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; - case 365: + case 363: /* Line 868 of glr.c */ #line 1178 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node) + 02; } - break; + { + ((*yyvalp).ast_node) = new AstDeleteStmt( + AST_DELETE_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (7))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (7))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (7))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 364: +/* Line 868 of glr.c */ +#line 1182 "sql.ypp" + { + emit("DELETEMULTI %d %d %d", + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (7))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((4) - 
(7))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((6) - (7))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 365: +/* Line 868 of glr.c */ +#line 1187 "sql.ypp" + { + } break; + + case 366: +/* Line 868 of glr.c */ +#line 1190 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.ast_node) + + 01; + } break; - case 366: + case 367: /* Line 868 of glr.c */ -#line 1179 "sql.ypp" - { ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL ((1) - (2))].yystate.yysemantics.yysval.ast_node) + 04; } - break; +#line 1191 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.ast_node) + + 02; + } break; - case 367: + case 368: /* Line 868 of glr.c */ -#line 1180 "sql.ypp" - { ((*yyvalp).ast_node) = 0; } - break; +#line 1192 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (2))] + .yystate.yysemantics.yysval.ast_node) + + 04; + } break; - case 368: + case 369: /* Line 868 of glr.c */ -#line 1184 "sql.ypp" - { } - break; +#line 1193 "sql.ypp" + { + ((*yyvalp).ast_node) = 0; + } break; - case 369: + case 370: /* Line 868 of glr.c */ -#line 1185 "sql.ypp" - { } - break; +#line 1197 "sql.ypp" + { + } break; + case 371: +/* Line 868 of glr.c */ +#line 1198 "sql.ypp" + { + } break; + case 374: /* Line 868 of glr.c */ -#line 4488 "sql.tab.cpp" - default: break; - } +#line 1205 "sql.ypp" + { + ((*yyvalp).ast_node) = (((yyGLRStackItem const *)yyvsp)[YYFILL((1) - (1))] + .yystate.yysemantics.yysval.ast_node); + } break; + + case 375: +/* Line 868 of glr.c */ +#line 1208 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstUpdateStmt( + AST_UPDATE_STMT, (((yyGLRStackItem const *)yyvsp)[YYFILL((4) - (5))] + .yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((2) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + 
yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + + case 376: +/* Line 868 of glr.c */ +#line 1211 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstUpdateSetList( + AST_UPDATE_SET_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (3))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (3))].yystate.yysemantics.yysval.ast_node), + NULL); + } break; + + case 377: +/* Line 868 of glr.c */ +#line 1212 "sql.ypp" + { + ((*yyvalp).ast_node) = new AstUpdateSetList( + AST_UPDATE_SET_LIST, + (((yyGLRStackItem const *) + yyvsp)[YYFILL((3) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((5) - (5))].yystate.yysemantics.yysval.ast_node), + (((yyGLRStackItem const *) + yyvsp)[YYFILL((1) - (5))].yystate.yysemantics.yysval.ast_node)); + } break; + +/* Line 868 of glr.c */ +#line 4594 "sql.tab.cpp" + default: + break; + } return yyok; -# undef yyerrok -# undef YYABORT -# undef YYACCEPT -# undef YYERROR -# undef YYBACKUP -# undef yyclearin -# undef YYRECOVERING +#undef yyerrok +#undef YYABORT +#undef YYACCEPT +#undef YYERROR +#undef YYBACKUP +#undef yyclearin +#undef YYRECOVERING } - -/*ARGSUSED*/ static void -yyuserMerge (int yyn, YYSTYPE* yy0, YYSTYPE* yy1) -{ - YYUSE (yy0); - YYUSE (yy1); +/*ARGSUSED*/ static void yyuserMerge(int yyn, YYSTYPE *yy0, YYSTYPE *yy1) { + YYUSE(yy0); + YYUSE(yy1); - switch (yyn) - { - - default: break; - } + switch (yyn) { + default: + break; + } } - /* Bison grammar-table manipulation. */ +/* Bison grammar-table manipulation. */ /*-----------------------------------------------. | Release the memory associated to this symbol. 
| `-----------------------------------------------*/ /*ARGSUSED*/ -static void -yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, struct ParseResult* result) -{ - YYUSE (yyvaluep); - YYUSE (result); +static void yydestruct(const char *yymsg, int yytype, YYSTYPE *yyvaluep, + struct ParseResult *result) { + YYUSE(yyvaluep); + YYUSE(result); - if (!yymsg) - yymsg = "Deleting"; - YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); - - switch (yytype) - { + if (!yymsg) yymsg = "Deleting"; + YY_SYMBOL_PRINT(yymsg, yytype, yyvaluep, yylocationp); - default: - break; - } + switch (yytype) { + default: + break; + } } /** Number of symbols composing the right hand side of rule #RULE. */ -static inline int -yyrhsLength (yyRuleNum yyrule) -{ - return yyr2[yyrule]; -} +static inline int yyrhsLength(yyRuleNum yyrule) { return yyr2[yyrule]; } -static void -yydestroyGLRState (char const *yymsg, yyGLRState *yys, struct ParseResult* result) -{ +static void yydestroyGLRState(char const *yymsg, yyGLRState *yys, + struct ParseResult *result) { if (yys->yyresolved) - yydestruct (yymsg, yystos[yys->yylrState], - &yys->yysemantics.yysval, result); - else - { + yydestruct(yymsg, yystos[yys->yylrState], &yys->yysemantics.yysval, result); + else { #if YYDEBUG - if (yydebug) - { - if (yys->yysemantics.yyfirstVal) - YYFPRINTF (stderr, "%s unresolved ", yymsg); - else - YYFPRINTF (stderr, "%s incomplete ", yymsg); - yy_symbol_print (stderr, yystos[yys->yylrState], - YY_NULL, result); - YYFPRINTF (stderr, "\n"); - } + if (yydebug) { + if (yys->yysemantics.yyfirstVal) + YYFPRINTF(stderr, "%s unresolved ", yymsg); + else + YYFPRINTF(stderr, "%s incomplete ", yymsg); + yy_symbol_print(stderr, yystos[yys->yylrState], YY_NULL, result); + YYFPRINTF(stderr, "\n"); + } #endif - if (yys->yysemantics.yyfirstVal) - { - yySemanticOption *yyoption = yys->yysemantics.yyfirstVal; - yyGLRState *yyrh; - int yyn; - for (yyrh = yyoption->yystate, yyn = yyrhsLength (yyoption->yyrule); - yyn > 0; - 
yyrh = yyrh->yypred, yyn -= 1) - yydestroyGLRState (yymsg, yyrh, result); - } + if (yys->yysemantics.yyfirstVal) { + yySemanticOption *yyoption = yys->yysemantics.yyfirstVal; + yyGLRState *yyrh; + int yyn; + for (yyrh = yyoption->yystate, yyn = yyrhsLength(yyoption->yyrule); + yyn > 0; yyrh = yyrh->yypred, yyn -= 1) + yydestroyGLRState(yymsg, yyrh, result); } + } } /** Left-hand-side symbol for rule #RULE. */ -static inline yySymbol -yylhsNonterm (yyRuleNum yyrule) -{ - return yyr1[yyrule]; -} +static inline yySymbol yylhsNonterm(yyRuleNum yyrule) { return yyr1[yyrule]; } -#define yypact_value_is_default(Yystate) \ - (!!((Yystate) == (-470))) +#define yypact_value_is_default(Yystate) (!!((Yystate) == (-489))) /** True iff LR state STATE has only a default reduction (regardless * of token). */ -static inline yybool -yyisDefaultedState (yyStateNum yystate) -{ - return yypact_value_is_default (yypact[yystate]); +static inline yybool yyisDefaultedState(yyStateNum yystate) { + return yypact_value_is_default(yypact[yystate]); } /** The default reduction for STATE, assuming it has one. */ -static inline yyRuleNum -yydefaultAction (yyStateNum yystate) -{ +static inline yyRuleNum yydefaultAction(yyStateNum yystate) { return yydefact[yystate]; } -#define yytable_value_is_error(Yytable_value) \ - (!!((Yytable_value) == (-371))) +#define yytable_value_is_error(Yytable_value) (!!((Yytable_value) == (-378))) /** Set *YYACTION to the action to take in YYSTATE on seeing YYTOKEN. * Result R means @@ -4617,32 +5875,24 @@ yydefaultAction (yyStateNum yystate) * Set *CONFLICTS to a pointer into yyconfl to 0-terminated list of * conflicting reductions. 
*/ -static inline void -yygetLRActions (yyStateNum yystate, int yytoken, - int* yyaction, const short int** yyconflicts) -{ +static inline void yygetLRActions(yyStateNum yystate, int yytoken, + int *yyaction, + const short int **yyconflicts) { int yyindex = yypact[yystate] + yytoken; - if (yypact_value_is_default (yypact[yystate]) - || yyindex < 0 || YYLAST < yyindex || yycheck[yyindex] != yytoken) - { - *yyaction = -yydefact[yystate]; - *yyconflicts = yyconfl; - } - else if (! yytable_value_is_error (yytable[yyindex])) - { - *yyaction = yytable[yyindex]; - *yyconflicts = yyconfl + yyconflp[yyindex]; - } - else - { - *yyaction = 0; - *yyconflicts = yyconfl + yyconflp[yyindex]; - } + if (yypact_value_is_default(yypact[yystate]) || yyindex < 0 || + YYLAST < yyindex || yycheck[yyindex] != yytoken) { + *yyaction = -yydefact[yystate]; + *yyconflicts = yyconfl; + } else if (!yytable_value_is_error(yytable[yyindex])) { + *yyaction = yytable[yyindex]; + *yyconflicts = yyconfl + yyconflp[yyindex]; + } else { + *yyaction = 0; + *yyconflicts = yyconfl + yyconflp[yyindex]; + } } -static inline yyStateNum -yyLRgotoState (yyStateNum yystate, yySymbol yylhs) -{ +static inline yyStateNum yyLRgotoState(yyStateNum yystate, yySymbol yylhs) { int yyr; yyr = yypgoto[yylhs - YYNTOKENS] + yystate; if (0 <= yyr && yyr <= YYLAST && yycheck[yyr] == yystate) @@ -4651,28 +5901,19 @@ yyLRgotoState (yyStateNum yystate, yySymbol yylhs) return yydefgoto[yylhs - YYNTOKENS]; } -static inline yybool -yyisShiftAction (int yyaction) -{ - return 0 < yyaction; -} +static inline yybool yyisShiftAction(int yyaction) { return 0 < yyaction; } -static inline yybool -yyisErrorAction (int yyaction) -{ - return yyaction == 0; -} +static inline yybool yyisErrorAction(int yyaction) { return yyaction == 0; } - /* GLRStates */ +/* GLRStates */ /** Return a fresh GLRStackItem. Callers should call * YY_RESERVE_GLRSTACK afterwards to make sure there is sufficient * headroom. 
*/ -static inline yyGLRStackItem* -yynewGLRStackItem (yyGLRStack* yystackp, yybool yyisState) -{ - yyGLRStackItem* yynewItem = yystackp->yynextFree; +static inline yyGLRStackItem *yynewGLRStackItem(yyGLRStack *yystackp, + yybool yyisState) { + yyGLRStackItem *yynewItem = yystackp->yynextFree; yystackp->yyspaceLeft -= 1; yystackp->yynextFree += 1; yynewItem->yystate.yyisState = yyisState; @@ -4683,162 +5924,133 @@ yynewGLRStackItem (yyGLRStack* yystackp, yybool yyisState) * RULENUM on the semantic values in RHS to the list of * alternative actions for STATE. Assumes that RHS comes from * stack #K of *STACKP. */ -static void -yyaddDeferredAction (yyGLRStack* yystackp, size_t yyk, yyGLRState* yystate, - yyGLRState* rhs, yyRuleNum yyrule) -{ - yySemanticOption* yynewOption = - &yynewGLRStackItem (yystackp, yyfalse)->yyoption; +static void yyaddDeferredAction(yyGLRStack *yystackp, size_t yyk, + yyGLRState *yystate, yyGLRState *rhs, + yyRuleNum yyrule) { + yySemanticOption *yynewOption = + &yynewGLRStackItem(yystackp, yyfalse)->yyoption; yynewOption->yystate = rhs; yynewOption->yyrule = yyrule; - if (yystackp->yytops.yylookaheadNeeds[yyk]) - { - yynewOption->yyrawchar = yychar; - yynewOption->yyval = yylval; - } - else + if (yystackp->yytops.yylookaheadNeeds[yyk]) { + yynewOption->yyrawchar = yychar; + yynewOption->yyval = yylval; + } else yynewOption->yyrawchar = YYEMPTY; yynewOption->yynext = yystate->yysemantics.yyfirstVal; yystate->yysemantics.yyfirstVal = yynewOption; - YY_RESERVE_GLRSTACK (yystackp); + YY_RESERVE_GLRSTACK(yystackp); } - /* GLRStacks */ +/* GLRStacks */ /** Initialize SET to a singleton set containing an empty stack. */ -static yybool -yyinitStateSet (yyGLRStateSet* yyset) -{ +static yybool yyinitStateSet(yyGLRStateSet *yyset) { yyset->yysize = 1; yyset->yycapacity = 16; - yyset->yystates = (yyGLRState**) YYMALLOC (16 * sizeof yyset->yystates[0]); - if (! 
yyset->yystates) - return yyfalse; + yyset->yystates = (yyGLRState **)YYMALLOC(16 * sizeof yyset->yystates[0]); + if (!yyset->yystates) return yyfalse; yyset->yystates[0] = YY_NULL; yyset->yylookaheadNeeds = - (yybool*) YYMALLOC (16 * sizeof yyset->yylookaheadNeeds[0]); - if (! yyset->yylookaheadNeeds) - { - YYFREE (yyset->yystates); - return yyfalse; - } + (yybool *)YYMALLOC(16 * sizeof yyset->yylookaheadNeeds[0]); + if (!yyset->yylookaheadNeeds) { + YYFREE(yyset->yystates); + return yyfalse; + } return yytrue; } -static void yyfreeStateSet (yyGLRStateSet* yyset) -{ - YYFREE (yyset->yystates); - YYFREE (yyset->yylookaheadNeeds); +static void yyfreeStateSet(yyGLRStateSet *yyset) { + YYFREE(yyset->yystates); + YYFREE(yyset->yylookaheadNeeds); } /** Initialize STACK to a single empty stack, with total maximum * capacity for all stacks of SIZE. */ -static yybool -yyinitGLRStack (yyGLRStack* yystackp, size_t yysize) -{ +static yybool yyinitGLRStack(yyGLRStack *yystackp, size_t yysize) { yystackp->yyerrState = 0; yynerrs = 0; yystackp->yyspaceLeft = yysize; yystackp->yyitems = - (yyGLRStackItem*) YYMALLOC (yysize * sizeof yystackp->yynextFree[0]); - if (!yystackp->yyitems) - return yyfalse; + (yyGLRStackItem *)YYMALLOC(yysize * sizeof yystackp->yynextFree[0]); + if (!yystackp->yyitems) return yyfalse; yystackp->yynextFree = yystackp->yyitems; yystackp->yysplitPoint = YY_NULL; yystackp->yylastDeleted = YY_NULL; - return yyinitStateSet (&yystackp->yytops); + return yyinitStateSet(&yystackp->yytops); } - #if YYSTACKEXPANDABLE -# define YYRELOC(YYFROMITEMS,YYTOITEMS,YYX,YYTYPE) \ - &((YYTOITEMS) - ((YYFROMITEMS) - (yyGLRStackItem*) (YYX)))->YYTYPE +#define YYRELOC(YYFROMITEMS, YYTOITEMS, YYX, YYTYPE) \ + &((YYTOITEMS) - ((YYFROMITEMS) - (yyGLRStackItem *)(YYX)))->YYTYPE /** If STACK is expandable, extend it. WARNING: Pointers into the stack from outside should be considered invalid after this call. 
We always expand when there are 1 or fewer items left AFTER an allocation, so that we can avoid having external pointers exist across an allocation. */ -static void -yyexpandGLRStack (yyGLRStack* yystackp) -{ - yyGLRStackItem* yynewItems; - yyGLRStackItem* yyp0, *yyp1; +static void yyexpandGLRStack(yyGLRStack *yystackp) { + yyGLRStackItem *yynewItems; + yyGLRStackItem *yyp0, *yyp1; size_t yynewSize; size_t yyn; size_t yysize = yystackp->yynextFree - yystackp->yyitems; - if (YYMAXDEPTH - YYHEADROOM < yysize) - yyMemoryExhausted (yystackp); - yynewSize = 2*yysize; - if (YYMAXDEPTH < yynewSize) - yynewSize = YYMAXDEPTH; - yynewItems = (yyGLRStackItem*) YYMALLOC (yynewSize * sizeof yynewItems[0]); - if (! yynewItems) - yyMemoryExhausted (yystackp); - for (yyp0 = yystackp->yyitems, yyp1 = yynewItems, yyn = yysize; - 0 < yyn; - yyn -= 1, yyp0 += 1, yyp1 += 1) - { - *yyp1 = *yyp0; - if (*(yybool *) yyp0) - { - yyGLRState* yys0 = &yyp0->yystate; - yyGLRState* yys1 = &yyp1->yystate; - if (yys0->yypred != YY_NULL) - yys1->yypred = - YYRELOC (yyp0, yyp1, yys0->yypred, yystate); - if (! 
yys0->yyresolved && yys0->yysemantics.yyfirstVal != YY_NULL) - yys1->yysemantics.yyfirstVal = - YYRELOC (yyp0, yyp1, yys0->yysemantics.yyfirstVal, yyoption); - } - else - { - yySemanticOption* yyv0 = &yyp0->yyoption; - yySemanticOption* yyv1 = &yyp1->yyoption; - if (yyv0->yystate != YY_NULL) - yyv1->yystate = YYRELOC (yyp0, yyp1, yyv0->yystate, yystate); - if (yyv0->yynext != YY_NULL) - yyv1->yynext = YYRELOC (yyp0, yyp1, yyv0->yynext, yyoption); - } + if (YYMAXDEPTH - YYHEADROOM < yysize) yyMemoryExhausted(yystackp); + yynewSize = 2 * yysize; + if (YYMAXDEPTH < yynewSize) yynewSize = YYMAXDEPTH; + yynewItems = (yyGLRStackItem *)YYMALLOC(yynewSize * sizeof yynewItems[0]); + if (!yynewItems) yyMemoryExhausted(yystackp); + for (yyp0 = yystackp->yyitems, yyp1 = yynewItems, yyn = yysize; 0 < yyn; + yyn -= 1, yyp0 += 1, yyp1 += 1) { + *yyp1 = *yyp0; + if (*(yybool *)yyp0) { + yyGLRState *yys0 = &yyp0->yystate; + yyGLRState *yys1 = &yyp1->yystate; + if (yys0->yypred != YY_NULL) + yys1->yypred = YYRELOC(yyp0, yyp1, yys0->yypred, yystate); + if (!yys0->yyresolved && yys0->yysemantics.yyfirstVal != YY_NULL) + yys1->yysemantics.yyfirstVal = + YYRELOC(yyp0, yyp1, yys0->yysemantics.yyfirstVal, yyoption); + } else { + yySemanticOption *yyv0 = &yyp0->yyoption; + yySemanticOption *yyv1 = &yyp1->yyoption; + if (yyv0->yystate != YY_NULL) + yyv1->yystate = YYRELOC(yyp0, yyp1, yyv0->yystate, yystate); + if (yyv0->yynext != YY_NULL) + yyv1->yynext = YYRELOC(yyp0, yyp1, yyv0->yynext, yyoption); } + } if (yystackp->yysplitPoint != YY_NULL) - yystackp->yysplitPoint = YYRELOC (yystackp->yyitems, yynewItems, - yystackp->yysplitPoint, yystate); + yystackp->yysplitPoint = + YYRELOC(yystackp->yyitems, yynewItems, yystackp->yysplitPoint, yystate); for (yyn = 0; yyn < yystackp->yytops.yysize; yyn += 1) if (yystackp->yytops.yystates[yyn] != YY_NULL) yystackp->yytops.yystates[yyn] = - YYRELOC (yystackp->yyitems, yynewItems, - yystackp->yytops.yystates[yyn], yystate); - YYFREE (yystackp->yyitems); 
+ YYRELOC(yystackp->yyitems, yynewItems, yystackp->yytops.yystates[yyn], + yystate); + YYFREE(yystackp->yyitems); yystackp->yyitems = yynewItems; yystackp->yynextFree = yynewItems + yysize; yystackp->yyspaceLeft = yynewSize - yysize; } #endif -static void -yyfreeGLRStack (yyGLRStack* yystackp) -{ - YYFREE (yystackp->yyitems); - yyfreeStateSet (&yystackp->yytops); +static void yyfreeGLRStack(yyGLRStack *yystackp) { + YYFREE(yystackp->yyitems); + yyfreeStateSet(&yystackp->yytops); } /** Assuming that S is a GLRState somewhere on STACK, update the * splitpoint of STACK, if needed, so that it is at least as deep as * S. */ -static inline void -yyupdateSplit (yyGLRStack* yystackp, yyGLRState* yys) -{ +static inline void yyupdateSplit(yyGLRStack *yystackp, yyGLRState *yys) { if (yystackp->yysplitPoint != YY_NULL && yystackp->yysplitPoint > yys) yystackp->yysplitPoint = yys; } /** Invalidate stack #K in STACK. */ -static inline void -yymarkStackDeleted (yyGLRStack* yystackp, size_t yyk) -{ +static inline void yymarkStackDeleted(yyGLRStack *yystackp, size_t yyk) { if (yystackp->yytops.yystates[yyk] != YY_NULL) yystackp->yylastDeleted = yystackp->yytops.yystates[yyk]; yystackp->yytops.yystates[yyk] = YY_NULL; @@ -4847,61 +6059,49 @@ yymarkStackDeleted (yyGLRStack* yystackp, size_t yyk) /** Undelete the last stack that was marked as deleted. Can only be done once after a deletion, and only when all other stacks have been deleted. 
*/ -static void -yyundeleteLastStack (yyGLRStack* yystackp) -{ +static void yyundeleteLastStack(yyGLRStack *yystackp) { if (yystackp->yylastDeleted == YY_NULL || yystackp->yytops.yysize != 0) return; yystackp->yytops.yystates[0] = yystackp->yylastDeleted; yystackp->yytops.yysize = 1; - YYDPRINTF ((stderr, "Restoring last deleted stack as stack #0.\n")); + YYDPRINTF((stderr, "Restoring last deleted stack as stack #0.\n")); yystackp->yylastDeleted = YY_NULL; } -static inline void -yyremoveDeletes (yyGLRStack* yystackp) -{ +static inline void yyremoveDeletes(yyGLRStack *yystackp) { size_t yyi, yyj; yyi = yyj = 0; - while (yyj < yystackp->yytops.yysize) - { - if (yystackp->yytops.yystates[yyi] == YY_NULL) - { - if (yyi == yyj) - { - YYDPRINTF ((stderr, "Removing dead stacks.\n")); - } - yystackp->yytops.yysize -= 1; - } - else - { - yystackp->yytops.yystates[yyj] = yystackp->yytops.yystates[yyi]; - /* In the current implementation, it's unnecessary to copy - yystackp->yytops.yylookaheadNeeds[yyi] since, after - yyremoveDeletes returns, the parser immediately either enters - deterministic operation or shifts a token. However, it doesn't - hurt, and the code might evolve to need it. */ - yystackp->yytops.yylookaheadNeeds[yyj] = - yystackp->yytops.yylookaheadNeeds[yyi]; - if (yyj != yyi) - { - YYDPRINTF ((stderr, "Rename stack %lu -> %lu.\n", - (unsigned long int) yyi, (unsigned long int) yyj)); - } - yyj += 1; - } - yyi += 1; + while (yyj < yystackp->yytops.yysize) { + if (yystackp->yytops.yystates[yyi] == YY_NULL) { + if (yyi == yyj) { + YYDPRINTF((stderr, "Removing dead stacks.\n")); + } + yystackp->yytops.yysize -= 1; + } else { + yystackp->yytops.yystates[yyj] = yystackp->yytops.yystates[yyi]; + /* In the current implementation, it's unnecessary to copy + yystackp->yytops.yylookaheadNeeds[yyi] since, after + yyremoveDeletes returns, the parser immediately either enters + deterministic operation or shifts a token. 
However, it doesn't + hurt, and the code might evolve to need it. */ + yystackp->yytops.yylookaheadNeeds[yyj] = + yystackp->yytops.yylookaheadNeeds[yyi]; + if (yyj != yyi) { + YYDPRINTF((stderr, "Rename stack %lu -> %lu.\n", (unsigned long int)yyi, + (unsigned long int)yyj)); + } + yyj += 1; } + yyi += 1; + } } /** Shift to a new state on stack #K of STACK, corresponding to LR state * LRSTATE, at input position POSN, with (resolved) semantic value SVAL. */ -static inline void -yyglrShift (yyGLRStack* yystackp, size_t yyk, yyStateNum yylrState, - size_t yyposn, - YYSTYPE* yyvalp) -{ - yyGLRState* yynewState = &yynewGLRStackItem (yystackp, yytrue)->yystate; +static inline void yyglrShift(yyGLRStack *yystackp, size_t yyk, + yyStateNum yylrState, size_t yyposn, + YYSTYPE *yyvalp) { + yyGLRState *yynewState = &yynewGLRStackItem(yystackp, yytrue)->yystate; yynewState->yylrState = yylrState; yynewState->yyposn = yyposn; @@ -4910,17 +6110,16 @@ yyglrShift (yyGLRStack* yystackp, size_t yyk, yyStateNum yylrState, yynewState->yysemantics.yysval = *yyvalp; yystackp->yytops.yystates[yyk] = yynewState; - YY_RESERVE_GLRSTACK (yystackp); + YY_RESERVE_GLRSTACK(yystackp); } /** Shift stack #K of YYSTACK, to a new state corresponding to LR * state YYLRSTATE, at input position YYPOSN, with the (unresolved) * semantic value of YYRHS under the action for YYRULE. 
*/ -static inline void -yyglrShiftDefer (yyGLRStack* yystackp, size_t yyk, yyStateNum yylrState, - size_t yyposn, yyGLRState* rhs, yyRuleNum yyrule) -{ - yyGLRState* yynewState = &yynewGLRStackItem (yystackp, yytrue)->yystate; +static inline void yyglrShiftDefer(yyGLRStack *yystackp, size_t yyk, + yyStateNum yylrState, size_t yyposn, + yyGLRState *rhs, yyRuleNum yyrule) { + yyGLRState *yynewState = &yynewGLRStackItem(yystackp, yytrue)->yystate; yynewState->yylrState = yylrState; yynewState->yyposn = yyposn; @@ -4930,7 +6129,7 @@ yyglrShiftDefer (yyGLRStack* yystackp, size_t yyk, yyStateNum yylrState, yystackp->yytops.yystates[yyk] = yynewState; /* Invokes YY_RESERVE_GLRSTACK. */ - yyaddDeferredAction (yystackp, yyk, yynewState, rhs, yyrule); + yyaddDeferredAction(yystackp, yyk, yynewState, rhs, yyrule); } /** Pop the symbols consumed by reduction #RULE from the top of stack @@ -4939,83 +6138,77 @@ yyglrShiftDefer (yyGLRStack* yystackp, size_t yyk, yyStateNum yylrState, * have been previously resolved. Set *VALP to the resulting value, * and *LOCP to the computed location (if any). Return value is as * for userAction. */ -static inline YYRESULTTAG -yydoAction (yyGLRStack* yystackp, size_t yyk, yyRuleNum yyrule, - YYSTYPE* yyvalp, struct ParseResult* result) -{ - int yynrhs = yyrhsLength (yyrule); - - if (yystackp->yysplitPoint == YY_NULL) - { - /* Standard special case: single stack. */ - yyGLRStackItem* rhs = (yyGLRStackItem*) yystackp->yytops.yystates[yyk]; - YYASSERT (yyk == 0); - yystackp->yynextFree -= yynrhs; - yystackp->yyspaceLeft += yynrhs; - yystackp->yytops.yystates[0] = & yystackp->yynextFree[-1].yystate; - return yyuserAction (yyrule, yynrhs, rhs, yystackp, - yyvalp, result); - } - else - { - /* At present, doAction is never called in nondeterministic - * mode, so this branch is never taken. It is here in - * anticipation of a future feature that will allow immediate - * evaluation of selected actions in nondeterministic mode. 
*/ - int yyi; - yyGLRState* yys; - yyGLRStackItem yyrhsVals[YYMAXRHS + YYMAXLEFT + 1]; - yys = yyrhsVals[YYMAXRHS + YYMAXLEFT].yystate.yypred - = yystackp->yytops.yystates[yyk]; - for (yyi = 0; yyi < yynrhs; yyi += 1) - { - yys = yys->yypred; - YYASSERT (yys); - } - yyupdateSplit (yystackp, yys); - yystackp->yytops.yystates[yyk] = yys; - return yyuserAction (yyrule, yynrhs, yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, - yystackp, yyvalp, result); +static inline YYRESULTTAG yydoAction(yyGLRStack *yystackp, size_t yyk, + yyRuleNum yyrule, YYSTYPE *yyvalp, + struct ParseResult *result) { + int yynrhs = yyrhsLength(yyrule); + + if (yystackp->yysplitPoint == YY_NULL) { + /* Standard special case: single stack. */ + yyGLRStackItem *rhs = (yyGLRStackItem *)yystackp->yytops.yystates[yyk]; + YYASSERT(yyk == 0); + yystackp->yynextFree -= yynrhs; + yystackp->yyspaceLeft += yynrhs; + yystackp->yytops.yystates[0] = &yystackp->yynextFree[-1].yystate; + return yyuserAction(yyrule, yynrhs, rhs, yystackp, yyvalp, result); + } else { + /* At present, doAction is never called in nondeterministic + * mode, so this branch is never taken. It is here in + * anticipation of a future feature that will allow immediate + * evaluation of selected actions in nondeterministic mode. 
*/ + int yyi; + yyGLRState *yys; + yyGLRStackItem yyrhsVals[YYMAXRHS + YYMAXLEFT + 1]; + yys = yyrhsVals[YYMAXRHS + YYMAXLEFT].yystate.yypred = + yystackp->yytops.yystates[yyk]; + for (yyi = 0; yyi < yynrhs; yyi += 1) { + yys = yys->yypred; + YYASSERT(yys); } + yyupdateSplit(yystackp, yys); + yystackp->yytops.yystates[yyk] = yys; + return yyuserAction(yyrule, yynrhs, yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, + yystackp, yyvalp, result); + } } #if !YYDEBUG -# define YY_REDUCE_PRINT(Args) +#define YY_REDUCE_PRINT(Args) #else -# define YY_REDUCE_PRINT(Args) \ -do { \ - if (yydebug) \ - yy_reduce_print Args; \ -} while (YYID (0)) +#define YY_REDUCE_PRINT(Args) \ + do { \ + if (yydebug) yy_reduce_print Args; \ + } while (YYID(0)) /*----------------------------------------------------------. | Report that the RULE is going to be reduced on stack #K. | `----------------------------------------------------------*/ -/*ARGSUSED*/ static inline void -yy_reduce_print (yyGLRStack* yystackp, size_t yyk, yyRuleNum yyrule, - YYSTYPE* yyvalp, struct ParseResult* result) -{ - int yynrhs = yyrhsLength (yyrule); - yybool yynormal __attribute__ ((__unused__)) = - (yystackp->yysplitPoint == YY_NULL); - yyGLRStackItem* yyvsp = (yyGLRStackItem*) yystackp->yytops.yystates[yyk]; +/*ARGSUSED*/ static inline void yy_reduce_print(yyGLRStack *yystackp, + size_t yyk, yyRuleNum yyrule, + YYSTYPE *yyvalp, + struct ParseResult *result) { + int yynrhs = yyrhsLength(yyrule); + yybool yynormal __attribute__((__unused__)) = + (yystackp->yysplitPoint == YY_NULL); + yyGLRStackItem *yyvsp = (yyGLRStackItem *)yystackp->yytops.yystates[yyk]; int yylow = 1; int yyi; - YYUSE (yyvalp); - YYUSE (result); - YYFPRINTF (stderr, "Reducing stack %lu by rule %d (line %lu):\n", - (unsigned long int) yyk, yyrule - 1, - (unsigned long int) yyrline[yyrule]); + YYUSE(yyvalp); + YYUSE(result); + YYFPRINTF(stderr, "Reducing stack %lu by rule %d (line %lu):\n", + (unsigned long int)yyk, yyrule - 1, + (unsigned long 
int)yyrline[yyrule]); /* The symbols being reduced. */ - for (yyi = 0; yyi < yynrhs; yyi++) - { - YYFPRINTF (stderr, " $%d = ", yyi + 1); - yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], - &(((yyGLRStackItem const *)yyvsp)[YYFILL ((yyi + 1) - (yynrhs))].yystate.yysemantics.yysval) - , result); - YYFPRINTF (stderr, "\n"); - } + for (yyi = 0; yyi < yynrhs; yyi++) { + YYFPRINTF(stderr, " $%d = ", yyi + 1); + yy_symbol_print( + stderr, yyrhs[yyprhs[yyrule] + yyi], + &(((yyGLRStackItem const *) + yyvsp)[YYFILL((yyi + 1) - (yynrhs))].yystate.yysemantics.yysval), + result); + YYFPRINTF(stderr, "\n"); + } } #endif @@ -5030,229 +6223,184 @@ yy_reduce_print (yyGLRStack* yystackp, size_t yyk, yyRuleNum yyrule, * the STACK. In this case, the (necessarily deferred) semantic value is * added to the options for the existing state's semantic value. */ -static inline YYRESULTTAG -yyglrReduce (yyGLRStack* yystackp, size_t yyk, yyRuleNum yyrule, - yybool yyforceEval, struct ParseResult* result) -{ +static inline YYRESULTTAG yyglrReduce(yyGLRStack *yystackp, size_t yyk, + yyRuleNum yyrule, yybool yyforceEval, + struct ParseResult *result) { size_t yyposn = yystackp->yytops.yystates[yyk]->yyposn; - if (yyforceEval || yystackp->yysplitPoint == YY_NULL) - { - YYSTYPE yysval; + if (yyforceEval || yystackp->yysplitPoint == YY_NULL) { + YYSTYPE yysval; - YY_REDUCE_PRINT ((yystackp, yyk, yyrule, &yysval, result)); - YYCHK (yydoAction (yystackp, yyk, yyrule, &yysval, result)); - YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyrule], &yysval, &yyloc); - yyglrShift (yystackp, yyk, - yyLRgotoState (yystackp->yytops.yystates[yyk]->yylrState, - yylhsNonterm (yyrule)), - yyposn, &yysval); + YY_REDUCE_PRINT((yystackp, yyk, yyrule, &yysval, result)); + YYCHK(yydoAction(yystackp, yyk, yyrule, &yysval, result)); + YY_SYMBOL_PRINT("-> $$ =", yyr1[yyrule], &yysval, &yyloc); + yyglrShift(yystackp, yyk, + yyLRgotoState(yystackp->yytops.yystates[yyk]->yylrState, + yylhsNonterm(yyrule)), + yyposn, &yysval); + } 
else { + size_t yyi; + int yyn; + yyGLRState *yys, *yys0 = yystackp->yytops.yystates[yyk]; + yyStateNum yynewLRState; + + for (yys = yystackp->yytops.yystates[yyk], yyn = yyrhsLength(yyrule); + 0 < yyn; yyn -= 1) { + yys = yys->yypred; + YYASSERT(yys); } - else - { - size_t yyi; - int yyn; - yyGLRState* yys, *yys0 = yystackp->yytops.yystates[yyk]; - yyStateNum yynewLRState; - - for (yys = yystackp->yytops.yystates[yyk], yyn = yyrhsLength (yyrule); - 0 < yyn; yyn -= 1) - { - yys = yys->yypred; - YYASSERT (yys); - } - yyupdateSplit (yystackp, yys); - yynewLRState = yyLRgotoState (yys->yylrState, yylhsNonterm (yyrule)); - YYDPRINTF ((stderr, - "Reduced stack %lu by rule #%d; action deferred. Now in state %d.\n", - (unsigned long int) yyk, yyrule - 1, yynewLRState)); - for (yyi = 0; yyi < yystackp->yytops.yysize; yyi += 1) - if (yyi != yyk && yystackp->yytops.yystates[yyi] != YY_NULL) - { - yyGLRState *yysplit = yystackp->yysplitPoint; - yyGLRState *yyp = yystackp->yytops.yystates[yyi]; - while (yyp != yys && yyp != yysplit && yyp->yyposn >= yyposn) - { - if (yyp->yylrState == yynewLRState && yyp->yypred == yys) - { - yyaddDeferredAction (yystackp, yyk, yyp, yys0, yyrule); - yymarkStackDeleted (yystackp, yyk); - YYDPRINTF ((stderr, "Merging stack %lu into stack %lu.\n", - (unsigned long int) yyk, - (unsigned long int) yyi)); - return yyok; - } - yyp = yyp->yypred; - } + yyupdateSplit(yystackp, yys); + yynewLRState = yyLRgotoState(yys->yylrState, yylhsNonterm(yyrule)); + YYDPRINTF( + (stderr, + "Reduced stack %lu by rule #%d; action deferred. 
Now in state %d.\n", + (unsigned long int)yyk, yyrule - 1, yynewLRState)); + for (yyi = 0; yyi < yystackp->yytops.yysize; yyi += 1) + if (yyi != yyk && yystackp->yytops.yystates[yyi] != YY_NULL) { + yyGLRState *yysplit = yystackp->yysplitPoint; + yyGLRState *yyp = yystackp->yytops.yystates[yyi]; + while (yyp != yys && yyp != yysplit && yyp->yyposn >= yyposn) { + if (yyp->yylrState == yynewLRState && yyp->yypred == yys) { + yyaddDeferredAction(yystackp, yyk, yyp, yys0, yyrule); + yymarkStackDeleted(yystackp, yyk); + YYDPRINTF((stderr, "Merging stack %lu into stack %lu.\n", + (unsigned long int)yyk, (unsigned long int)yyi)); + return yyok; } - yystackp->yytops.yystates[yyk] = yys; - yyglrShiftDefer (yystackp, yyk, yynewLRState, yyposn, yys0, yyrule); - } + yyp = yyp->yypred; + } + } + yystackp->yytops.yystates[yyk] = yys; + yyglrShiftDefer(yystackp, yyk, yynewLRState, yyposn, yys0, yyrule); + } return yyok; } -static size_t -yysplitStack (yyGLRStack* yystackp, size_t yyk) -{ - if (yystackp->yysplitPoint == YY_NULL) - { - YYASSERT (yyk == 0); - yystackp->yysplitPoint = yystackp->yytops.yystates[yyk]; - } - if (yystackp->yytops.yysize >= yystackp->yytops.yycapacity) - { - yyGLRState** yynewStates; - yybool* yynewLookaheadNeeds; - - yynewStates = YY_NULL; - - if (yystackp->yytops.yycapacity - > (YYSIZEMAX / (2 * sizeof yynewStates[0]))) - yyMemoryExhausted (yystackp); - yystackp->yytops.yycapacity *= 2; - - yynewStates = - (yyGLRState**) YYREALLOC (yystackp->yytops.yystates, - (yystackp->yytops.yycapacity - * sizeof yynewStates[0])); - if (yynewStates == YY_NULL) - yyMemoryExhausted (yystackp); - yystackp->yytops.yystates = yynewStates; - - yynewLookaheadNeeds = - (yybool*) YYREALLOC (yystackp->yytops.yylookaheadNeeds, - (yystackp->yytops.yycapacity - * sizeof yynewLookaheadNeeds[0])); - if (yynewLookaheadNeeds == YY_NULL) - yyMemoryExhausted (yystackp); - yystackp->yytops.yylookaheadNeeds = yynewLookaheadNeeds; - } - yystackp->yytops.yystates[yystackp->yytops.yysize] - 
= yystackp->yytops.yystates[yyk]; - yystackp->yytops.yylookaheadNeeds[yystackp->yytops.yysize] - = yystackp->yytops.yylookaheadNeeds[yyk]; +static size_t yysplitStack(yyGLRStack *yystackp, size_t yyk) { + if (yystackp->yysplitPoint == YY_NULL) { + YYASSERT(yyk == 0); + yystackp->yysplitPoint = yystackp->yytops.yystates[yyk]; + } + if (yystackp->yytops.yysize >= yystackp->yytops.yycapacity) { + yyGLRState **yynewStates; + yybool *yynewLookaheadNeeds; + + yynewStates = YY_NULL; + + if (yystackp->yytops.yycapacity > (YYSIZEMAX / (2 * sizeof yynewStates[0]))) + yyMemoryExhausted(yystackp); + yystackp->yytops.yycapacity *= 2; + + yynewStates = (yyGLRState **)YYREALLOC( + yystackp->yytops.yystates, + (yystackp->yytops.yycapacity * sizeof yynewStates[0])); + if (yynewStates == YY_NULL) yyMemoryExhausted(yystackp); + yystackp->yytops.yystates = yynewStates; + + yynewLookaheadNeeds = (yybool *)YYREALLOC( + yystackp->yytops.yylookaheadNeeds, + (yystackp->yytops.yycapacity * sizeof yynewLookaheadNeeds[0])); + if (yynewLookaheadNeeds == YY_NULL) yyMemoryExhausted(yystackp); + yystackp->yytops.yylookaheadNeeds = yynewLookaheadNeeds; + } + yystackp->yytops.yystates[yystackp->yytops.yysize] = + yystackp->yytops.yystates[yyk]; + yystackp->yytops.yylookaheadNeeds[yystackp->yytops.yysize] = + yystackp->yytops.yylookaheadNeeds[yyk]; yystackp->yytops.yysize += 1; - return yystackp->yytops.yysize-1; + return yystackp->yytops.yysize - 1; } /** True iff Y0 and Y1 represent identical options at the top level. * That is, they represent the same rule applied to RHS symbols * that produce the same terminal symbols. 
*/ -static yybool -yyidenticalOptions (yySemanticOption* yyy0, yySemanticOption* yyy1) -{ - if (yyy0->yyrule == yyy1->yyrule) - { - yyGLRState *yys0, *yys1; - int yyn; - for (yys0 = yyy0->yystate, yys1 = yyy1->yystate, - yyn = yyrhsLength (yyy0->yyrule); - yyn > 0; - yys0 = yys0->yypred, yys1 = yys1->yypred, yyn -= 1) - if (yys0->yyposn != yys1->yyposn) - return yyfalse; - return yytrue; - } - else +static yybool yyidenticalOptions(yySemanticOption *yyy0, + yySemanticOption *yyy1) { + if (yyy0->yyrule == yyy1->yyrule) { + yyGLRState *yys0, *yys1; + int yyn; + for (yys0 = yyy0->yystate, yys1 = yyy1->yystate, + yyn = yyrhsLength(yyy0->yyrule); + yyn > 0; yys0 = yys0->yypred, yys1 = yys1->yypred, yyn -= 1) + if (yys0->yyposn != yys1->yyposn) return yyfalse; + return yytrue; + } else return yyfalse; } /** Assuming identicalOptions (Y0,Y1), destructively merge the * alternative semantic values for the RHS-symbols of Y1 and Y0. */ -static void -yymergeOptionSets (yySemanticOption* yyy0, yySemanticOption* yyy1) -{ +static void yymergeOptionSets(yySemanticOption *yyy0, yySemanticOption *yyy1) { yyGLRState *yys0, *yys1; int yyn; for (yys0 = yyy0->yystate, yys1 = yyy1->yystate, - yyn = yyrhsLength (yyy0->yyrule); - yyn > 0; - yys0 = yys0->yypred, yys1 = yys1->yypred, yyn -= 1) - { - if (yys0 == yys1) - break; - else if (yys0->yyresolved) - { - yys1->yyresolved = yytrue; - yys1->yysemantics.yysval = yys0->yysemantics.yysval; - } - else if (yys1->yyresolved) - { - yys0->yyresolved = yytrue; - yys0->yysemantics.yysval = yys1->yysemantics.yysval; - } - else - { - yySemanticOption** yyz0p = &yys0->yysemantics.yyfirstVal; - yySemanticOption* yyz1 = yys1->yysemantics.yyfirstVal; - while (YYID (yytrue)) - { - if (yyz1 == *yyz0p || yyz1 == YY_NULL) - break; - else if (*yyz0p == YY_NULL) - { - *yyz0p = yyz1; - break; - } - else if (*yyz0p < yyz1) - { - yySemanticOption* yyz = *yyz0p; - *yyz0p = yyz1; - yyz1 = yyz1->yynext; - (*yyz0p)->yynext = yyz; - } - yyz0p = &(*yyz0p)->yynext; - } 
- yys1->yysemantics.yyfirstVal = yys0->yysemantics.yyfirstVal; + yyn = yyrhsLength(yyy0->yyrule); + yyn > 0; yys0 = yys0->yypred, yys1 = yys1->yypred, yyn -= 1) { + if (yys0 == yys1) + break; + else if (yys0->yyresolved) { + yys1->yyresolved = yytrue; + yys1->yysemantics.yysval = yys0->yysemantics.yysval; + } else if (yys1->yyresolved) { + yys0->yyresolved = yytrue; + yys0->yysemantics.yysval = yys1->yysemantics.yysval; + } else { + yySemanticOption **yyz0p = &yys0->yysemantics.yyfirstVal; + yySemanticOption *yyz1 = yys1->yysemantics.yyfirstVal; + while (YYID(yytrue)) { + if (yyz1 == *yyz0p || yyz1 == YY_NULL) + break; + else if (*yyz0p == YY_NULL) { + *yyz0p = yyz1; + break; + } else if (*yyz0p < yyz1) { + yySemanticOption *yyz = *yyz0p; + *yyz0p = yyz1; + yyz1 = yyz1->yynext; + (*yyz0p)->yynext = yyz; } + yyz0p = &(*yyz0p)->yynext; + } + yys1->yysemantics.yyfirstVal = yys0->yysemantics.yyfirstVal; } + } } /** Y0 and Y1 represent two possible actions to take in a given * parsing state; return 0 if no combination is possible, * 1 if user-mergeable, 2 if Y0 is preferred, 3 if Y1 is preferred. 
*/ -static int -yypreference (yySemanticOption* y0, yySemanticOption* y1) -{ +static int yypreference(yySemanticOption *y0, yySemanticOption *y1) { yyRuleNum r0 = y0->yyrule, r1 = y1->yyrule; int p0 = yydprec[r0], p1 = yydprec[r1]; - if (p0 == p1) - { - if (yymerger[r0] == 0 || yymerger[r0] != yymerger[r1]) - return 0; - else - return 1; - } - if (p0 == 0 || p1 == 0) - return 0; - if (p0 < p1) - return 3; - if (p1 < p0) - return 2; + if (p0 == p1) { + if (yymerger[r0] == 0 || yymerger[r0] != yymerger[r1]) + return 0; + else + return 1; + } + if (p0 == 0 || p1 == 0) return 0; + if (p0 < p1) return 3; + if (p1 < p0) return 2; return 0; } -static YYRESULTTAG yyresolveValue (yyGLRState* yys, - yyGLRStack* yystackp, struct ParseResult* result); - +static YYRESULTTAG yyresolveValue(yyGLRState *yys, yyGLRStack *yystackp, + struct ParseResult *result); /** Resolve the previous N states starting at and including state S. If result * != yyok, some states may have been left unresolved possibly with empty * semantic option chains. Regardless of whether result = yyok, each state * has been left with consistent data so that yydestroyGLRState can be invoked * if necessary. */ -static YYRESULTTAG -yyresolveStates (yyGLRState* yys, int yyn, - yyGLRStack* yystackp, struct ParseResult* result) -{ - if (0 < yyn) - { - YYASSERT (yys->yypred); - YYCHK (yyresolveStates (yys->yypred, yyn-1, yystackp, result)); - if (! yys->yyresolved) - YYCHK (yyresolveValue (yys, yystackp, result)); - } +static YYRESULTTAG yyresolveStates(yyGLRState *yys, int yyn, + yyGLRStack *yystackp, + struct ParseResult *result) { + if (0 < yyn) { + YYASSERT(yys->yypred); + YYCHK(yyresolveStates(yys->yypred, yyn - 1, yystackp, result)); + if (!yys->yyresolved) YYCHK(yyresolveValue(yys, yystackp, result)); + } return yyok; } @@ -5260,21 +6408,19 @@ yyresolveStates (yyGLRState* yys, int yyn, * the semantic value and location. 
Regardless of whether result = yyok, all * RHS states have been destroyed (assuming the user action destroys all RHS * semantic values if invoked). */ -static YYRESULTTAG -yyresolveAction (yySemanticOption* yyopt, yyGLRStack* yystackp, - YYSTYPE* yyvalp, struct ParseResult* result) -{ +static YYRESULTTAG yyresolveAction(yySemanticOption *yyopt, + yyGLRStack *yystackp, YYSTYPE *yyvalp, + struct ParseResult *result) { yyGLRStackItem yyrhsVals[YYMAXRHS + YYMAXLEFT + 1]; - int yynrhs = yyrhsLength (yyopt->yyrule); + int yynrhs = yyrhsLength(yyopt->yyrule); YYRESULTTAG yyflag = - yyresolveStates (yyopt->yystate, yynrhs, yystackp, result); - if (yyflag != yyok) - { - yyGLRState *yys; - for (yys = yyopt->yystate; yynrhs > 0; yys = yys->yypred, yynrhs -= 1) - yydestroyGLRState ("Cleanup: popping", yys, result); - return yyflag; - } + yyresolveStates(yyopt->yystate, yynrhs, yystackp, result); + if (yyflag != yyok) { + yyGLRState *yys; + for (yys = yyopt->yystate; yynrhs > 0; yys = yys->yypred, yynrhs -= 1) + yydestroyGLRState("Cleanup: popping", yys, result); + return yyflag; + } yyrhsVals[YYMAXRHS + YYMAXLEFT].yystate.yypred = yyopt->yystate; { @@ -5282,9 +6428,9 @@ yyresolveAction (yySemanticOption* yyopt, yyGLRStack* yystackp, YYSTYPE yylval_current = yylval; yychar = yyopt->yyrawchar; yylval = yyopt->yyval; - yyflag = yyuserAction (yyopt->yyrule, yynrhs, - yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, - yystackp, yyvalp, result); + yyflag = yyuserAction(yyopt->yyrule, yynrhs, + yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, yystackp, + yyvalp, result); yychar = yychar_current; yylval = yylval_current; } @@ -5292,70 +6438,61 @@ yyresolveAction (yySemanticOption* yyopt, yyGLRStack* yystackp, } #if YYDEBUG -static void -yyreportTree (yySemanticOption* yyx, int yyindent) -{ - int yynrhs = yyrhsLength (yyx->yyrule); +static void yyreportTree(yySemanticOption *yyx, int yyindent) { + int yynrhs = yyrhsLength(yyx->yyrule); int yyi; - yyGLRState* yys; - yyGLRState* yystates[1 + YYMAXRHS]; + 
yyGLRState *yys; + yyGLRState *yystates[1 + YYMAXRHS]; yyGLRState yyleftmost_state; for (yyi = yynrhs, yys = yyx->yystate; 0 < yyi; yyi -= 1, yys = yys->yypred) yystates[yyi] = yys; - if (yys == YY_NULL) - { - yyleftmost_state.yyposn = 0; - yystates[0] = &yyleftmost_state; - } - else + if (yys == YY_NULL) { + yyleftmost_state.yyposn = 0; + yystates[0] = &yyleftmost_state; + } else yystates[0] = yys; if (yyx->yystate->yyposn < yys->yyposn + 1) - YYFPRINTF (stderr, "%*s%s -> \n", - yyindent, "", yytokenName (yylhsNonterm (yyx->yyrule)), - yyx->yyrule - 1); + YYFPRINTF(stderr, "%*s%s -> \n", yyindent, "", + yytokenName(yylhsNonterm(yyx->yyrule)), yyx->yyrule - 1); else - YYFPRINTF (stderr, "%*s%s -> \n", - yyindent, "", yytokenName (yylhsNonterm (yyx->yyrule)), - yyx->yyrule - 1, (unsigned long int) (yys->yyposn + 1), - (unsigned long int) yyx->yystate->yyposn); - for (yyi = 1; yyi <= yynrhs; yyi += 1) - { - if (yystates[yyi]->yyresolved) - { - if (yystates[yyi-1]->yyposn+1 > yystates[yyi]->yyposn) - YYFPRINTF (stderr, "%*s%s \n", yyindent+2, "", - yytokenName (yyrhs[yyprhs[yyx->yyrule]+yyi-1])); - else - YYFPRINTF (stderr, "%*s%s \n", yyindent+2, "", - yytokenName (yyrhs[yyprhs[yyx->yyrule]+yyi-1]), - (unsigned long int) (yystates[yyi - 1]->yyposn + 1), - (unsigned long int) yystates[yyi]->yyposn); - } + YYFPRINTF(stderr, "%*s%s -> \n", yyindent, "", + yytokenName(yylhsNonterm(yyx->yyrule)), yyx->yyrule - 1, + (unsigned long int)(yys->yyposn + 1), + (unsigned long int)yyx->yystate->yyposn); + for (yyi = 1; yyi <= yynrhs; yyi += 1) { + if (yystates[yyi]->yyresolved) { + if (yystates[yyi - 1]->yyposn + 1 > yystates[yyi]->yyposn) + YYFPRINTF(stderr, "%*s%s \n", yyindent + 2, "", + yytokenName(yyrhs[yyprhs[yyx->yyrule] + yyi - 1])); else - yyreportTree (yystates[yyi]->yysemantics.yyfirstVal, yyindent+2); - } + YYFPRINTF(stderr, "%*s%s \n", yyindent + 2, "", + yytokenName(yyrhs[yyprhs[yyx->yyrule] + yyi - 1]), + (unsigned long int)(yystates[yyi - 1]->yyposn + 1), + 
(unsigned long int)yystates[yyi]->yyposn); + } else + yyreportTree(yystates[yyi]->yysemantics.yyfirstVal, yyindent + 2); + } } #endif -/*ARGSUSED*/ static YYRESULTTAG -yyreportAmbiguity (yySemanticOption* yyx0, - yySemanticOption* yyx1, struct ParseResult* result) -{ - YYUSE (yyx0); - YYUSE (yyx1); +/*ARGSUSED*/ static YYRESULTTAG yyreportAmbiguity(yySemanticOption *yyx0, + yySemanticOption *yyx1, + struct ParseResult *result) { + YYUSE(yyx0); + YYUSE(yyx1); #if YYDEBUG - YYFPRINTF (stderr, "Ambiguity detected.\n"); - YYFPRINTF (stderr, "Option 1,\n"); - yyreportTree (yyx0, 2); - YYFPRINTF (stderr, "\nOption 2,\n"); - yyreportTree (yyx1, 2); - YYFPRINTF (stderr, "\n"); + YYFPRINTF(stderr, "Ambiguity detected.\n"); + YYFPRINTF(stderr, "Option 1,\n"); + yyreportTree(yyx0, 2); + YYFPRINTF(stderr, "\nOption 2,\n"); + yyreportTree(yyx1, 2); + YYFPRINTF(stderr, "\n"); #endif - yyerror (result, YY_("syntax is ambiguous")); + yyerror(result, YY_("syntax is ambiguous")); return yyabort; } @@ -5365,344 +6502,289 @@ yyreportAmbiguity (yySemanticOption* yyx0, * unmodified except that redundant options may have been removed. Regardless * of whether result = yyok, S has been left with consistent data so that * yydestroyGLRState can be invoked if necessary. 
*/ -static YYRESULTTAG -yyresolveValue (yyGLRState* yys, yyGLRStack* yystackp, struct ParseResult* result) -{ - yySemanticOption* yyoptionList = yys->yysemantics.yyfirstVal; - yySemanticOption* yybest = yyoptionList; - yySemanticOption** yypp; +static YYRESULTTAG yyresolveValue(yyGLRState *yys, yyGLRStack *yystackp, + struct ParseResult *result) { + yySemanticOption *yyoptionList = yys->yysemantics.yyfirstVal; + yySemanticOption *yybest = yyoptionList; + yySemanticOption **yypp; yybool yymerge = yyfalse; YYSTYPE yysval; YYRESULTTAG yyflag; - for (yypp = &yyoptionList->yynext; *yypp != YY_NULL; ) - { - yySemanticOption* yyp = *yypp; - - if (yyidenticalOptions (yybest, yyp)) - { - yymergeOptionSets (yybest, yyp); - *yypp = yyp->yynext; - } - else - { - switch (yypreference (yybest, yyp)) - { - case 0: - return yyreportAmbiguity (yybest, yyp, result); - break; - case 1: - yymerge = yytrue; - break; - case 2: - break; - case 3: - yybest = yyp; - yymerge = yyfalse; - break; - default: - /* This cannot happen so it is not worth a YYASSERT (yyfalse), - but some compilers complain if the default case is - omitted. */ - break; - } - yypp = &yyp->yynext; - } + for (yypp = &yyoptionList->yynext; *yypp != YY_NULL;) { + yySemanticOption *yyp = *yypp; + + if (yyidenticalOptions(yybest, yyp)) { + yymergeOptionSets(yybest, yyp); + *yypp = yyp->yynext; + } else { + switch (yypreference(yybest, yyp)) { + case 0: + return yyreportAmbiguity(yybest, yyp, result); + break; + case 1: + yymerge = yytrue; + break; + case 2: + break; + case 3: + yybest = yyp; + yymerge = yyfalse; + break; + default: + /* This cannot happen so it is not worth a YYASSERT (yyfalse), + but some compilers complain if the default case is + omitted. 
*/ + break; + } + yypp = &yyp->yynext; } + } - if (yymerge) - { - yySemanticOption* yyp; - int yyprec = yydprec[yybest->yyrule]; - yyflag = yyresolveAction (yybest, yystackp, &yysval, result); - if (yyflag == yyok) - for (yyp = yybest->yynext; yyp != YY_NULL; yyp = yyp->yynext) - { - if (yyprec == yydprec[yyp->yyrule]) - { - YYSTYPE yysval_other; - yyflag = yyresolveAction (yyp, yystackp, &yysval_other, result); - if (yyflag != yyok) - { - yydestruct ("Cleanup: discarding incompletely merged value for", - yystos[yys->yylrState], - &yysval, result); - break; - } - yyuserMerge (yymerger[yyp->yyrule], &yysval, &yysval_other); - } + if (yymerge) { + yySemanticOption *yyp; + int yyprec = yydprec[yybest->yyrule]; + yyflag = yyresolveAction(yybest, yystackp, &yysval, result); + if (yyflag == yyok) + for (yyp = yybest->yynext; yyp != YY_NULL; yyp = yyp->yynext) { + if (yyprec == yydprec[yyp->yyrule]) { + YYSTYPE yysval_other; + yyflag = yyresolveAction(yyp, yystackp, &yysval_other, result); + if (yyflag != yyok) { + yydestruct("Cleanup: discarding incompletely merged value for", + yystos[yys->yylrState], &yysval, result); + break; } - } - else - yyflag = yyresolveAction (yybest, yystackp, &yysval, result); + yyuserMerge(yymerger[yyp->yyrule], &yysval, &yysval_other); + } + } + } else + yyflag = yyresolveAction(yybest, yystackp, &yysval, result); - if (yyflag == yyok) - { - yys->yyresolved = yytrue; - yys->yysemantics.yysval = yysval; - } - else + if (yyflag == yyok) { + yys->yyresolved = yytrue; + yys->yysemantics.yysval = yysval; + } else yys->yysemantics.yyfirstVal = YY_NULL; return yyflag; } -static YYRESULTTAG -yyresolveStack (yyGLRStack* yystackp, struct ParseResult* result) -{ - if (yystackp->yysplitPoint != YY_NULL) - { - yyGLRState* yys; - int yyn; +static YYRESULTTAG yyresolveStack(yyGLRStack *yystackp, + struct ParseResult *result) { + if (yystackp->yysplitPoint != YY_NULL) { + yyGLRState *yys; + int yyn; - for (yyn = 0, yys = yystackp->yytops.yystates[0]; - yys 
!= yystackp->yysplitPoint; - yys = yys->yypred, yyn += 1) - continue; - YYCHK (yyresolveStates (yystackp->yytops.yystates[0], yyn, yystackp - , result)); - } + for (yyn = 0, yys = yystackp->yytops.yystates[0]; + yys != yystackp->yysplitPoint; yys = yys->yypred, yyn += 1) + continue; + YYCHK(yyresolveStates(yystackp->yytops.yystates[0], yyn, yystackp, result)); + } return yyok; } -static void -yycompressStack (yyGLRStack* yystackp) -{ - yyGLRState* yyp, *yyq, *yyr; +static void yycompressStack(yyGLRStack *yystackp) { + yyGLRState *yyp, *yyq, *yyr; - if (yystackp->yytops.yysize != 1 || yystackp->yysplitPoint == YY_NULL) - return; + if (yystackp->yytops.yysize != 1 || yystackp->yysplitPoint == YY_NULL) return; for (yyp = yystackp->yytops.yystates[0], yyq = yyp->yypred, yyr = YY_NULL; - yyp != yystackp->yysplitPoint; - yyr = yyp, yyp = yyq, yyq = yyp->yypred) + yyp != yystackp->yysplitPoint; yyr = yyp, yyp = yyq, yyq = yyp->yypred) yyp->yypred = yyr; yystackp->yyspaceLeft += yystackp->yynextFree - yystackp->yyitems; - yystackp->yynextFree = ((yyGLRStackItem*) yystackp->yysplitPoint) + 1; + yystackp->yynextFree = ((yyGLRStackItem *)yystackp->yysplitPoint) + 1; yystackp->yyspaceLeft -= yystackp->yynextFree - yystackp->yyitems; yystackp->yysplitPoint = YY_NULL; yystackp->yylastDeleted = YY_NULL; - while (yyr != YY_NULL) - { - yystackp->yynextFree->yystate = *yyr; - yyr = yyr->yypred; - yystackp->yynextFree->yystate.yypred = &yystackp->yynextFree[-1].yystate; - yystackp->yytops.yystates[0] = &yystackp->yynextFree->yystate; - yystackp->yynextFree += 1; - yystackp->yyspaceLeft -= 1; - } + while (yyr != YY_NULL) { + yystackp->yynextFree->yystate = *yyr; + yyr = yyr->yypred; + yystackp->yynextFree->yystate.yypred = &yystackp->yynextFree[-1].yystate; + yystackp->yytops.yystates[0] = &yystackp->yynextFree->yystate; + yystackp->yynextFree += 1; + yystackp->yyspaceLeft -= 1; + } } -static YYRESULTTAG -yyprocessOneStack (yyGLRStack* yystackp, size_t yyk, - size_t yyposn, struct 
ParseResult* result) -{ +static YYRESULTTAG yyprocessOneStack(yyGLRStack *yystackp, size_t yyk, + size_t yyposn, + struct ParseResult *result) { int yyaction; - const short int* yyconflicts; + const short int *yyconflicts; yyRuleNum yyrule; - while (yystackp->yytops.yystates[yyk] != YY_NULL) - { - yyStateNum yystate = yystackp->yytops.yystates[yyk]->yylrState; - YYDPRINTF ((stderr, "Stack %lu Entering state %d\n", - (unsigned long int) yyk, yystate)); + while (yystackp->yytops.yystates[yyk] != YY_NULL) { + yyStateNum yystate = yystackp->yytops.yystates[yyk]->yylrState; + YYDPRINTF((stderr, "Stack %lu Entering state %d\n", (unsigned long int)yyk, + yystate)); - YYASSERT (yystate != YYFINAL); + YYASSERT(yystate != YYFINAL); - if (yyisDefaultedState (yystate)) - { - yyrule = yydefaultAction (yystate); - if (yyrule == 0) - { - YYDPRINTF ((stderr, "Stack %lu dies.\n", - (unsigned long int) yyk)); - yymarkStackDeleted (yystackp, yyk); - return yyok; - } - YYCHK (yyglrReduce (yystackp, yyk, yyrule, yyfalse, result)); - } - else - { - yySymbol yytoken; - yystackp->yytops.yylookaheadNeeds[yyk] = yytrue; - if (yychar == YYEMPTY) - { - YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; - } + if (yyisDefaultedState(yystate)) { + yyrule = yydefaultAction(yystate); + if (yyrule == 0) { + YYDPRINTF((stderr, "Stack %lu dies.\n", (unsigned long int)yyk)); + yymarkStackDeleted(yystackp, yyk); + return yyok; + } + YYCHK(yyglrReduce(yystackp, yyk, yyrule, yyfalse, result)); + } else { + yySymbol yytoken; + yystackp->yytops.yylookaheadNeeds[yyk] = yytrue; + if (yychar == YYEMPTY) { + YYDPRINTF((stderr, "Reading a token: ")); + yychar = YYLEX; + } - if (yychar <= YYEOF) - { - yychar = yytoken = YYEOF; - YYDPRINTF ((stderr, "Now at end of input.\n")); - } - else - { - yytoken = YYTRANSLATE (yychar); - YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); - } + if (yychar <= YYEOF) { + yychar = yytoken = YYEOF; + YYDPRINTF((stderr, "Now at end of input.\n")); + } else { 
+ yytoken = YYTRANSLATE(yychar); + YY_SYMBOL_PRINT("Next token is", yytoken, &yylval, &yylloc); + } - yygetLRActions (yystate, yytoken, &yyaction, &yyconflicts); + yygetLRActions(yystate, yytoken, &yyaction, &yyconflicts); - while (*yyconflicts != 0) - { - size_t yynewStack = yysplitStack (yystackp, yyk); - YYDPRINTF ((stderr, "Splitting off stack %lu from %lu.\n", - (unsigned long int) yynewStack, - (unsigned long int) yyk)); - YYCHK (yyglrReduce (yystackp, yynewStack, - *yyconflicts, yyfalse, result)); - YYCHK (yyprocessOneStack (yystackp, yynewStack, - yyposn, result)); - yyconflicts += 1; - } + while (*yyconflicts != 0) { + size_t yynewStack = yysplitStack(yystackp, yyk); + YYDPRINTF((stderr, "Splitting off stack %lu from %lu.\n", + (unsigned long int)yynewStack, (unsigned long int)yyk)); + YYCHK(yyglrReduce(yystackp, yynewStack, *yyconflicts, yyfalse, result)); + YYCHK(yyprocessOneStack(yystackp, yynewStack, yyposn, result)); + yyconflicts += 1; + } - if (yyisShiftAction (yyaction)) - break; - else if (yyisErrorAction (yyaction)) - { - YYDPRINTF ((stderr, "Stack %lu dies.\n", - (unsigned long int) yyk)); - yymarkStackDeleted (yystackp, yyk); - break; - } - else - YYCHK (yyglrReduce (yystackp, yyk, -yyaction, - yyfalse, result)); - } + if (yyisShiftAction(yyaction)) + break; + else if (yyisErrorAction(yyaction)) { + YYDPRINTF((stderr, "Stack %lu dies.\n", (unsigned long int)yyk)); + yymarkStackDeleted(yystackp, yyk); + break; + } else + YYCHK(yyglrReduce(yystackp, yyk, -yyaction, yyfalse, result)); } + } return yyok; } -/*ARGSUSED*/ static void -yyreportSyntaxError (yyGLRStack* yystackp, struct ParseResult* result) -{ - if (yystackp->yyerrState != 0) - return; -#if ! 
YYERROR_VERBOSE - yyerror (result, YY_("syntax error")); +/*ARGSUSED*/ static void yyreportSyntaxError(yyGLRStack *yystackp, + struct ParseResult *result) { + if (yystackp->yyerrState != 0) return; +#if !YYERROR_VERBOSE + yyerror(result, YY_("syntax error")); #else { - yySymbol yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); - size_t yysize0 = yytnamerr (YY_NULL, yytokenName (yytoken)); - size_t yysize = yysize0; - yybool yysize_overflow = yyfalse; - char* yymsg = YY_NULL; - enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; - /* Internationalized format string. */ - const char *yyformat = YY_NULL; - /* Arguments of yyformat. */ - char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; - /* Number of reported tokens (one for the "unexpected", one per - "expected"). */ - int yycount = 0; - - /* There are many possibilities here to consider: - - If this state is a consistent state with a default action, then - the only way this function was invoked is if the default action - is an error action. In that case, don't check for expected - tokens because there are none. - - The only way there can be no lookahead present (in yychar) is if - this state is a consistent state with a default action. Thus, - detecting the absence of a lookahead is sufficient to determine - that there is no unexpected or expected token to report. In that - case, just report a simple "syntax error". - - Don't assume there isn't a lookahead just because this state is a - consistent state with a default action. There might have been a - previous inconsistent state, consistent state with a non-default - action, or user semantic action that manipulated yychar. - - Of course, the expected token list depends on states to have - correct lookahead information, and it depends on the parser not - to perform extra reductions after fetching a lookahead from the - scanner and before detecting a syntax error. Thus, state merging - (from LALR or IELR) and default reductions corrupt the expected - token list. 
However, the list is correct for canonical LR with - one exception: it will still contain any token that will not be - accepted due to an error action in a later state. - */ - if (yytoken != YYEMPTY) - { + yySymbol yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE(yychar); + size_t yysize0 = yytnamerr(YY_NULL, yytokenName(yytoken)); + size_t yysize = yysize0; + yybool yysize_overflow = yyfalse; + char *yymsg = YY_NULL; + enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; + /* Internationalized format string. */ + const char *yyformat = YY_NULL; + /* Arguments of yyformat. */ + char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; + /* Number of reported tokens (one for the "unexpected", one per + "expected"). */ + int yycount = 0; + + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, then + the only way this function was invoked is if the default action + is an error action. In that case, don't check for expected + tokens because there are none. + - The only way there can be no lookahead present (in yychar) is if + this state is a consistent state with a default action. Thus, + detecting the absence of a lookahead is sufficient to determine + that there is no unexpected or expected token to report. In that + case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this state is a + consistent state with a default action. There might have been a + previous inconsistent state, consistent state with a non-default + action, or user semantic action that manipulated yychar. + - Of course, the expected token list depends on states to have + correct lookahead information, and it depends on the parser not + to perform extra reductions after fetching a lookahead from the + scanner and before detecting a syntax error. Thus, state merging + (from LALR or IELR) and default reductions corrupt the expected + token list. 
However, the list is correct for canonical LR with + one exception: it will still contain any token that will not be + accepted due to an error action in a later state. + */ + if (yytoken != YYEMPTY) { int yyn = yypact[yystackp->yytops.yystates[0]->yylrState]; - yyarg[yycount++] = yytokenName (yytoken); - if (!yypact_value_is_default (yyn)) - { - /* Start YYX at -YYN if negative to avoid negative indexes in - YYCHECK. In other words, skip the first -YYN actions for this - state because they are default actions. */ - int yyxbegin = yyn < 0 ? -yyn : 0; - /* Stay within bounds of both yycheck and yytname. */ - int yychecklim = YYLAST - yyn + 1; - int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; - int yyx; - for (yyx = yyxbegin; yyx < yyxend; ++yyx) - if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR - && !yytable_value_is_error (yytable[yyx + yyn])) - { - if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) - { - yycount = 1; - yysize = yysize0; - break; - } - yyarg[yycount++] = yytokenName (yyx); - { - size_t yysz = yysize + yytnamerr (YY_NULL, yytokenName (yyx)); - yysize_overflow |= yysz < yysize; - yysize = yysz; - } - } - } + yyarg[yycount++] = yytokenName(yytoken); + if (!yypact_value_is_default(yyn)) { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. In other words, skip the first -YYN actions for this + state because they are default actions. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? 
yychecklim : YYNTOKENS; + int yyx; + for (yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR && + !yytable_value_is_error(yytable[yyx + yyn])) { + if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { + yycount = 1; + yysize = yysize0; + break; + } + yyarg[yycount++] = yytokenName(yyx); + { + size_t yysz = yysize + yytnamerr(YY_NULL, yytokenName(yyx)); + yysize_overflow |= yysz < yysize; + yysize = yysz; + } + } + } } - switch (yycount) - { -#define YYCASE_(N, S) \ - case N: \ - yyformat = S; \ - break + switch (yycount) { +#define YYCASE_(N, S) \ + case N: \ + yyformat = S; \ + break YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); - YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); + YYCASE_( + 5, + YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); #undef YYCASE_ } - { - size_t yysz = yysize + strlen (yyformat); - yysize_overflow |= yysz < yysize; - yysize = yysz; - } + { + size_t yysz = yysize + strlen(yyformat); + yysize_overflow |= yysz < yysize; + yysize = yysz; + } - if (!yysize_overflow) - yymsg = (char *) YYMALLOC (yysize); + if (!yysize_overflow) yymsg = (char *)YYMALLOC(yysize); - if (yymsg) - { + if (yymsg) { char *yyp = yymsg; int yyi = 0; - while ((*yyp = *yyformat)) - { - if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) - { - yyp += yytnamerr (yyp, yyarg[yyi++]); - yyformat += 2; - } - else - { - yyp++; - yyformat++; - } + while ((*yyp = *yyformat)) { + if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { + yyp += yytnamerr(yyp, yyarg[yyi++]); + yyformat += 2; + } else { + yyp++; + yyformat++; } - yyerror (result, yymsg); - YYFREE (yymsg); - } - else - { - yyerror (result, YY_("syntax error")); - yyMemoryExhausted (yystackp); + } 
+ yyerror(result, yymsg); + YYFREE(yymsg); + } else { + yyerror(result, YY_("syntax error")); + yyMemoryExhausted(yystackp); } } #endif /* YYERROR_VERBOSE */ @@ -5712,443 +6794,446 @@ yyreportSyntaxError (yyGLRStack* yystackp, struct ParseResult* result) /* Recover from a syntax error on *YYSTACKP, assuming that *YYSTACKP->YYTOKENP, yylval, and yylloc are the syntactic category, semantic value, and location of the lookahead. */ -/*ARGSUSED*/ static void -yyrecoverSyntaxError (yyGLRStack* yystackp, struct ParseResult* result) -{ +/*ARGSUSED*/ static void yyrecoverSyntaxError(yyGLRStack *yystackp, + struct ParseResult *result) { size_t yyk; int yyj; if (yystackp->yyerrState == 3) /* We just shifted the error token and (perhaps) took some reductions. Skip tokens until we can proceed. */ - while (YYID (yytrue)) - { - yySymbol yytoken; - if (yychar == YYEOF) - yyFail (yystackp, result, YY_NULL); - if (yychar != YYEMPTY) - { - yytoken = YYTRANSLATE (yychar); - yydestruct ("Error: discarding", - yytoken, &yylval, result); - } - YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; - if (yychar <= YYEOF) - { - yychar = yytoken = YYEOF; - YYDPRINTF ((stderr, "Now at end of input.\n")); - } - else - { - yytoken = YYTRANSLATE (yychar); - YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); - } - yyj = yypact[yystackp->yytops.yystates[0]->yylrState]; - if (yypact_value_is_default (yyj)) - return; - yyj += yytoken; - if (yyj < 0 || YYLAST < yyj || yycheck[yyj] != yytoken) - { - if (yydefact[yystackp->yytops.yystates[0]->yylrState] != 0) - return; - } - else if (! 
yytable_value_is_error (yytable[yyj])) - return; + while (YYID(yytrue)) { + yySymbol yytoken; + if (yychar == YYEOF) yyFail(yystackp, result, YY_NULL); + if (yychar != YYEMPTY) { + yytoken = YYTRANSLATE(yychar); + yydestruct("Error: discarding", yytoken, &yylval, result); } + YYDPRINTF((stderr, "Reading a token: ")); + yychar = YYLEX; + if (yychar <= YYEOF) { + yychar = yytoken = YYEOF; + YYDPRINTF((stderr, "Now at end of input.\n")); + } else { + yytoken = YYTRANSLATE(yychar); + YY_SYMBOL_PRINT("Next token is", yytoken, &yylval, &yylloc); + } + yyj = yypact[yystackp->yytops.yystates[0]->yylrState]; + if (yypact_value_is_default(yyj)) return; + yyj += yytoken; + if (yyj < 0 || YYLAST < yyj || yycheck[yyj] != yytoken) { + if (yydefact[yystackp->yytops.yystates[0]->yylrState] != 0) return; + } else if (!yytable_value_is_error(yytable[yyj])) + return; + } /* Reduce to one stack. */ for (yyk = 0; yyk < yystackp->yytops.yysize; yyk += 1) - if (yystackp->yytops.yystates[yyk] != YY_NULL) - break; - if (yyk >= yystackp->yytops.yysize) - yyFail (yystackp, result, YY_NULL); + if (yystackp->yytops.yystates[yyk] != YY_NULL) break; + if (yyk >= yystackp->yytops.yysize) yyFail(yystackp, result, YY_NULL); for (yyk += 1; yyk < yystackp->yytops.yysize; yyk += 1) - yymarkStackDeleted (yystackp, yyk); - yyremoveDeletes (yystackp); - yycompressStack (yystackp); + yymarkStackDeleted(yystackp, yyk); + yyremoveDeletes(yystackp); + yycompressStack(yystackp); /* Now pop stack until we find a state that shifts the error token. */ yystackp->yyerrState = 3; - while (yystackp->yytops.yystates[0] != YY_NULL) - { - yyGLRState *yys = yystackp->yytops.yystates[0]; - yyj = yypact[yys->yylrState]; - if (! yypact_value_is_default (yyj)) - { - yyj += YYTERROR; - if (0 <= yyj && yyj <= YYLAST && yycheck[yyj] == YYTERROR - && yyisShiftAction (yytable[yyj])) - { - /* Shift the error token. 
*/ - YY_SYMBOL_PRINT ("Shifting", yystos[yytable[yyj]], - &yylval, &yyerrloc); - yyglrShift (yystackp, 0, yytable[yyj], - yys->yyposn, &yylval); - yys = yystackp->yytops.yystates[0]; - break; - } - } - if (yys->yypred != YY_NULL) - yydestroyGLRState ("Error: popping", yys, result); - yystackp->yytops.yystates[0] = yys->yypred; - yystackp->yynextFree -= 1; - yystackp->yyspaceLeft += 1; + while (yystackp->yytops.yystates[0] != YY_NULL) { + yyGLRState *yys = yystackp->yytops.yystates[0]; + yyj = yypact[yys->yylrState]; + if (!yypact_value_is_default(yyj)) { + yyj += YYTERROR; + if (0 <= yyj && yyj <= YYLAST && yycheck[yyj] == YYTERROR && + yyisShiftAction(yytable[yyj])) { + /* Shift the error token. */ + YY_SYMBOL_PRINT("Shifting", yystos[yytable[yyj]], &yylval, &yyerrloc); + yyglrShift(yystackp, 0, yytable[yyj], yys->yyposn, &yylval); + yys = yystackp->yytops.yystates[0]; + break; + } } + if (yys->yypred != YY_NULL) + yydestroyGLRState("Error: popping", yys, result); + yystackp->yytops.yystates[0] = yys->yypred; + yystackp->yynextFree -= 1; + yystackp->yyspaceLeft += 1; + } if (yystackp->yytops.yystates[0] == YY_NULL) - yyFail (yystackp, result, YY_NULL); + yyFail(yystackp, result, YY_NULL); } -#define YYCHK1(YYE) \ - do { \ - switch (YYE) { \ - case yyok: \ - break; \ - case yyabort: \ - goto yyabortlab; \ - case yyaccept: \ - goto yyacceptlab; \ - case yyerr: \ - goto yyuser_error; \ - default: \ - goto yybuglab; \ - } \ - } while (YYID (0)) - +#define YYCHK1(YYE) \ + do { \ + switch (YYE) { \ + case yyok: \ + break; \ + case yyabort: \ + goto yyabortlab; \ + case yyaccept: \ + goto yyacceptlab; \ + case yyerr: \ + goto yyuser_error; \ + default: \ + goto yybuglab; \ + } \ + } while (YYID(0)) /*----------. | yyparse. 
| `----------*/ -int -yyparse (struct ParseResult* result) -{ +int yyparse(struct ParseResult *result) { int yyresult; yyGLRStack yystack; - yyGLRStack* const yystackp = &yystack; + yyGLRStack *const yystackp = &yystack; size_t yyposn; - YYDPRINTF ((stderr, "Starting parse\n")); + YYDPRINTF((stderr, "Starting parse\n")); yychar = YYEMPTY; yylval = yyval_default; - if (! yyinitGLRStack (yystackp, YYINITDEPTH)) - goto yyexhaustedlab; - switch (YYSETJMP (yystack.yyexception_buffer)) - { - case 0: break; - case 1: goto yyabortlab; - case 2: goto yyexhaustedlab; - default: goto yybuglab; - } - yyglrShift (&yystack, 0, 0, 0, &yylval); + if (!yyinitGLRStack(yystackp, YYINITDEPTH)) goto yyexhaustedlab; + switch (YYSETJMP(yystack.yyexception_buffer)) { + case 0: + break; + case 1: + goto yyabortlab; + case 2: + goto yyexhaustedlab; + default: + goto yybuglab; + } + yyglrShift(&yystack, 0, 0, 0, &yylval); yyposn = 0; - while (YYID (yytrue)) - { - /* For efficiency, we have two loops, the first of which is - specialized to deterministic operation (single stack, no - potential ambiguity). 
*/ - /* Standard mode */ - while (YYID (yytrue)) - { - yyRuleNum yyrule; - int yyaction; - const short int* yyconflicts; - - yyStateNum yystate = yystack.yytops.yystates[0]->yylrState; - YYDPRINTF ((stderr, "Entering state %d\n", yystate)); - if (yystate == YYFINAL) - goto yyacceptlab; - if (yyisDefaultedState (yystate)) - { - yyrule = yydefaultAction (yystate); - if (yyrule == 0) - { - - yyreportSyntaxError (&yystack, result); - goto yyuser_error; - } - YYCHK1 (yyglrReduce (&yystack, 0, yyrule, yytrue, result)); - } - else - { - yySymbol yytoken; - if (yychar == YYEMPTY) - { - YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; - } - - if (yychar <= YYEOF) - { - yychar = yytoken = YYEOF; - YYDPRINTF ((stderr, "Now at end of input.\n")); - } - else - { - yytoken = YYTRANSLATE (yychar); - YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); - } - - yygetLRActions (yystate, yytoken, &yyaction, &yyconflicts); - if (*yyconflicts != 0) - break; - if (yyisShiftAction (yyaction)) - { - YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); - yychar = YYEMPTY; - yyposn += 1; - yyglrShift (&yystack, 0, yyaction, yyposn, &yylval); - if (0 < yystack.yyerrState) - yystack.yyerrState -= 1; - } - else if (yyisErrorAction (yyaction)) - { - - yyreportSyntaxError (&yystack, result); - goto yyuser_error; - } - else - YYCHK1 (yyglrReduce (&yystack, 0, -yyaction, yytrue, result)); - } + while (YYID(yytrue)) { + /* For efficiency, we have two loops, the first of which is + specialized to deterministic operation (single stack, no + potential ambiguity). 
*/ + /* Standard mode */ + while (YYID(yytrue)) { + yyRuleNum yyrule; + int yyaction; + const short int *yyconflicts; + + yyStateNum yystate = yystack.yytops.yystates[0]->yylrState; + YYDPRINTF((stderr, "Entering state %d\n", yystate)); + if (yystate == YYFINAL) goto yyacceptlab; + if (yyisDefaultedState(yystate)) { + yyrule = yydefaultAction(yystate); + if (yyrule == 0) { + yyreportSyntaxError(&yystack, result); + goto yyuser_error; + } + YYCHK1(yyglrReduce(&yystack, 0, yyrule, yytrue, result)); + } else { + yySymbol yytoken; + if (yychar == YYEMPTY) { + YYDPRINTF((stderr, "Reading a token: ")); + yychar = YYLEX; } - while (YYID (yytrue)) - { - yySymbol yytoken_to_shift; - size_t yys; + if (yychar <= YYEOF) { + yychar = yytoken = YYEOF; + YYDPRINTF((stderr, "Now at end of input.\n")); + } else { + yytoken = YYTRANSLATE(yychar); + YY_SYMBOL_PRINT("Next token is", yytoken, &yylval, &yylloc); + } - for (yys = 0; yys < yystack.yytops.yysize; yys += 1) - yystackp->yytops.yylookaheadNeeds[yys] = yychar != YYEMPTY; + yygetLRActions(yystate, yytoken, &yyaction, &yyconflicts); + if (*yyconflicts != 0) break; + if (yyisShiftAction(yyaction)) { + YY_SYMBOL_PRINT("Shifting", yytoken, &yylval, &yylloc); + yychar = YYEMPTY; + yyposn += 1; + yyglrShift(&yystack, 0, yyaction, yyposn, &yylval); + if (0 < yystack.yyerrState) yystack.yyerrState -= 1; + } else if (yyisErrorAction(yyaction)) { + yyreportSyntaxError(&yystack, result); + goto yyuser_error; + } else + YYCHK1(yyglrReduce(&yystack, 0, -yyaction, yytrue, result)); + } + } - /* yyprocessOneStack returns one of three things: + while (YYID(yytrue)) { + yySymbol yytoken_to_shift; + size_t yys; - - An error flag. If the caller is yyprocessOneStack, it - immediately returns as well. When the caller is finally - yyparse, it jumps to an error label via YYCHK1. 
+ for (yys = 0; yys < yystack.yytops.yysize; yys += 1) + yystackp->yytops.yylookaheadNeeds[yys] = yychar != YYEMPTY; - - yyok, but yyprocessOneStack has invoked yymarkStackDeleted - (&yystack, yys), which sets the top state of yys to NULL. Thus, - yyparse's following invocation of yyremoveDeletes will remove - the stack. + /* yyprocessOneStack returns one of three things: - - yyok, when ready to shift a token. + - An error flag. If the caller is yyprocessOneStack, it + immediately returns as well. When the caller is finally + yyparse, it jumps to an error label via YYCHK1. - Except in the first case, yyparse will invoke yyremoveDeletes and - then shift the next token onto all remaining stacks. This - synchronization of the shift (that is, after all preceding - reductions on all stacks) helps prevent double destructor calls - on yylval in the event of memory exhaustion. */ + - yyok, but yyprocessOneStack has invoked yymarkStackDeleted + (&yystack, yys), which sets the top state of yys to NULL. Thus, + yyparse's following invocation of yyremoveDeletes will remove + the stack. - for (yys = 0; yys < yystack.yytops.yysize; yys += 1) - YYCHK1 (yyprocessOneStack (&yystack, yys, yyposn, result)); - yyremoveDeletes (&yystack); - if (yystack.yytops.yysize == 0) - { - yyundeleteLastStack (&yystack); - if (yystack.yytops.yysize == 0) - yyFail (&yystack, result, YY_("syntax error")); - YYCHK1 (yyresolveStack (&yystack, result)); - YYDPRINTF ((stderr, "Returning to deterministic operation.\n")); - - yyreportSyntaxError (&yystack, result); - goto yyuser_error; - } + - yyok, when ready to shift a token. - /* If any yyglrShift call fails, it will fail after shifting. Thus, - a copy of yylval will already be on stack 0 in the event of a - failure in the following loop. Thus, yychar is set to YYEMPTY - before the loop to make sure the user destructor for yylval isn't - called twice. 
*/ - yytoken_to_shift = YYTRANSLATE (yychar); - yychar = YYEMPTY; - yyposn += 1; - for (yys = 0; yys < yystack.yytops.yysize; yys += 1) - { - int yyaction; - const short int* yyconflicts; - yyStateNum yystate = yystack.yytops.yystates[yys]->yylrState; - yygetLRActions (yystate, yytoken_to_shift, &yyaction, - &yyconflicts); - /* Note that yyconflicts were handled by yyprocessOneStack. */ - YYDPRINTF ((stderr, "On stack %lu, ", (unsigned long int) yys)); - YY_SYMBOL_PRINT ("shifting", yytoken_to_shift, &yylval, &yylloc); - yyglrShift (&yystack, yys, yyaction, yyposn, - &yylval); - YYDPRINTF ((stderr, "Stack %lu now in state #%d\n", - (unsigned long int) yys, - yystack.yytops.yystates[yys]->yylrState)); - } + Except in the first case, yyparse will invoke yyremoveDeletes and + then shift the next token onto all remaining stacks. This + synchronization of the shift (that is, after all preceding + reductions on all stacks) helps prevent double destructor calls + on yylval in the event of memory exhaustion. */ - if (yystack.yytops.yysize == 1) - { - YYCHK1 (yyresolveStack (&yystack, result)); - YYDPRINTF ((stderr, "Returning to deterministic operation.\n")); - yycompressStack (&yystack); - break; - } - } - continue; - yyuser_error: - yyrecoverSyntaxError (&yystack, result); - yyposn = yystack.yytops.yystates[0]->yyposn; + for (yys = 0; yys < yystack.yytops.yysize; yys += 1) + YYCHK1(yyprocessOneStack(&yystack, yys, yyposn, result)); + yyremoveDeletes(&yystack); + if (yystack.yytops.yysize == 0) { + yyundeleteLastStack(&yystack); + if (yystack.yytops.yysize == 0) + yyFail(&yystack, result, YY_("syntax error")); + YYCHK1(yyresolveStack(&yystack, result)); + YYDPRINTF((stderr, "Returning to deterministic operation.\n")); + + yyreportSyntaxError(&yystack, result); + goto yyuser_error; + } + + /* If any yyglrShift call fails, it will fail after shifting. Thus, + a copy of yylval will already be on stack 0 in the event of a + failure in the following loop. 
Thus, yychar is set to YYEMPTY + before the loop to make sure the user destructor for yylval isn't + called twice. */ + yytoken_to_shift = YYTRANSLATE(yychar); + yychar = YYEMPTY; + yyposn += 1; + for (yys = 0; yys < yystack.yytops.yysize; yys += 1) { + int yyaction; + const short int *yyconflicts; + yyStateNum yystate = yystack.yytops.yystates[yys]->yylrState; + yygetLRActions(yystate, yytoken_to_shift, &yyaction, &yyconflicts); + /* Note that yyconflicts were handled by yyprocessOneStack. */ + YYDPRINTF((stderr, "On stack %lu, ", (unsigned long int)yys)); + YY_SYMBOL_PRINT("shifting", yytoken_to_shift, &yylval, &yylloc); + yyglrShift(&yystack, yys, yyaction, yyposn, &yylval); + YYDPRINTF((stderr, "Stack %lu now in state #%d\n", + (unsigned long int)yys, + yystack.yytops.yystates[yys]->yylrState)); + } + + if (yystack.yytops.yysize == 1) { + YYCHK1(yyresolveStack(&yystack, result)); + YYDPRINTF((stderr, "Returning to deterministic operation.\n")); + yycompressStack(&yystack); + break; + } } + continue; + yyuser_error: + yyrecoverSyntaxError(&yystack, result); + yyposn = yystack.yytops.yystates[0]->yyposn; + } - yyacceptlab: +yyacceptlab: yyresult = 0; goto yyreturn; - yybuglab: - YYASSERT (yyfalse); +yybuglab: + YYASSERT(yyfalse); goto yyabortlab; - yyabortlab: +yyabortlab: yyresult = 1; goto yyreturn; - yyexhaustedlab: - yyerror (result, YY_("memory exhausted")); +yyexhaustedlab: + yyerror(result, YY_("memory exhausted")); yyresult = 2; goto yyreturn; - yyreturn: +yyreturn: if (yychar != YYEMPTY) - yydestruct ("Cleanup: discarding lookahead", - YYTRANSLATE (yychar), &yylval, result); + yydestruct("Cleanup: discarding lookahead", YYTRANSLATE(yychar), &yylval, + result); /* If the stack is well-formed, pop the stack until it is empty, destroying its entries as we go. But free the stack regardless of whether it is well-formed. 
*/ - if (yystack.yyitems) - { - yyGLRState** yystates = yystack.yytops.yystates; - if (yystates) - { - size_t yysize = yystack.yytops.yysize; - size_t yyk; - for (yyk = 0; yyk < yysize; yyk += 1) - if (yystates[yyk]) - { - while (yystates[yyk]) - { - yyGLRState *yys = yystates[yyk]; - if (yys->yypred != YY_NULL) - yydestroyGLRState ("Cleanup: popping", yys, result); - yystates[yyk] = yys->yypred; - yystack.yynextFree -= 1; - yystack.yyspaceLeft += 1; - } - break; - } + if (yystack.yyitems) { + yyGLRState **yystates = yystack.yytops.yystates; + if (yystates) { + size_t yysize = yystack.yytops.yysize; + size_t yyk; + for (yyk = 0; yyk < yysize; yyk += 1) + if (yystates[yyk]) { + while (yystates[yyk]) { + yyGLRState *yys = yystates[yyk]; + if (yys->yypred != YY_NULL) + yydestroyGLRState("Cleanup: popping", yys, result); + yystates[yyk] = yys->yypred; + yystack.yynextFree -= 1; + yystack.yyspaceLeft += 1; + } + break; } - yyfreeGLRStack (&yystack); } + yyfreeGLRStack(&yystack); + } /* Make sure YYID is used. 
*/ - return YYID (yyresult); + return YYID(yyresult); } /* DEBUGGING ONLY */ #if YYDEBUG -static void yypstack (yyGLRStack* yystackp, size_t yyk) - __attribute__ ((__unused__)); -static void yypdumpstack (yyGLRStack* yystackp) __attribute__ ((__unused__)); - -static void -yy_yypstack (yyGLRState* yys) -{ - if (yys->yypred) - { - yy_yypstack (yys->yypred); - YYFPRINTF (stderr, " -> "); - } - YYFPRINTF (stderr, "%d@%lu", yys->yylrState, - (unsigned long int) yys->yyposn); +static void yypstack(yyGLRStack *yystackp, size_t yyk) + __attribute__((__unused__)); +static void yypdumpstack(yyGLRStack *yystackp) __attribute__((__unused__)); + +static void yy_yypstack(yyGLRState *yys) { + if (yys->yypred) { + yy_yypstack(yys->yypred); + YYFPRINTF(stderr, " -> "); + } + YYFPRINTF(stderr, "%d@%lu", yys->yylrState, (unsigned long int)yys->yyposn); } -static void -yypstates (yyGLRState* yyst) -{ +static void yypstates(yyGLRState *yyst) { if (yyst == YY_NULL) - YYFPRINTF (stderr, ""); + YYFPRINTF(stderr, ""); else - yy_yypstack (yyst); - YYFPRINTF (stderr, "\n"); + yy_yypstack(yyst); + YYFPRINTF(stderr, "\n"); } -static void -yypstack (yyGLRStack* yystackp, size_t yyk) -{ - yypstates (yystackp->yytops.yystates[yyk]); +static void yypstack(yyGLRStack *yystackp, size_t yyk) { + yypstates(yystackp->yytops.yystates[yyk]); } -#define YYINDEX(YYX) \ - ((YYX) == YY_NULL ? -1 : (yyGLRStackItem*) (YYX) - yystackp->yyitems) +#define YYINDEX(YYX) \ + ((YYX) == YY_NULL ? -1 : (yyGLRStackItem *)(YYX) - yystackp->yyitems) - -static void -yypdumpstack (yyGLRStack* yystackp) -{ - yyGLRStackItem* yyp; +static void yypdumpstack(yyGLRStack *yystackp) { + yyGLRStackItem *yyp; size_t yyi; - for (yyp = yystackp->yyitems; yyp < yystackp->yynextFree; yyp += 1) - { - YYFPRINTF (stderr, "%3lu. 
", - (unsigned long int) (yyp - yystackp->yyitems)); - if (*(yybool *) yyp) - { - YYFPRINTF (stderr, "Res: %d, LR State: %d, posn: %lu, pred: %ld", - yyp->yystate.yyresolved, yyp->yystate.yylrState, - (unsigned long int) yyp->yystate.yyposn, - (long int) YYINDEX (yyp->yystate.yypred)); - if (! yyp->yystate.yyresolved) - YYFPRINTF (stderr, ", firstVal: %ld", - (long int) YYINDEX (yyp->yystate - .yysemantics.yyfirstVal)); - } - else - { - YYFPRINTF (stderr, "Option. rule: %d, state: %ld, next: %ld", - yyp->yyoption.yyrule - 1, - (long int) YYINDEX (yyp->yyoption.yystate), - (long int) YYINDEX (yyp->yyoption.yynext)); - } - YYFPRINTF (stderr, "\n"); + for (yyp = yystackp->yyitems; yyp < yystackp->yynextFree; yyp += 1) { + YYFPRINTF(stderr, "%3lu. ", (unsigned long int)(yyp - yystackp->yyitems)); + if (*(yybool *)yyp) { + YYFPRINTF(stderr, "Res: %d, LR State: %d, posn: %lu, pred: %ld", + yyp->yystate.yyresolved, yyp->yystate.yylrState, + (unsigned long int)yyp->yystate.yyposn, + (long int)YYINDEX(yyp->yystate.yypred)); + if (!yyp->yystate.yyresolved) + YYFPRINTF(stderr, ", firstVal: %ld", + (long int)YYINDEX(yyp->yystate.yysemantics.yyfirstVal)); + } else { + YYFPRINTF(stderr, "Option. rule: %d, state: %ld, next: %ld", + yyp->yyoption.yyrule - 1, + (long int)YYINDEX(yyp->yyoption.yystate), + (long int)YYINDEX(yyp->yyoption.yynext)); } - YYFPRINTF (stderr, "Tops:"); + YYFPRINTF(stderr, "\n"); + } + YYFPRINTF(stderr, "Tops:"); for (yyi = 0; yyi < yystackp->yytops.yysize; yyi += 1) - YYFPRINTF (stderr, "%lu: %ld; ", (unsigned long int) yyi, - (long int) YYINDEX (yystackp->yytops.yystates[yyi])); - YYFPRINTF (stderr, "\n"); + YYFPRINTF(stderr, "%lu: %ld; ", (unsigned long int)yyi, + (long int)YYINDEX(yystackp->yytops.yystates[yyi])); + YYFPRINTF(stderr, "\n"); } #endif /* Line 2575 of glr.c */ -#line 1192 "sql.ypp" +#line 1215 "sql.ypp" + +void emit(char *s, ...) 
{ + /* + extern int yylineno; + va_list ap; + va_start(ap, s); + printf("rpn: "); + vfprintf(stdout, s, ap); + printf("\n"); + */ +} +void yyerror(struct ParseResult *pp, const char *s, ...) { + va_list ap; + va_start(ap, s); + string sql_clause = pp->sql_clause; + vector tokens; + boost::split(tokens, sql_clause, boost::is_any_of("\n")); + ostringstream ostr; + int columnno = 0; + int lineno = yyget_lineno(pp->yyscan_info_); + lineno = lineno % tokens.size(); + lineno++; + // Because find() can only find the first place where the word appears, + // so this method may not find the certain place. + /* + for (int i = 0; i < tokens.size(); i++) { + columnno = tokens[i].find(yyget_text(pp->yyscan_info_)) + 1; + if (-1 != columnno && ((lineno - 1) == i)) { + cout << "In clause \'" + << "\e[1m" << tokens[i] << "\e[0m\'" << endl; + for (int j = 0; j < (columnno + 9); j++) { + cout << "."; + } + cout << "^" << endl; + ostr << "In clause \'" + << "\e[1m" << tokens[i] << "\e[0m\'" << endl; + for (int j = 0; j < (columnno + 9); j++) { + ostr << "."; + } + ostr << "^" << endl; + break; + } + } + */ + string wrong_clause = tokens[lineno - 1]; + vector words_in_clause; + boost::split(words_in_clause, tokens[lineno - 1], boost::is_any_of(" ")); + int yyget_col_no = yyget_column(pp->yyscan_info_); + if (yyget_col_no == 0) { + yyget_col_no = 1; + } + cout << "yyget_col_no = " << yyget_col_no << endl; + int column_num = 1; + for (int i = 0; i < words_in_clause.size(); i++) { + if (i == (yyget_col_no - 1)) { + cout << "In clause \'" + << "\e[1m" << tokens[lineno - 1] << "\e[0m\'" << endl; + ostr << "In clause \'" + << "\e[1m" << tokens[lineno - 1] << "\e[0m\'" << endl; + for (int j = 0; j < (column_num + 10); j++) { + ostr << "."; + cout << "."; + } + cout << "^" << endl; + ostr << "^" << endl; -void emit(char *s, ...) 
-{ - /* - extern int yylineno; - va_list ap; - va_start(ap, s); - printf("rpn: "); - vfprintf(stdout, s, ap); - printf("\n"); - */ -} + break; + } else { + column_num = column_num + words_in_clause[i].size() + 1; + } + } + ostr << "SQL syntax error at \e[1mline: " << lineno << "," + << "\e[0m near \'\e[1m"; + ostr << yyget_text(pp->yyscan_info_); + // ostr << "\e[1mLINE: " << lineno << "," << columnno << "\e[0m error: "; + // ostr << "near \'\e[1m"; + // ostr << yyget_text(pp->yyscan_info_); + ostr << "\e[0m\'." << endl; + pp->error_info_ = ostr.str(); + cout << "SQL syntax error at \e[1mline: " << lineno << "," + << "\e[0m near \'\e[1m"; + cout << yyget_text(pp->yyscan_info_); + cout << "\e[0m\'." << endl; + /* + cout << "\e[1mLINE: " << lineno << "\e[0m error: "; + cout << "near \'\e[1m"; + cout << yyget_text(pp->yyscan_info_); + cout << "\e[0m\'." << endl; + */ + yyset_column(0, pp->yyscan_info_); -void yyerror(struct ParseResult *pp,const char * s, ...) -{ - /* - va_list ap; - va_start(ap, s); - fprintf(stderr, "%d: error: ", yyget_lineno(pp->yyscan_info_)); - vfprintf(stderr, s, ap); - fprintf(stderr, "\n"); - */ - fprintf (stderr, "%s\n", s); + vfprintf(stderr, s, ap); + fprintf(stderr, "\n"); } - diff --git a/sql_parser/parser/sql.tab.hpp b/sql_parser/parser/sql.tab.hpp index c378cc533..8a7d28211 100644 --- a/sql_parser/parser/sql.tab.hpp +++ b/sql_parser/parser/sql.tab.hpp @@ -142,222 +142,222 @@ extern int yydebug; FORCE = 351, FOREIGN = 352, FROM = 353, - FULLTEXT = 354, - GRANT = 355, - GROUP = 356, - HAVING = 357, - HIGH_PRIORITY = 358, - HOUR_MICROSECOND = 359, - HOUR_MINUTE = 360, - HOUR_SECOND = 361, - IF = 362, - IGNORE = 363, - INDEX = 364, - INFILE = 365, - INNER = 366, - INOUT = 367, - INSENSITIVE = 368, - INSERT = 369, - INT = 370, - INTEGER = 371, - INTERVAL = 372, - INTO = 373, - ITERATE = 374, - JOIN = 375, - KEY = 376, - KEYS = 377, - KILL = 378, - LEADING = 379, - LEAVE = 380, - LEFT = 381, - LIMIT = 382, - LINES = 383, - LOAD = 384, - 
LOCALTIME = 385, - LOCALTIMESTAMP = 386, - LOCK = 387, - LONG = 388, - LONGBLOB = 389, - LONGTEXT = 390, - LOOP = 391, - LOW_PRIORITY = 392, - MATCH = 393, - MEDIUMBLOB = 394, - MEDIUMINT = 395, - MEDIUMTEXT = 396, - MINUTE_MICROSECOND = 397, - MINUTE_SECOND = 398, - MODIFIES = 399, - NATURAL = 400, - NO_WRITE_TO_BINLOG = 401, - NULLX = 402, - NUMBER = 403, - ON = 404, - DUPLICATE = 405, - OPTIMIZE = 406, - OPTION = 407, - OPTIONALLY = 408, - ORDER = 409, - OUT = 410, - OUTER = 411, - OUTFILE = 412, - PARTITIONED = 413, - PRECISION = 414, - PRIMARY = 415, - PROCEDURE = 416, - PROJECTION = 417, - PURGE = 418, - QUICK = 419, - QUARTER = 420, - READ = 421, - READS = 422, - REAL = 423, - REFERENCES = 424, - RELEASE = 425, - RENAME = 426, - REPEAT = 427, - REPLACE = 428, - REQUIRE = 429, - RESTRICT = 430, - RETURN = 431, - REVOKE = 432, - RIGHT = 433, - ROLLUP = 434, - SAMPLE = 435, - SCHEMA = 436, - SCHEMAS = 437, - SECOND_MICROSECOND = 438, - SELECT = 439, - SENSITIVE = 440, - SEPARATOR = 441, - SET = 442, - SHOW = 443, - SMALLINT = 444, - SOME = 445, - SONAME = 446, - SPATIAL = 447, - SPECIFIC = 448, - SQL = 449, - SQLEXCEPTION = 450, - SQLSTATE = 451, - SQLWARNING = 452, - SQL_BIG_RESULT = 453, - SQL_CALC_FOUND_ROWS = 454, - SQL_SMALL_RESULT = 455, - SSL = 456, - STARTING = 457, - STRAIGHT_JOIN = 458, - TABLE = 459, - TEMPORARY = 460, - TEXT = 461, - TERMINATED = 462, - THEN = 463, - TIME = 464, - TIMESTAMP = 465, - TINYBLOB = 466, - TINYINT = 467, - TINYTEXT = 468, - TO = 469, - TRAILING = 470, - TRIGGER = 471, - UNDO = 472, - UNION = 473, - UNIQUE = 474, - UNLOCK = 475, - UNSIGNED = 476, - UPDATE = 477, - USAGE = 478, - USE = 479, - USING = 480, - UTC_DATE = 481, - UTC_TIME = 482, - UTC_TIMESTAMP = 483, - VALUES = 484, - VARBINARY = 485, - VARCHAR = 486, - VARYING = 487, - WHEN = 488, - WHERE = 489, - WHILE = 490, - WITH = 491, - WRITE = 492, - YEAR = 493, - YEAR_MONTH = 494, - ZEROFILL = 495, - WEEK = 496, - DO = 497, - MAX_QUERIES_PER_HOUR = 498, - 
MAX_UPDATES_PER_HOUR = 499, - MAX_CONNECTIONS_PER_HOUR = 500, - MAX_USER_CONNECTIONS = 501, - USER = 502, - TRUNCATE = 503, - FAST = 504, - MEDIUM = 505, - EXTENDED = 506, - CHANGED = 507, - LEAVES = 508, - MASTER = 509, - QUERY = 510, - CACHE = 511, - SLAVE = 512, - BEGINT = 513, - COMMIT = 514, - START = 515, - TRANSACTION = 516, - NO = 517, - CHAIN = 518, - AUTOCOMMIT = 519, - SAVEPOINT = 520, - ROLLBACK = 521, - LOCAL = 522, - TABLES = 523, - ISOLATION = 524, - LEVEL = 525, - GLOBAL = 526, - SESSION = 527, - UNCOMMITTED = 528, - COMMITTED = 529, - REPEATABLE = 530, - SERIALIZABLE = 531, - IDENTIFIED = 532, - PASSWORD = 533, - PRIVILEGES = 534, - BACKUP = 535, - CHECKSUM = 536, - REPAIR = 537, - USE_FRM = 538, - RESTORE = 539, - CHARACTER = 540, - COLLATION = 541, - COLUMNS = 542, - ENGINE = 543, - LOGS = 544, - STATUS = 545, - STORAGE = 546, - ENGINES = 547, - ERRORS = 548, - GRANTS = 549, - INNODB = 550, - PROCESSLIST = 551, - TRIGGERS = 552, - VARIABLES = 553, - WARNINGS = 554, - FLUSH = 555, - HOSTS = 556, - DES_KEY_FILE = 557, - USER_RESOURCES = 558, - CONNECTION = 559, - RESET = 560, - PREPARE = 561, - DEALLOCATE = 562, - EXECUTE = 563, - WORK = 564, - BTREE = 565, - HASH = 566, - BDB = 567, - OPEN = 568, - FULL = 569, + FULL = 354, + FULLTEXT = 355, + GRANT = 356, + GROUP = 357, + HAVING = 358, + HIGH_PRIORITY = 359, + HOUR_MICROSECOND = 360, + HOUR_MINUTE = 361, + HOUR_SECOND = 362, + IF = 363, + IGNORE = 364, + INDEX = 365, + INFILE = 366, + INNER = 367, + INOUT = 368, + INSENSITIVE = 369, + INSERT = 370, + INT = 371, + INTEGER = 372, + INTERVAL = 373, + INTO = 374, + ITERATE = 375, + JOIN = 376, + KEY = 377, + KEYS = 378, + KILL = 379, + LEADING = 380, + LEAVE = 381, + LEFT = 382, + LIMIT = 383, + LINES = 384, + LOAD = 385, + LOCALTIME = 386, + LOCALTIMESTAMP = 387, + LOCK = 388, + LONG = 389, + LONGBLOB = 390, + LONGTEXT = 391, + LOOP = 392, + LOW_PRIORITY = 393, + MATCH = 394, + MEDIUMBLOB = 395, + MEDIUMINT = 396, + MEDIUMTEXT = 397, + 
MINUTE_MICROSECOND = 398, + MINUTE_SECOND = 399, + MODIFIES = 400, + NATURAL = 401, + NO_WRITE_TO_BINLOG = 402, + NULLX = 403, + NUMBER = 404, + ON = 405, + DUPLICATE = 406, + OPTIMIZE = 407, + OPTION = 408, + OPTIONALLY = 409, + ORDER = 410, + OUT = 411, + OUTER = 412, + OUTFILE = 413, + PARTITIONED = 414, + PRECISION = 415, + PRIMARY = 416, + PROCEDURE = 417, + PROJECTION = 418, + PURGE = 419, + QUICK = 420, + QUARTER = 421, + READ = 422, + READS = 423, + REAL = 424, + REFERENCES = 425, + RELEASE = 426, + RENAME = 427, + REPEAT = 428, + REPLACE = 429, + REQUIRE = 430, + RESTRICT = 431, + RETURN = 432, + REVOKE = 433, + RIGHT = 434, + ROLLUP = 435, + SAMPLE = 436, + SCHEMA = 437, + SCHEMAS = 438, + SECOND_MICROSECOND = 439, + SELECT = 440, + SENSITIVE = 441, + SEPARATOR = 442, + SET = 443, + SHOW = 444, + SMALLINT = 445, + SOME = 446, + SONAME = 447, + SPATIAL = 448, + SPECIFIC = 449, + SQL = 450, + SQLEXCEPTION = 451, + SQLSTATE = 452, + SQLWARNING = 453, + SQL_BIG_RESULT = 454, + SQL_CALC_FOUND_ROWS = 455, + SQL_SMALL_RESULT = 456, + SSL = 457, + STARTING = 458, + STRAIGHT_JOIN = 459, + TABLE = 460, + TEMPORARY = 461, + TEXT = 462, + TERMINATED = 463, + THEN = 464, + TIME = 465, + TIMESTAMP = 466, + TINYBLOB = 467, + TINYINT = 468, + TINYTEXT = 469, + TO = 470, + TRAILING = 471, + TRIGGER = 472, + UNDO = 473, + UNION = 474, + UNIQUE = 475, + UNLOCK = 476, + UNSIGNED = 477, + UPDATE = 478, + USAGE = 479, + USE = 480, + USING = 481, + UTC_DATE = 482, + UTC_TIME = 483, + UTC_TIMESTAMP = 484, + VALUES = 485, + VARBINARY = 486, + VARCHAR = 487, + VARYING = 488, + WHEN = 489, + WHERE = 490, + WHILE = 491, + WITH = 492, + WRITE = 493, + YEAR = 494, + YEAR_MONTH = 495, + ZEROFILL = 496, + WEEK = 497, + DO = 498, + MAX_QUERIES_PER_HOUR = 499, + MAX_UPDATES_PER_HOUR = 500, + MAX_CONNECTIONS_PER_HOUR = 501, + MAX_USER_CONNECTIONS = 502, + USER = 503, + TRUNCATE = 504, + FAST = 505, + MEDIUM = 506, + EXTENDED = 507, + CHANGED = 508, + LEAVES = 509, + MASTER = 510, + QUERY = 
511, + CACHE = 512, + SLAVE = 513, + BEGINT = 514, + COMMIT = 515, + START = 516, + TRANSACTION = 517, + NO = 518, + CHAIN = 519, + AUTOCOMMIT = 520, + SAVEPOINT = 521, + ROLLBACK = 522, + LOCAL = 523, + TABLES = 524, + ISOLATION = 525, + LEVEL = 526, + GLOBAL = 527, + SESSION = 528, + UNCOMMITTED = 529, + COMMITTED = 530, + REPEATABLE = 531, + SERIALIZABLE = 532, + IDENTIFIED = 533, + PASSWORD = 534, + PRIVILEGES = 535, + BACKUP = 536, + CHECKSUM = 537, + REPAIR = 538, + USE_FRM = 539, + RESTORE = 540, + CHARACTER = 541, + COLLATION = 542, + COLUMNS = 543, + ENGINE = 544, + LOGS = 545, + STATUS = 546, + STORAGE = 547, + ENGINES = 548, + ERRORS = 549, + GRANTS = 550, + INNODB = 551, + PROCESSLIST = 552, + TRIGGERS = 553, + VARIABLES = 554, + WARNINGS = 555, + FLUSH = 556, + HOSTS = 557, + DES_KEY_FILE = 558, + USER_RESOURCES = 559, + CONNECTION = 560, + RESET = 561, + PREPARE = 562, + DEALLOCATE = 563, + EXECUTE = 564, + WORK = 565, + BTREE = 566, + HASH = 567, + BDB = 568, + OPEN = 569, FSUBSTRING = 570, FTRIM = 571, FDATE_ADD = 572, @@ -378,7 +378,7 @@ extern int yydebug; typedef union YYSTYPE { /* Line 2579 of glr.c */ -#line 34 "sql.ypp" +#line 39 "sql.ypp" int intval; double floatval; diff --git a/sql_parser/parser/sql.ypp b/sql_parser/parser/sql.ypp index 7074385da..2eabd3b79 100644 --- a/sql_parser/parser/sql.ypp +++ b/sql_parser/parser/sql.ypp @@ -5,7 +5,7 @@ %parse-param { struct ParseResult* result} %glr-parser %expect 4 -%expect-rr 61 +%expect-rr 62 %defines %{ #include @@ -15,6 +15,9 @@ #include #include #include +#include +#include +#include #include using namespace std; #include "../ast_node/ast_node.h" @@ -26,6 +29,8 @@ using namespace std; #include "../ast_node/ast_load_stmt.h" #include "../ast_node/ast_show_stmt.h" #include "../ast_node/ast_delete_stmt.h" +#include "../ast_node/ast_desc_stmt.h" +#include "../ast_node/ast_update_stmt.h" void yyerror(struct ParseResult *pp,const char *s, ...); void emit(char *s, ...); @@ -154,6 +159,7 @@ should 
replace YYLEX with the following clause in sql.tab.cpp, why so? I don't k %token FORCE %token FOREIGN %token FROM +%token FULL %token FULLTEXT %token GRANT %token GROUP @@ -376,7 +382,7 @@ should replace YYLEX with the following clause in sql.tab.cpp, why so? I don't k %token HASH %token BDB %token OPEN -%token FULL + %token FSUBSTRING %token FTRIM @@ -393,7 +399,7 @@ should replace YYLEX with the following clause in sql.tab.cpp, why so? I don't k %token FMAX -%type select_opts opt_with_rollup opt_asc_desc opt_inner_cross left_or_right +%type select_opts opt_with_rollup opt_asc_desc opt_inner_cross left_right_full %type opt_left_or_right_outer opt_outer %type stmt_list select_expr_list %type opt_where select_stmt stmt opt_groupby select_expr @@ -417,6 +423,7 @@ should replace YYLEX with the following clause in sql.tab.cpp, why so? I don't k %type opt_col_names enum_list opt_length %type column_atts data_type load_table_stmt %type delete_opts delete_list delete_stmt +%type update_stmt update_set_list %type opt_if_exists opt_if_not_exists opt_ignore_replace @@ -597,6 +604,7 @@ outer 4 left 8 right 16 nature 32 +full 64 straight_join -1 */ @@ -604,7 +612,7 @@ join_table: table_reference opt_inner_cross JOIN table_factor opt_join_condition { $$ = new AstJoin(AST_JOIN, $2, $1, $4, $5);} | table_reference STRAIGHT_JOIN table_factor { $$ = new AstJoin(AST_JOIN, -1, $1, $3, NULL);} | table_reference STRAIGHT_JOIN table_factor ON expr { $$ = new AstJoin(AST_JOIN, -1, $1, $3, $5);} - | table_reference left_or_right opt_outer JOIN table_factor join_condition { $$ = new AstJoin(AST_JOIN, $2 + $3, $1, $5, $6);} + | table_reference left_right_full opt_outer JOIN table_factor join_condition { $$ = new AstJoin(AST_JOIN, $2 + $3, $1, $5, $6);} | table_reference NATURAL opt_left_or_right_outer JOIN table_factor { $$ = new AstJoin(AST_JOIN, 32 + $3, $1, $5, NULL);} ; @@ -617,14 +625,16 @@ opt_outer: { $$ = 4; } | OUTER { $$ = 4; } ; -left_or_right: +left_right_full: LEFT { $$ = 8; } 
| RIGHT { $$ = 16; } + | FULL { $$ = 64; } ; opt_left_or_right_outer: LEFT opt_outer { $$ = 8 + $2; } - | RIGHT opt_outer { $$ = 16 + $2; } + | RIGHT opt_outer { $$ = 16 + $2;} + | FULL opt_outer { $$ = 64 + $2;} | /* nil */ { $$ = 0; } ; @@ -990,13 +1000,13 @@ stmt: load_table_stmt { $$ = $1;} load_table_stmt: LOAD TABLE NAME FROM expr_list WITH STRING ',' STRING SAMPLE COMPARISON APPROXNUM { if ($11 != 4) { yyerror(result,"please give a specific number"); } - else {$$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), atof($12), 0);} + else {$$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), atof($12), 1);} } - | LOAD TABLE NAME FROM expr_list WITH STRING ',' STRING { $$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), 1.0, 0);} - | APPEND TABLE NAME FROM expr_list WITH STRING ',' STRING { $$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), 1.0, 1);} + | LOAD TABLE NAME FROM expr_list WITH STRING ',' STRING { $$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), 1.0, 1);} + | APPEND TABLE NAME FROM expr_list WITH STRING ',' STRING { $$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), 1.0, 2);} | APPEND TABLE NAME FROM expr_list WITH STRING ',' STRING SAMPLE COMPARISON APPROXNUM { if ($11 != 4) { yyerror(result,"please give a specific number"); } - else {$$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), atof($12), 1);} + else {$$ = new AstLoadTable(AST_LOAD_TABLE, string($3), $5, string($7), string($9), atof($12), 2);} } ; @@ -1134,8 +1144,8 @@ show_stmt: SHOW opt_full TABLES opt_from opt_like_string { $$ = new AstShowStmt( | SHOW WARNINGS opt_limit { $$ = NULL; } | SHOW expr WARNINGS /* expr = COUNT(*) */ { $$ = NULL; } ; -opt_full: /* nil */ { $$ = 0; } - | FULL { $$ = 1; } + opt_full: /* nil */ { $$ = 0; } + /* | FULL { $$ = 1; }*/ ; opt_from: /* nil */ { $$ = NULL; } @@ -1157,6 
+1167,9 @@ opt_trans_level: /* nil */ { $$ = 0; } | GLOBAL { $$ = 1; } | SESSION { $$ = 2; } ; + /* description clause*/ +stmt: DESC NAME { $$ = new AstDescStmt(AST_DESC_STMT, $2); } + ; stmt: delete_stmt { $$ = $1; } ; @@ -1188,12 +1201,22 @@ delete_list: NAME opt_dot_star { } opt_dot_star: /* nil */ | '.' '*' ; - + +stmt: update_stmt { $$ = $1; } + ; + +update_stmt: UPDATE table_factor SET update_set_list opt_where { $$ = new AstUpdateStmt(AST_UPDATE_STMT, $4, $2, $5); } + ; + +update_set_list: expr COMPARISON expr { $$ = new AstUpdateSetList(AST_UPDATE_SET_LIST, $1, $3, NULL); } + | update_set_list ',' expr COMPARISON expr { $$ = new AstUpdateSetList(AST_UPDATE_SET_LIST, $3, $5, $1); } + ; + %% void emit(char *s, ...) { - /* + /* extern int yylineno; va_list ap; va_start(ap, s); @@ -1203,15 +1226,87 @@ void emit(char *s, ...) */ } -void yyerror(struct ParseResult *pp,const char * s, ...) -{ - /* - va_list ap; - va_start(ap, s); - fprintf(stderr, "%d: error: ", yyget_lineno(pp->yyscan_info_)); - vfprintf(stderr, s, ap); - fprintf(stderr, "\n"); - */ - fprintf (stderr, "%s\n", s); +void yyerror(struct ParseResult *pp,const char * s, ...) { + va_list ap; + va_start(ap, s); + string sql_clause = pp->sql_clause; + vector tokens; + boost::split(tokens, sql_clause, boost::is_any_of("\n")); + ostringstream ostr; + int columnno = 0; + int lineno = yyget_lineno(pp->yyscan_info_); + lineno = lineno % tokens.size(); + lineno++; + // Because find() can only find the first place where the word appears, + // so this method may not find the certain place. 
+ /* + for (int i = 0; i < tokens.size(); i++) { + columnno = tokens[i].find(yyget_text(pp->yyscan_info_)) + 1; + if (-1 != columnno && ((lineno - 1) == i)) { + cout << "In clause \'" + << "\e[1m" << tokens[i] << "\e[0m\'" << endl; + for (int j = 0; j < (columnno + 9); j++) { + cout << "."; + } + cout << "^" << endl; + ostr << "In clause \'" + << "\e[1m" << tokens[i] << "\e[0m\'" << endl; + for (int j = 0; j < (columnno + 9); j++) { + ostr << "."; + } + ostr << "^" << endl; + break; + } + } + */ + string wrong_clause = tokens[lineno - 1]; + vector words_in_clause; + boost::split(words_in_clause, tokens[lineno - 1], boost::is_any_of(" ")); + int yyget_col_no = yyget_column(pp->yyscan_info_); + if (yyget_col_no == 0) { + yyget_col_no = 1; + } + cout << "yyget_col_no = " << yyget_col_no << endl; + int column_num = 1; + for (int i = 0; i < words_in_clause.size(); i++) { + if (i == (yyget_col_no - 1)) { + cout << "In clause \'" + << "\e[1m" << tokens[lineno - 1] << "\e[0m\'" << endl; + ostr << "In clause \'" + << "\e[1m" << tokens[lineno - 1] << "\e[0m\'" << endl; + for (int j = 0; j < (column_num + 10); j++) { + ostr << "."; + cout << "."; + } + cout << "^" << endl; + ostr << "^" << endl; + + break; + } else { + column_num = column_num + words_in_clause[i].size() + 1; + } + } + ostr << "SQL syntax error at \e[1mline: " << lineno << "," + << "\e[0m near \'\e[1m"; + ostr << yyget_text(pp->yyscan_info_); + // ostr << "\e[1mLINE: " << lineno << "," << columnno << "\e[0m error: "; + // ostr << "near \'\e[1m"; + // ostr << yyget_text(pp->yyscan_info_); + ostr << "\e[0m\'." << endl; + pp->error_info_ = ostr.str(); + cout << "SQL syntax error at \e[1mline: " << lineno << "," + << "\e[0m near \'\e[1m"; + cout << yyget_text(pp->yyscan_info_); + cout << "\e[0m\'." << endl; + /* + cout << "\e[1mLINE: " << lineno << "\e[0m error: "; + cout << "near \'\e[1m"; + cout << yyget_text(pp->yyscan_info_); + cout << "\e[0m\'." 
<< endl; + */ + yyset_column(0, pp->yyscan_info_); + + vfprintf(stderr, s, ap); + fprintf(stderr, "\n"); } diff --git a/stmt_handler/Makefile.am b/stmt_handler/Makefile.am index 6b90572d9..97a678b23 100644 --- a/stmt_handler/Makefile.am +++ b/stmt_handler/Makefile.am @@ -1,8 +1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive \ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization @@ -30,6 +29,7 @@ noinst_LIBRARIES=libstmthandler.a libstmthandler_a_SOURCES = \ delete_stmt_exec.cpp delete_stmt_exec.h \ + update_stmt_exec.cpp update_stmt_exec.h \ drop_table_exec.cpp drop_table_exec.h \ create_projection_exec.cpp create_projection_exec.h\ show_exec.cpp show_exec.h \ @@ -38,7 +38,8 @@ libstmthandler_a_SOURCES = \ select_exec.cpp select_exec.h \ insert_exec.cpp insert_exec.h \ load_exec.cpp load_exec.h \ - create_table_exec.cpp create_table_exec.h + create_table_exec.cpp create_table_exec.h \ + desc_exec.cpp desc_exec.h diff --git a/stmt_handler/create_projection_exec.cpp b/stmt_handler/create_projection_exec.cpp index a83de15b5..ff7b34e18 100644 --- a/stmt_handler/create_projection_exec.cpp +++ b/stmt_handler/create_projection_exec.cpp @@ -76,7 +76,10 @@ RetCode CreateProjectionExec::Execute(ExecutedResult* exec_result) { Catalog* local_catalog = Environment::getInstance()->getCatalog(); ret = CreateTableProjection(table_name, local_catalog, sem_cnxt.index_, partition_attribute_name, partition_num); - if (rSuccess == ret) { + if (common::rResourceIsLocked == ret) { + exec_result->SetError( + "Can't create new projection when loading/ inserting data"); + } else if (rSuccess == ret) { exec_result->status_ = true; exec_result->result_ = NULL; exec_result->info_ = "create projection successfully"; @@ -125,6 +128,7 @@ RetCode CreateProjectionExec::Execute(ExecutedResult* exec_result) { local_catalog->saveCatalog(); return ret; } else { + 
exec_result->SetError(CStrError(ret)); return ret; } return ret; @@ -143,8 +147,10 @@ RetCode CreateProjectionExec::CreateTableProjection( // vector indexDEL; // indexDEL.push_back(0); if (0 != columns.size()) { - catalog->getTable(table_id)->createHashPartitionedProjection( - columns, partition_attribute, partition_num); + EXEC_AND_RETURN_ERROR( + ret, catalog->getTable(table_id)->createHashPartitionedProjection( + columns, partition_attribute, partition_num), + "failed to create projection"); int projection_index = catalog->getTable(table_id)->getNumberOfProjection() - 1; diff --git a/stmt_handler/delete_stmt_exec.cpp b/stmt_handler/delete_stmt_exec.cpp index c6fa2a969..e56988f5a 100644 --- a/stmt_handler/delete_stmt_exec.cpp +++ b/stmt_handler/delete_stmt_exec.cpp @@ -93,6 +93,7 @@ RetCode DeleteStmtExec::Execute(ExecutedResult* exec_result) { ret = appended_query_exec->Execute(exec_result); if (ret != rSuccess) { WLOG(ret, "failed to find the delete tuples from the table "); + delete appended_query_exec; return ret; } ostringstream ostr; @@ -117,7 +118,9 @@ RetCode DeleteStmtExec::Execute(ExecutedResult* exec_result) { // if (NULL != appended_query_exec) { // delete appended_query_exec; // } - + delete exec_result->result_; + exec_result->result_ = NULL; + delete appended_query_exec; return ret; } else if (rCreateProjectionOnDelTableFailed == ret) { WLOG(ret, diff --git a/stmt_handler/desc_exec.cpp b/stmt_handler/desc_exec.cpp new file mode 100644 index 000000000..d666d5fb9 --- /dev/null +++ b/stmt_handler/desc_exec.cpp @@ -0,0 +1,254 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/stmt_handler/desc_stmt_exec.cpp + * + * Created on: Feb 26, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ + +#include +#include +#include +#include "../stmt_handler/desc_exec.h" +#include "../catalog/catalog.h" +#include "../Environment.h" +#include "../catalog/stat/Analyzer.h" +#include "../catalog/projection_binding.h" +using std::endl; +using std::string; +using std::vector; +using claims::catalog::Catalog; + +namespace claims { +namespace stmt_handler { + +DescExec::DescExec(AstNode* stmt) : StmtExec(stmt) { + assert(stmt_); + desc_stmt_ast_ = dynamic_cast(stmt_); +} + +DescExec::~DescExec() {} + +RetCode DescExec::Execute(ExecutedResult* exec_result) { + SemanticContext sem_cnxt; + RetCode ret = desc_stmt_ast_->SemanticAnalisys(&sem_cnxt); + if (rSuccess != ret) { + exec_result->error_info_ = + "Semantic analysis error.\n" + sem_cnxt.error_msg_; + exec_result->status_ = false; + LOG(ERROR) << "semantic analysis error result= : " << ret; + cout << "semantic analysis error result= : " << ret << endl; + return ret; + } + + ostringstream ostr; + Catalog* local_catalog = Environment::getInstance()->getCatalog(); + TableDescriptor* table = local_catalog->getTable(desc_stmt_ast_->table_name_); + for (int i = 0; i < table->getAttributes().size(); i++) { + desc_stmt_ast_->column_name_.push_back(table->getAttributes()[i].attrName); + // extra and is_key not used now. 
+ desc_stmt_ast_->extra_.push_back(""); + desc_stmt_ast_->is_key_.push_back(""); + desc_stmt_ast_->size_.push_back( + table->getAttributes()[i].attrType->get_length()); + if (table->getAttributes()[i].attrType->nullable) { + desc_stmt_ast_->nullable_.push_back("YES"); + } else { + desc_stmt_ast_->nullable_.push_back("NO"); + } + switch (table->getAttributes()[i].attrType->type) { + case t_smallInt: + desc_stmt_ast_->type_.push_back("small int"); + desc_stmt_ast_->default_value_.push_back("0"); + break; + case t_int: + desc_stmt_ast_->type_.push_back("int"); + desc_stmt_ast_->default_value_.push_back("0"); + break; + case t_u_long: + desc_stmt_ast_->type_.push_back("unsigned long"); + desc_stmt_ast_->default_value_.push_back("0"); + break; + case t_float: + desc_stmt_ast_->type_.push_back("float"); + desc_stmt_ast_->default_value_.push_back("0.0"); + break; + case t_double: + desc_stmt_ast_->type_.push_back("double"); + desc_stmt_ast_->default_value_.push_back("0.0"); + break; + case t_string: + desc_stmt_ast_->type_.push_back("string"); + desc_stmt_ast_->default_value_.push_back("NULL"); + break; + case t_date: + desc_stmt_ast_->type_.push_back("date"); + desc_stmt_ast_->default_value_.push_back("1400-01-01"); + break; + case t_time: + desc_stmt_ast_->type_.push_back("time"); + desc_stmt_ast_->default_value_.push_back("00:00:00.000000"); + break; + case t_datetime: + desc_stmt_ast_->type_.push_back("date and time"); + desc_stmt_ast_->default_value_.push_back("1400-01-01 00:00:00.000000"); + break; + case t_decimal: + desc_stmt_ast_->type_.push_back("decimal"); + desc_stmt_ast_->default_value_.push_back("0.0"); + break; + case t_boolean: + desc_stmt_ast_->type_.push_back("boolean"); + desc_stmt_ast_->default_value_.push_back("false"); + break; + case t_u_smallInt: + desc_stmt_ast_->type_.push_back("unsigned small int"); + desc_stmt_ast_->default_value_.push_back("0.0"); + break; + case t_date_day: + desc_stmt_ast_->type_.push_back("date day"); + 
desc_stmt_ast_->default_value_.push_back(""); + break; + case t_date_week: + desc_stmt_ast_->type_.push_back("date week"); + desc_stmt_ast_->default_value_.push_back(""); + break; + case t_date_month: + desc_stmt_ast_->type_.push_back("date month"); + desc_stmt_ast_->default_value_.push_back(""); + break; + case t_date_year: + desc_stmt_ast_->type_.push_back("date year"); + desc_stmt_ast_->default_value_.push_back(""); + break; + case t_date_quarter: + desc_stmt_ast_->type_.push_back("date quarter"); + desc_stmt_ast_->default_value_.push_back(""); + break; + default: + desc_stmt_ast_->type_.push_back("unknown"); + desc_stmt_ast_->default_value_.push_back(""); + break; + } + } + + vector max_column_size; + max_column_size.push_back(5); // Field + max_column_size.push_back(4); // Type + max_column_size.push_back(4); // Null + max_column_size.push_back(3); // Key + max_column_size.push_back(7); // Default + max_column_size.push_back(5); // Size + max_column_size.push_back(5); // Extra + + vector col_header; + col_header.push_back("Field"); + col_header.push_back("Type"); + col_header.push_back("Null"); + col_header.push_back("Key"); + col_header.push_back("Default"); + col_header.push_back("Size"); + col_header.push_back("Extra"); + + for (int i = 0; i < table->getAttributes().size(); i++) { + if (desc_stmt_ast_->column_name_[i].size() > max_column_size[0]) { + max_column_size[0] = desc_stmt_ast_->column_name_[i].size(); + } + if (desc_stmt_ast_->type_[i].size() > max_column_size[1]) { + max_column_size[1] = desc_stmt_ast_->type_[i].size(); + } + if (desc_stmt_ast_->nullable_[i].size() > max_column_size[2]) { + max_column_size[2] = desc_stmt_ast_->nullable_[i].size(); + } + if (desc_stmt_ast_->is_key_[i].size() > max_column_size[3]) { + max_column_size[3] = desc_stmt_ast_->is_key_[i].size(); + } + if (desc_stmt_ast_->default_value_[i].size() > max_column_size[4]) { + max_column_size[4] = desc_stmt_ast_->default_value_[i].size(); + } + } + // print header + for (int i 
= 0; i < 7; i++) { + ostr << "+"; + for (int j = 0; j < (max_column_size[i] + 2); j++) { + ostr << "-"; + } + } + ostr << "+" << endl; + + for (int i = 0; i < 7; i++) { + ostr << "|"; + ostr << " " << std::left << std::setw(max_column_size[i]) << col_header[i]; + ostr << " "; + } + ostr << "|" << endl; + for (int i = 0; i < 7; i++) { + ostr << "+"; + for (int j = 0; j < (max_column_size[i] + 2); j++) { + ostr << "-"; + } + } + ostr << "+" << endl; + + // print the description table + for (int i = 0; i < desc_stmt_ast_->column_name_.size(); i++) { + ostr << "|" + << " " << std::left << std::setw(max_column_size[0]) + << desc_stmt_ast_->column_name_[i] << " "; + ostr << "|" + << " " << std::left << std::setw(max_column_size[1]) + << desc_stmt_ast_->type_[i] << " "; + ostr << "|" + << " " << std::left << std::setw(max_column_size[2]) + << desc_stmt_ast_->nullable_[i] << " "; + ostr << "|" + << " " << std::left << std::setw(max_column_size[3]) + << desc_stmt_ast_->is_key_[i] << " "; + ostr << "|" + << " " << std::left << std::setw(max_column_size[4]) + << desc_stmt_ast_->default_value_[i] << " "; + ostr << "|" + << " " << std::left << std::setw(max_column_size[5]) + << desc_stmt_ast_->size_[i] << " "; + ostr << "|" + << " " << std::left << std::setw(max_column_size[6]) + << desc_stmt_ast_->extra_[i] << " "; + ostr << "|" << endl; + } + + // print last line + for (int i = 0; i < 7; i++) { + ostr << "+"; + for (int j = 0; j < (max_column_size[i] + 2); j++) { + ostr << "-"; + } + } + ostr << "+" << endl; + + exec_result->info_ = ostr.str(); + exec_result->status_ = true; + exec_result->result_ = NULL; +} + +} /* namespace stmt_handler */ +} /* namespace claims */ diff --git a/stmt_handler/desc_exec.h b/stmt_handler/desc_exec.h new file mode 100644 index 000000000..29b873db7 --- /dev/null +++ b/stmt_handler/desc_exec.h @@ -0,0 +1,58 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/stmt_handler/desc_stmt_exec.h + * + * Created on: Feb 26, 2016 + * Author: yuyang + * Email: youngfish93@hotmail.com + * + * Description: + * + */ + +#ifndef STMT_HANDLER_DESC_EXEC_H_ +#define STMT_HANDLER_DESC_EXEC_H_ + +#include "../stmt_handler/stmt_exec.h" +#include "../sql_parser/ast_node/ast_desc_stmt.h" + +namespace claims { +namespace stmt_handler { + +class DescExec : public StmtExec { + public: + /** + * @brief Method description: The executor about show statement. + * @param AstNode* stmt point to AST + */ + DescExec(AstNode *stmt); // NOLINT + ~DescExec(); + /** + * @brief the concrete operation of show statement. 
+ */ + RetCode Execute(ExecutedResult *exec_result); + + private: + AstDescStmt *desc_stmt_ast_; +}; + +} /* namespace stmt_handler */ +} /* namespace claims */ + +#endif // STMT_HANDLER_DESC_EXEC_H_ diff --git a/stmt_handler/drop_table_exec.cpp b/stmt_handler/drop_table_exec.cpp index 38bd78023..a7b853013 100644 --- a/stmt_handler/drop_table_exec.cpp +++ b/stmt_handler/drop_table_exec.cpp @@ -156,7 +156,7 @@ RetCode DropTableExec::DropTable(const string& table_name) { // catalog ret = DeleteTableFiles(table_name); if (rSuccess != ret) { - ELOG(ret, "failed to delete the files when dropping table" + table_name); + ELOG(ret, "failed to delete the files when dropping table " + table_name); return ret; } else { ret = DropTableFromCatalog(table_name); @@ -191,38 +191,16 @@ RetCode DropTableExec::DropTableFromCatalog(const string& table_name) { */ RetCode DropTableExec::DeleteTableFiles(const string& table_name) { RetCode ret = rSuccess; - Catalog* local_catalog = Environment::getInstance()->getCatalog(); - TableDescriptor* table_desc = local_catalog->getTable(table_name); // start to delete the files - vector> write_path; - for (int i = 0; i < table_desc->getNumberOfProjection(); i++) { - vector prj_write_path; - prj_write_path.clear(); - for (int j = 0; j < table_desc->getProjectoin(i) - ->getPartitioner() - ->getNumberOfPartitions(); - ++j) { - string path = PartitionID(table_desc->getProjectoin(i)->getProjectionID(), - j).getPathAndName(); - prj_write_path.push_back(path); - } - write_path.push_back(prj_write_path); - // unbound the file in memory - if (table_desc->getProjectoin(i)->getPartitioner()->allPartitionBound()) { - Catalog::getInstance()->getBindingModele()->UnbindingEntireProjection( - table_desc->getProjectoin(i)->getPartitioner()); - } - } TableFileConnector* connector = new TableFileConnector( Config::local_disk_mode ? 
FilePlatform::kDisk : FilePlatform::kHdfs, - write_path); - ret = connector->DeleteFiles(); - if (ret != rSuccess) { - ELOG(ret, - "failed to delete the projections, when delete the file on table" + - table_name); - return ret; - } + Environment::getInstance()->getCatalog()->getTable(table_name), + common::kReadFile); + EXEC_AND_RETURN_ERROR( + ret, connector->DeleteAllTableFiles(), + "failed to delete the projections, when delete the file on table" + + table_name); + return ret; } } /* namespace stmt_handler */ diff --git a/stmt_handler/insert_exec.cpp b/stmt_handler/insert_exec.cpp index 19f5d6b41..302d46c8f 100644 --- a/stmt_handler/insert_exec.cpp +++ b/stmt_handler/insert_exec.cpp @@ -224,7 +224,8 @@ RetCode InsertExec::Execute(ExecutedResult *exec_result) { position < table_desc_->getNumberOfAttribute(); position++) { // check value count if (insert_value == NULL) { - LOG(ERROR) << "Value count is too few" << endl; + LOG(ERROR) << "Value count is too few. Expected value count is " + << table_desc_->getNumberOfAttribute() << endl; exec_result->SetError("Value count is too few"); return claims::common::rFailure; } diff --git a/stmt_handler/load_exec.cpp b/stmt_handler/load_exec.cpp index ea1058fbc..32fa40371 100644 --- a/stmt_handler/load_exec.cpp +++ b/stmt_handler/load_exec.cpp @@ -121,6 +121,8 @@ RetCode LoadExec::Execute(ExecutedResult *exec_result) { #ifdef NEW_LOADER DataInjector *injector = new DataInjector(table, column_separator, tuple_separator); +LOG(INFO) << "complete create new DataInjector for test."<mode_; ret = injector->LoadFromFile(path_names, static_cast(load_ast_->mode_), exec_result, load_ast_->sample_); diff --git a/stmt_handler/select_exec.cpp b/stmt_handler/select_exec.cpp index c3140fdb5..ddb048506 100644 --- a/stmt_handler/select_exec.cpp +++ b/stmt_handler/select_exec.cpp @@ -28,20 +28,41 @@ #include "../stmt_handler/select_exec.h" #include +#include +#include +#include #include +#include #include #include +#include #include 
"../common/error_define.h" +#include "../common/ids.h" +#include "../exec_tracker/stmt_exec_tracker.h" +#include "../Environment.h" #include "../logical_operator/logical_query_plan_root.h" +#include "../physical_operator/exchange_sender.h" +#include "../physical_operator/exchange_sender_pipeline.h" +#include "../physical_operator/physical_aggregation.h" +#include "../physical_operator/physical_nest_loop_join.h" #include "../physical_operator/physical_operator_base.h" #include "../stmt_handler/stmt_handler.h" +#include "caf/io/all.hpp" +using caf::io::remote_actor; using claims::logical_operator::LogicalQueryPlanRoot; +using claims::physical_operator::ExchangeSender; +using claims::physical_operator::ExchangeSenderPipeline; +using claims::physical_operator::PhysicalAggregation; +using claims::physical_operator::PhysicalNestLoopJoin; using claims::physical_operator::PhysicalOperatorBase; +using claims::physical_operator::PhysicalOperatorType; using std::endl; using std::vector; using std::string; using std::cout; +using std::make_pair; +using claims::common::rStmtCancelled; namespace claims { namespace stmt_handler { @@ -58,9 +79,56 @@ SelectExec::~SelectExec() { // delete select_ast_; // select_ast_ = NULL; // } + while (!all_segments_.empty()) { + delete all_segments_.top(); + all_segments_.pop(); + } } RetCode SelectExec::Execute(ExecutedResult* exec_result) { + GETCURRENTTIME(start_time); + // exec_status is deleted by tracker + StmtExecStatus* exec_status = new StmtExecStatus(raw_sql_); + exec_status->RegisterToTracker(); + set_stmt_exec_status(exec_status); + RetCode ret = Execute(); + if (rSuccess != ret) { + exec_result->result_ = NULL; + exec_result->status_ = false; + exec_result->error_info_ = raw_sql_ + string(" execution error!"); + exec_status->set_exec_status(StmtExecStatus::ExecStatus::kError); + return ret; + } else { + if (StmtExecStatus::ExecStatus::kCancelled == + exec_status->get_exec_status()) { + exec_result->result_ = NULL; + 
exec_result->status_ = false; + exec_result->error_info_ = raw_sql_ + string(" have been cancelled!"); + exec_status->set_exec_status(StmtExecStatus::ExecStatus::kError); + + } else if (StmtExecStatus::ExecStatus::kOk == + exec_status->get_exec_status()) { + exec_result->result_ = exec_status->get_query_result(); + exec_result->status_ = true; + exec_result->info_ = exec_status->get_exec_info(); + exec_status->set_exec_status(StmtExecStatus::ExecStatus::kDone); + + } else { + assert(false); + exec_status->set_exec_status(StmtExecStatus::ExecStatus::kError); + } + } + + double exec_time_ms = GetElapsedTime(start_time); + if (NULL != exec_result->result_) { + exec_result->result_->query_time_ = exec_time_ms / 1000.0; + } + LOG(INFO) << raw_sql_ << " execution time: " << exec_time_ms / 1000.0 + << " sec" << endl; + return rSuccess; +} + +RetCode SelectExec::Execute() { #ifdef PRINTCONTEXT select_ast_->Print(); cout << "--------------begin semantic analysis---------------" << endl; @@ -69,23 +137,24 @@ RetCode SelectExec::Execute(ExecutedResult* exec_result) { RetCode ret = rSuccess; ret = select_ast_->SemanticAnalisys(&sem_cnxt); if (rSuccess != ret) { - exec_result->error_info_ = "semantic analysis error"; - exec_result->status_ = false; + stmt_exec_status_->set_exec_info("semantic analysis error \n" + + sem_cnxt.error_msg_); + stmt_exec_status_->set_exec_status(StmtExecStatus::ExecStatus::kError); LOG(ERROR) << "semantic analysis error result= : " << ret; - cout << "semantic analysis error result= : " << ret << endl; return ret; } #ifdef PRINTCONTEXT select_ast_->Print(); cout << "--------------begin push down condition ------------" << endl; #endif - ret = select_ast_->PushDownCondition(NULL); + PushDownConditionContext pdccnxt; + ret = select_ast_->PushDownCondition(pdccnxt); if (rSuccess != ret) { - exec_result->error_info_ = "push down condition error"; - exec_result->status_ = false; - exec_result->result_ = NULL; - ELOG(ret, exec_result->error_info_); - cout 
<< exec_result->error_info_; + stmt_exec_status_->set_exec_info("push down condition error"); + stmt_exec_status_->set_exec_status(StmtExecStatus::ExecStatus::kError); + stmt_exec_status_->set_query_result(NULL); + ELOG(ret, stmt_exec_status_->get_exec_info()); + cout << stmt_exec_status_->get_exec_info(); return ret; } #ifndef PRINTCONTEXT @@ -96,11 +165,12 @@ RetCode SelectExec::Execute(ExecutedResult* exec_result) { LogicalOperator* logic_plan = NULL; ret = select_ast_->GetLogicalPlan(logic_plan); if (rSuccess != ret) { - exec_result->error_info_ = "get logical plan error"; - exec_result->status_ = false; - exec_result->result_ = NULL; - ELOG(ret, exec_result->error_info_); - cout << exec_result->error_info_; + stmt_exec_status_->set_exec_info("get logical plan error"); + stmt_exec_status_->set_exec_status(StmtExecStatus::ExecStatus::kError); + stmt_exec_status_->set_query_result(NULL); + ELOG(ret, stmt_exec_status_->get_exec_info()); + cout << stmt_exec_status_->get_exec_info(); + delete logic_plan; return ret; } logic_plan = new LogicalQueryPlanRoot(0, logic_plan, raw_sql_, @@ -116,16 +186,150 @@ RetCode SelectExec::Execute(ExecutedResult* exec_result) { physical_plan->Print(); cout << "--------------begin output result -------------------" << endl; #endif + // collect all plan segments + physical_plan->GetAllSegments(&all_segments_); + // create thread to send all segments + pthread_t tid = 0; + // add segment_exec_status to stmt_exec_status_ + SegmentExecStatus* seg_exec_status_1 = new SegmentExecStatus( + make_pair(stmt_exec_status_->get_query_id(), + Environment::getInstance()->get_slave_node()->get_node_id())); + + stmt_exec_status_->AddSegExecStatus(seg_exec_status_1); - physical_plan->Open(); - while (physical_plan->Next(NULL)) { + if (all_segments_.size() > 0) { + int ret = pthread_create(&tid, NULL, SendAllSegments, this); } - exec_result->result_ = physical_plan->GetResultSet(); - physical_plan->Close(); + // this segment_exec_status for reporting 
status + SegmentExecStatus* seg_exec_status = new SegmentExecStatus( + make_pair(stmt_exec_status_->get_query_id(), + Environment::getInstance()->get_slave_node()->get_node_id()), + Environment::getInstance()->get_slave_node()->get_node_id()); + seg_exec_status->RegisterToTracker(); + bool pret = physical_plan->Open(seg_exec_status); + if (pret) { + seg_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kOk, + "physical plan Open() succeed at collector", + 0, true); + while (physical_plan->Next(seg_exec_status, NULL)) { + } + seg_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kOk, + "physical plan next() succeed", 0, true); + // the difference from the execution of normal segment due to getting result + stmt_exec_status_->set_query_result(physical_plan->GetResultSet()); + stmt_exec_status_->set_exec_info(string("execute a query successfully")); + } else { + seg_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kError, + "physical plan open() failed at collector", 0, + true); + stmt_exec_status_->set_query_result(NULL); + stmt_exec_status_->set_exec_info(string("execute a query failed")); + } + physical_plan->Close(seg_exec_status); + seg_exec_status->UpdateStatus(SegmentExecStatus::ExecStatus::kDone, + "physical plan close() succeed", 0, true); + + if (tid != 0) { + // if (StmtExecStatus::kCancelled == + // stmt_exec_status_->get_exec_status()) { + // pthread_cancel(tid); + // } + pthread_join(tid, NULL); + } + + ret = rSuccess; delete logic_plan; delete physical_plan; - return rSuccess; + return ret; } +//!!!return ret by global variant +void* SelectExec::SendAllSegments(void* arg) { + RetCode ret = 0; + SelectExec* select_exec = reinterpret_cast(arg); + short segment_id = 0; + while (!select_exec->all_segments_.empty()) { + pthread_testcancel(); + if (select_exec->stmt_exec_status_->IsCancelled()) { + return NULL; + } + auto a_plan_segment = select_exec->all_segments_.top(); + // make sure upper exchanges are prepared + ret = 
select_exec->IsUpperExchangeRegistered( + a_plan_segment->upper_node_id_list_, a_plan_segment->exchange_id_); + if (rSuccess == ret) { + auto physical_sender_oper = a_plan_segment->get_plan_segment(); + for (int i = 0; i < a_plan_segment->lower_node_id_list_.size(); ++i) { + pthread_testcancel(); + if (select_exec->stmt_exec_status_->IsCancelled()) { + return NULL; + } + // set partition offset for each segment + reinterpret_cast(physical_sender_oper) + ->SetPartitionOffset(i); + segment_id = select_exec->get_stmt_exec_status()->GenSegmentId(); + + // new SegmentExecStatus and add it to StmtExecStatus + SegmentExecStatus* seg_exec_status = new SegmentExecStatus(make_pair( + select_exec->get_stmt_exec_status()->get_query_id(), + segment_id * kMaxNodeNum + a_plan_segment->lower_node_id_list_[i])); + + select_exec->get_stmt_exec_status()->AddSegExecStatus(seg_exec_status); + // send plan + if (Environment::getInstance() + ->get_iterator_executor_master() + ->ExecuteBlockStreamIteratorsOnSite( + physical_sender_oper, + a_plan_segment->lower_node_id_list_[i], + select_exec->get_stmt_exec_status()->get_query_id(), + segment_id) == false) { + LOG(ERROR) << "sending plan of " + << select_exec->get_stmt_exec_status()->get_query_id() + << " , " << segment_id << "error!!!" << endl; + ret = -1; + return &ret; + } + + LOG(INFO) << "sending plan of " + << select_exec->get_stmt_exec_status()->get_query_id() + << " , " << segment_id << "succeed!!!" << endl; + } + } else { + LOG(ERROR) << "asking upper exchange failed!" 
<< endl; + return &ret; + } + select_exec->all_segments_.pop(); + DELETE_PTR(a_plan_segment); + } + return &ret; +} +RetCode SelectExec::IsUpperExchangeRegistered( + vector& upper_node_id_list, const u_int64_t exchange_id) { + RetCode ret = rSuccess; + NodeAddress node_addr; + int times = 0; + /// TODO(fzh)should release the strong synchronization + for (int i = 0; i < upper_node_id_list.size(); ++i) { + auto target_actor = + Environment::getInstance()->get_slave_node()->GetNodeActorFromId( + upper_node_id_list[i]); + while (Environment::getInstance() + ->getExchangeTracker() + ->AskForSocketConnectionInfo(ExchangeID(exchange_id, i), + upper_node_id_list[i], node_addr, + target_actor) != true) { + if (stmt_exec_status_->IsCancelled()) { + return -1; + } + + LOG(WARNING) << "busy asking socket connection info of node = " + << upper_node_id_list[i] << " , total times= " << ++times + << endl; + usleep(200); + } + } + return ret; +} + } // namespace stmt_handler } // namespace claims diff --git a/stmt_handler/select_exec.h b/stmt_handler/select_exec.h index 820e79f61..cd3a5034a 100644 --- a/stmt_handler/select_exec.h +++ b/stmt_handler/select_exec.h @@ -28,12 +28,18 @@ #ifndef STMT_HANDLER_SELECT_EXEC_H_ #define STMT_HANDLER_SELECT_EXEC_H_ +#include #include +#include +#include "../physical_operator/physical_operator_base.h" +#include "../physical_operator/segment.h" #include "../stmt_handler/delete_stmt_exec.h" #include "../stmt_handler/stmt_exec.h" #include "../stmt_handler/stmt_handler.h" - +using std::stack; +using claims::physical_operator::PhysicalOperatorBase; +using claims::physical_operator::Segment; using std::string; namespace claims { namespace stmt_handler { @@ -46,9 +52,17 @@ class SelectExec : public StmtExec { virtual ~SelectExec(); RetCode Execute(ExecutedResult* exec_result); + RetCode Execute(); + + private: + static void* SendAllSegments(void* arg); + RetCode IsUpperExchangeRegistered(vector& upper_node_id_list, + const u_int64_t exchange_id); + 
private: AstSelectStmt* select_ast_; string raw_sql_; + stack all_segments_; }; } // namespace stmt_handler } // namespace claims diff --git a/stmt_handler/stmt_exec.h b/stmt_handler/stmt_exec.h index 569fc204f..5c2406b74 100644 --- a/stmt_handler/stmt_exec.h +++ b/stmt_handler/stmt_exec.h @@ -29,6 +29,7 @@ #ifndef STMT_HANDLER_STMT_EXEC_H_ #define STMT_HANDLER_STMT_EXEC_H_ #include "../Daemon/Daemon.h" +#include "../exec_tracker/stmt_exec_tracker.h" #define GLOG_NO_ABBREVIATED_SEVERITIES #include @@ -47,15 +48,16 @@ namespace claims { namespace stmt_handler { -typedef int RetCode; -const int rSuccess = 0; -const int rParserError = -11000; -const int rUnknowStmtType = -11001; + class StmtExec { public: StmtExec(AstNode* stmt); virtual ~StmtExec(); virtual int Execute(ExecutedResult* exec_result); + StmtExecStatus* get_stmt_exec_status() { return stmt_exec_status_; } + void set_stmt_exec_status(StmtExecStatus* stmt_exec_status) { + stmt_exec_status_ = stmt_exec_status; + } public: const ResultSet* getResultSet() const; @@ -75,6 +77,7 @@ class StmtExec { bool result_flag_; string error_msg_; string info_; + StmtExecStatus* stmt_exec_status_; }; } // namespace stmt_handler diff --git a/stmt_handler/stmt_handler.cpp b/stmt_handler/stmt_handler.cpp index 91aea3a64..b33119089 100644 --- a/stmt_handler/stmt_handler.cpp +++ b/stmt_handler/stmt_handler.cpp @@ -30,11 +30,19 @@ #include #include #include "../stmt_handler/stmt_handler.h" - +#include "../common/memory_handle.h" +#include #include "../stmt_handler/create_projection_exec.h" +#include "../stmt_handler/desc_exec.h" #include "../stmt_handler/drop_table_exec.h" #include "../stmt_handler/show_exec.h" #include "../utility/Timer.h" +#include "../common/error_define.h" + +using boost::algorithm::to_lower; +using boost::algorithm::trim; +using claims::common::rUnknowStmtType; +using claims::common::rSQLParserErr; namespace claims { namespace stmt_handler { @@ -69,15 +77,16 @@ RetCode StmtHandler::GenerateStmtExec(AstNode* 
stmt_ast) { case AST_STMT_LIST: { AstStmtList* stmt_list = reinterpret_cast(stmt_ast); GenerateStmtExec(stmt_list->stmt_); - if (NULL != stmt_list) { - LOG(WARNING) << "only support one sql statement!"; - } break; } case AST_SHOW_STMT: { stmt_exec_ = new ShowExec(stmt_ast); break; } + case AST_DESC_STMT: { + stmt_exec_ = new DescExec(stmt_ast); + break; + } case AST_CREATE_TABLE_LIST: case AST_CREATE_TABLE_LIST_SEL: case AST_CREATE_TABLE_SEL: { @@ -97,6 +106,10 @@ RetCode StmtHandler::GenerateStmtExec(AstNode* stmt_ast) { stmt_exec_ = new DeleteStmtExec(stmt_ast); break; } + case AST_UPDATE_STMT: { + stmt_exec_ = new UpdateStmtExec(stmt_ast); + break; + } default: { LOG(ERROR) << "unknow statement type!" << std::endl; return rUnknowStmtType; @@ -105,16 +118,17 @@ RetCode StmtHandler::GenerateStmtExec(AstNode* stmt_ast) { return rSuccess; } RetCode StmtHandler::Execute(ExecutedResult* exec_result) { - GETCURRENTTIME(start_time); RetCode ret = rSuccess; - sql_parser_ = new Parser(sql_stmt_); + trim(sql_stmt_); + sql_parser_ = new Parser(sql_stmt_, (exec_result->info_)); AstNode* raw_ast = sql_parser_->GetRawAST(); if (NULL == raw_ast) { - exec_result->error_info_ = "Parser Error"; + exec_result->error_info_ = "Parser Error\n" + exec_result->info_; exec_result->status_ = false; exec_result->result_ = NULL; - return rParserError; + return rSQLParserErr; } + // print the raw ast if it's necessary. 
raw_ast->Print(); ret = GenerateStmtExec(raw_ast); if (rSuccess != ret) { @@ -124,11 +138,6 @@ RetCode StmtHandler::Execute(ExecutedResult* exec_result) { if (rSuccess != ret) { return ret; } - double exec_time_ms = GetElapsedTime(start_time); - if (NULL != exec_result->result_) - exec_result->result_->query_time_ = exec_time_ms / 1000.0; - cout << "execute time: " << exec_time_ms / 1000.0 << " sec" << endl; - return rSuccess; } } // namespace stmt_handler diff --git a/stmt_handler/stmt_handler.h b/stmt_handler/stmt_handler.h index 129815c2f..e0c75a13d 100644 --- a/stmt_handler/stmt_handler.h +++ b/stmt_handler/stmt_handler.h @@ -33,6 +33,7 @@ #include "../stmt_handler/select_exec.h" #include "../stmt_handler/load_exec.h" #include "../stmt_handler/insert_exec.h" +#include "../stmt_handler/update_stmt_exec.h" #include "./create_table_exec.h" #include "../Daemon/Daemon.h" #include "../sql_parser/parser/parser.h" diff --git a/stmt_handler/update_stmt_exec.cpp b/stmt_handler/update_stmt_exec.cpp new file mode 100644 index 000000000..a2feef39a --- /dev/null +++ b/stmt_handler/update_stmt_exec.cpp @@ -0,0 +1,244 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/stmt_handler/delete_stmt_exec.cpp + * + * Created on: Aug 18, 2016 + * Author: cswang + * Email: cs_wang@infosys.com + * + * Description: + * + */ + +#include +#include +#include +#include +#include "../Environment.h" +#include "../stmt_handler/update_stmt_exec.h" + +#include "../common/Block/BlockStream.h" +#include "../common/Block/ResultSet.h" +#include "../loader/data_injector.h" +#include "../catalog/table.h" +#include "../catalog/projection.h" +#include "../Daemon/Daemon.h" +#include "../sql_parser/ast_node/ast_select_stmt.h" +#include "../stmt_handler/select_exec.h" +#include "../common/error_define.h" +using claims::loader::DataInjector; +using std::endl; +using std::string; +using std::vector; +using std::cout; +using claims::catalog::TableDescriptor; +using claims::common::rSuccess; +using claims::common::rFailure; +using claims::common::rNoProjection; +using claims::common::rCreateProjectionOnDelTableFailed; +namespace claims { +namespace stmt_handler { + +UpdateStmtExec::UpdateStmtExec(AstNode* stmt) : StmtExec(stmt) { + assert(stmt_); + update_stmt_ast_ = dynamic_cast(stmt_); +} + +UpdateStmtExec::~UpdateStmtExec() {} + +RetCode UpdateStmtExec::Execute(ExecutedResult* exec_result) { + RetCode ret = rSuccess; + string table_base_name = + dynamic_cast(update_stmt_ast_->update_table_)->table_name_; + + TableDescriptor* new_table = + Environment::getInstance()->getCatalog()->getTable(table_base_name); + + SemanticContext sem_cnxt; + ret = update_stmt_ast_->SemanticAnalisys(&sem_cnxt); + if (rSuccess != ret) { + exec_result->SetError("Semantic analysis error.\n" + sem_cnxt.error_msg_); + LOG(ERROR) << "semantic analysis error result= : " << ret; + cout << "semantic analysis error result= : " << ret << endl; + return ret; + } + + AstTable* update_table = + dynamic_cast(update_stmt_ast_->update_table_); + AstWhereClause* update_where = + dynamic_cast(update_stmt_ast_->where_list_); + AstUpdateSetList* update_set_list = + 
dynamic_cast(update_stmt_ast_->update_set_list_); + + AstFromList* from_list = new AstFromList(AST_FROM_LIST, update_table, NULL); + + AstNode* appended_query_sel_stmt; + ret = GenerateSelectForUpdateStmt(table_base_name, appended_query_sel_stmt); + if (rSuccess == ret) { + appended_query_sel_stmt->Print(); + SelectExec* appended_query_exec = new SelectExec(appended_query_sel_stmt); + ret = appended_query_exec->Execute(exec_result); + if (ret != rSuccess) { + WLOG(ret, "failed to find the update tuples from the table "); + delete appended_query_exec; + return ret; + } + ostringstream ostr; + ostr << exec_result->result_->getNumberOftuples() << " tuples updated."; + + /* STEP2 : generate update selected data */ + ostringstream ostr_res; + ret = GenerateUpdateData(table_base_name, update_set_list, exec_result, + ostr_res); + delete exec_result->result_; + exec_result->result_ = NULL; + delete appended_query_exec; + if (rSuccess != ret) { + WLOG(ret, "updating tuples failed "); + return ret; + } + /* STEP3 : del update data */ + AstDeleteStmt* delete_stmt_ast = + new AstDeleteStmt(AST_DELETE_STMT, from_list, update_where, 0); + DeleteStmtExec* deletestmtexec = new DeleteStmtExec(delete_stmt_ast); + ret = deletestmtexec->Execute(exec_result); + if (ret != rSuccess) { + WLOG(ret, "failed to find the update tuples from the table "); + delete deletestmtexec; + return ret; + } + /* STEP4 :insert generate update selected data */ + InsertUpdatedDataIntoTable(table_base_name, exec_result, ostr_res); + exec_result->info_ = ostr.str(); + delete exec_result->result_; + exec_result->result_ = NULL; + delete deletestmtexec; + return ret; + } else if (rCreateProjectionOnDelTableFailed == ret) { + WLOG(ret, + "no projections has been created on the del table when delete tuples " + "from the base table"); + return ret; + } + return ret; +} + +RetCode UpdateStmtExec::GenerateSelectForUpdateStmt( + const string table_name, AstNode*& appended_query_sel_stmt) { + RetCode ret = rSuccess; 
+ if (rCreateProjectionOnDelTableFailed == ret) { + WLOG(ret, + "create projection on del table failed, since no projection has been " + "created on the base table"); + return ret; + } + /* SELECT * FROM TABLLE */ + AstFromList* from_list = + new AstFromList(AST_FROM_LIST, update_stmt_ast_->update_table_, NULL); + AstNode* appended_query_sel_list = + new AstSelectList(AST_SELECT_LIST, true, NULL, NULL); + appended_query_sel_stmt = new AstSelectStmt( + AST_SELECT_STMT, 0, appended_query_sel_list, from_list, + update_stmt_ast_->where_list_, NULL, NULL, NULL, NULL, NULL); + + return ret; +} + +RetCode UpdateStmtExec::GenerateUpdateData(string table_base_name, + AstNode* update_set_list, + ExecutedResult* exec_result, + ostringstream& ostr) { + RetCode ret = rSuccess; + DynamicBlockBuffer::Iterator it = exec_result->result_->createIterator(); + BlockStreamBase* block = NULL; + BlockStreamBase::BlockStreamTraverseIterator* tuple_it = NULL; + + std::unordered_map update_attr_list; + + AstUpdateSetList* update_set_list_temp = update_set_list; + while (NULL != update_set_list_temp) { + AstColumn* update_column = + dynamic_cast(update_set_list_temp->args0_); + AstExprConst* expr_const = + dynamic_cast(update_set_list_temp->args1_); + + int update_column_index = -1; + for (unsigned i = 0; i < exec_result->result_->column_header_list_.size(); + i++) { + if (exec_result->result_->column_header_list_[i] == + (table_base_name + "." + update_column->column_name_)) { + update_column_index = i; + break; + } + } + if (update_column_index == -1) { + ret = rFailure; + string err_msg = "The column [" + table_base_name + "." 
+ + update_column->column_name_ + + "] is not existed during update data."; + LOG(ERROR) << err_msg << std::endl; + exec_result->SetError(err_msg); + return ret; + } + update_attr_list.insert({update_column_index, expr_const}); + update_set_list_temp = update_set_list_temp->next_; + } + + while (block = it.nextBlock()) { + tuple_it = block->createIterator(); + void* tuple; + while (tuple = tuple_it->nextTuple()) { + for (unsigned i = 1; i < exec_result->result_->column_header_list_.size(); + i++) { + if (update_attr_list.find(i) != update_attr_list.end()) { + ostr << dynamic_cast(update_attr_list[i])->data_; + } else { + ostr << exec_result->result_->schema_->getcolumn(i) + .operate->toString(exec_result->result_->schema_ + ->getColumnAddess(i, tuple)) + .c_str(); + } + ostr << "|"; + } + ostr << "\n"; + } + delete tuple_it; + } + + return ret; +} + +void UpdateStmtExec::InsertUpdatedDataIntoTable(string table_name, + ExecutedResult* exec_result, + ostringstream& ostr) { + TableDescriptor* table = + Environment::getInstance()->getCatalog()->getTable(table_name); + if (NULL == table) { + LOG(ERROR) << "The table " + table_name + + " is not existed during update data." << std::endl; + return; + } + + DataInjector* injector = new DataInjector(table); + injector->InsertFromString(ostr.str(), exec_result); + Environment::getInstance()->getCatalog()->saveCatalog(); +} + +} /* namespace stmt_handler */ +} /* namespace claims */ diff --git a/stmt_handler/update_stmt_exec.h b/stmt_handler/update_stmt_exec.h new file mode 100644 index 000000000..3a07ecf2c --- /dev/null +++ b/stmt_handler/update_stmt_exec.h @@ -0,0 +1,72 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/stmt_handler/delete_stmt_exec.h + * + * Created on: Aug 18, 2016 + * Author: cswnag + * Email: cs_wang@infosys.com + * + * Description: + * + */ + +#ifndef STMT_HANDLER_UPDATE_STMT_EXEC_H_ +#define STMT_HANDLER_UPDATE_STMT_EXEC_H_ + +#include + +#include "../common/Block/ResultSet.h" +#include "../stmt_handler/stmt_exec.h" +#include "../sql_parser/ast_node/ast_update_stmt.h" +using std::string; + +namespace claims { +namespace stmt_handler { + +class UpdateStmtExec : public StmtExec { + public: + /** + * @brief Method description: The executor about update statement. + * @param AstNode* stmt point to AST + */ + UpdateStmtExec(AstNode *stmt); // NOLINT + virtual ~UpdateStmtExec(); + + /** + * @brief the concrete operation of update statement. 
+ */ + RetCode Execute(ExecutedResult *exec_result); + + private: + RetCode GenerateSelectForUpdateStmt(const string table_name, + AstNode *&appended_query_sel_stmt); + RetCode GenerateUpdateData(string table_base_name, AstNode *update_set_list, + ExecutedResult *exec_result, ostringstream &ostr); + void InsertUpdatedDataIntoTable(string table_name, + ExecutedResult *exec_result, + ostringstream &ostr); + + private: + AstUpdateStmt *update_stmt_ast_; +}; + +} /* namespace stmt_handler */ +} /* namespace claims */ + +#endif // STMT_HANDLER_UPDATE_STMT_EXEC_H_ diff --git a/storage/AllBlockInfo.cpp b/storage/AllBlockInfo.cpp index 42fe16769..9c72107e8 100755 --- a/storage/AllBlockInfo.cpp +++ b/storage/AllBlockInfo.cpp @@ -7,20 +7,18 @@ #include "AllBlockInfo.h" -AllBlockInfo *AllBlockInfo::abi_=0; +AllBlockInfo *AllBlockInfo::abi_ = 0; -AllBlockInfo::AllBlockInfo() { +AllBlockInfo::AllBlockInfo() {} -} - -AllBlockInfo::~AllBlockInfo() { +AllBlockInfo::~AllBlockInfo() {} +bool AllBlockInfo::put(string blockmanagerId, string blockId) { + lock_.acquire(); + string block = blockmanagerId + blockId; + vv_.push_back(block.c_str()); + lock_.release(); + return true; } -bool AllBlockInfo::put(string blockmanagerId,string blockId){ - lock_.acquire(); - string block=blockmanagerId+blockId; - vv_.push_back(block.c_str()); - lock_.release(); - return true; -} +// not used in my program . 
I need consider if it is useful -han diff --git a/storage/AllBlockInfo.h b/storage/AllBlockInfo.h index 196cdf89b..65ce897cf 100755 --- a/storage/AllBlockInfo.h +++ b/storage/AllBlockInfo.h @@ -12,36 +12,32 @@ #include #include #include -#ifdef DMALLOC -#include "dmalloc.h" -#endif #include "../utility/lock.h" using namespace std; - /* * 在master端由于theron的局限性,用一个专门的单例对象来存储,供master使用, * 记录所有机器上的block的信息,同步的get&put都在这个类中 * */ class AllBlockInfo { -public: - static AllBlockInfo* getInstance(){ - if(abi_==0){ - abi_=new AllBlockInfo(); - } - return abi_; - } - ~AllBlockInfo(); - - bool put(string blockmanagerId,string blockId); - -private: - AllBlockInfo(); - -private: - static AllBlockInfo *abi_; - vector vv_; - Lock lock_; + public: + static AllBlockInfo* getInstance() { + if (abi_ == 0) { + abi_ = new AllBlockInfo(); + } + return abi_; + } + ~AllBlockInfo(); + + bool put(string blockmanagerId, string blockId); + + private: + AllBlockInfo(); + + private: + static AllBlockInfo* abi_; + vector vv_; + Lock lock_; }; #endif /* ALLBLOCKINFO_H_ */ diff --git a/storage/BlanceMatcher.cpp b/storage/BlanceMatcher.cpp index a4d992a3d..4b2b83c43 100755 --- a/storage/BlanceMatcher.cpp +++ b/storage/BlanceMatcher.cpp @@ -7,51 +7,46 @@ #include "BlanceMatcher.h" -BlanceMatcher *BlanceMatcher::blancematcher_=0; +BlanceMatcher *BlanceMatcher::blancematcher_ = 0; -BlanceMatcher::BlanceMatcher() { +BlanceMatcher::BlanceMatcher() {} -} - -BlanceMatcher::~BlanceMatcher() { - -} +BlanceMatcher::~BlanceMatcher() {} -bool BlanceMatcher::projectsInput(string filename,list project){ - projects_[filename.c_str()]=project; - return true; +bool BlanceMatcher::projectsInput(string filename, list project) { + projects_[filename.c_str()] = project; + return true; } -string BlanceMatcher::matcher(string filename,BlockManagerId bmi){ - cout<<"in the matcher: "< proj; - proj.push_back(bmi.blockManagerId); - coming_ip_[filename.c_str()]=proj; - string rt=projects_[filename.c_str()].front(); - 
projects_[filename.c_str()].pop_front(); - return rt; - } - else{ - // 判断来取数据的ip是否来过 - cout<<"the vector is not empty!"< bmis=coming_ip_[filename.c_str()]; - bool exists=false; - list::iterator itr=bmis.begin(); - for(unsigned i=0;i proj; + proj.push_back(bmi.blockManagerId); + coming_ip_[filename.c_str()] = proj; + string rt = projects_[filename.c_str()].front(); + projects_[filename.c_str()].pop_front(); + return rt; + } else { + // 判断来取数据的ip是否来过 + cout << "the vector is not empty!" << endl; + list bmis = coming_ip_[filename.c_str()]; + bool exists = false; + list::iterator itr = bmis.begin(); + for (unsigned i = 0; i < bmis.size(); i++) { + if ((*itr) == bmi.blockManagerId) { + exists = true; + break; + } else { + itr++; + } + } + if (!exists) { + coming_ip_[filename.c_str()].push_back(bmi.blockManagerId); + } + string ite = projects_[filename.c_str()].front(); + projects_[filename.c_str()].pop_front(); + return ite; + } } diff --git a/storage/BlanceMatcher.h b/storage/BlanceMatcher.h index f4d67a16d..e529fb033 100755 --- a/storage/BlanceMatcher.h +++ b/storage/BlanceMatcher.h @@ -10,9 +10,6 @@ /* * 为storage提供数据,scan操作的是每个 * */ -#ifdef DMALLOC -#include "dmalloc.h" -#endif #include #include @@ -22,27 +19,28 @@ using namespace std; class BlanceMatcher { -public: - static BlanceMatcher *getInstance(){ - if(blancematcher_==0){ - blancematcher_=new BlanceMatcher(); - } - return blancematcher_; - } - virtual ~BlanceMatcher(); + public: + static BlanceMatcher *getInstance() { + if (blancematcher_ == 0) { + blancematcher_ = new BlanceMatcher(); + } + return blancematcher_; + } + virtual ~BlanceMatcher(); - bool projectsInput(string filename, list project); - // 返回的是projectid - string matcher(string filename,BlockManagerId bmi); + bool projectsInput(string filename, list project); + // 返回的是projectid + string matcher(string filename, BlockManagerId bmi); -private: - BlanceMatcher(); -private: - static BlanceMatcher *blancematcher_; - // 文件与project的对应关系 - map > projects_; 
- // 文件与来取数据的ip的对应关系 - map > coming_ip_; + private: + BlanceMatcher(); + + private: + static BlanceMatcher *blancematcher_; + // 文件与project的对应关系 + map > projects_; + // 文件与来取数据的ip的对应关系 + map > coming_ip_; }; #endif /* BLANCEMATCHER_H_ */ diff --git a/storage/BlockManager.cpp b/storage/BlockManager.cpp index af2632d3a..761bfe267 100755 --- a/storage/BlockManager.cpp +++ b/storage/BlockManager.cpp @@ -6,37 +6,34 @@ */ #include #include "BlockManager.h" +#include +#include "../common/file_handle/hdfs_connector.h" #include "../Environment.h" #include "../common/rename.h" #include "../common/Message.h" #include "../common/Logging.h" #include "../Config.h" -BlockManager* BlockManager::blockmanager_ = 0; +#include "../common/error_define.h" +#include "../common/error_no.h" +using claims::common::rLoadFromHdfsOpenFailed; +using claims::common::rLoadFromDiskOpenFailed; +using claims::common::rUnbindPartitionFailed; +using claims::common::HdfsConnector; + +BlockManager* BlockManager::blockmanager_ = NULL; BlockManager* BlockManager::getInstance() { - if (blockmanager_ == 0) { + if (NULL == blockmanager_) { blockmanager_ = new BlockManager(); } return blockmanager_; } BlockManager::BlockManager() { - framework_ = - new Theron::Framework(*Environment::getInstance()->getEndPoint()); - std::ostringstream actor_name; - actor_name << "blockManagerWorkerActor://" - << Environment::getInstance()->getNodeID(); - - actor_ = - new BlockManagerWorkerActor(framework_, actor_name.str().c_str(), this); logging_ = new StorageManagerLogging(); - logging_->log("BlockManagerSlave is initialized. 
The ActorName=%s", - actor_name.str().c_str()); - memstore_ = MemoryChunkStore::getInstance(); + memstore_ = MemoryChunkStore::GetInstance(); } BlockManager::~BlockManager() { blockmanager_ = 0; - delete actor_; - delete framework_; delete logging_; delete memstore_; } @@ -48,12 +45,13 @@ void BlockManager::initialize() { // 读配置文件中的配置,然后根据是否是master注册 // 1,建两个存储,一个是内存的,一个磁盘的 diskstore_ = new DiskStore(DISKDIR); - memstore_ = MemoryChunkStore::getInstance(); + + memstore_ = MemoryChunkStore::GetInstance(); /// the version written by zhanglei///////////////////////////////// // blockManagerId_=new BlockManagerId(); // 2,注册 - registerToMaster(blockManagerId_); + // registerToMaster(blockManagerId_); // 3,开启心跳监听 heartBeat(); /////////////////////////////////////////////////////////////////// @@ -73,7 +71,8 @@ void BlockManager::initialize() { } void BlockManager::registerToMaster(BlockManagerId* blockManagerId) { - worker_->_reigisterToMaster(blockManagerId); + assert(false); + // worker_->_reigisterToMaster(blockManagerId); } void BlockManager::heartBeat() { @@ -81,7 +80,7 @@ void BlockManager::heartBeat() { // while(true){ // 可以在这里有个配置property的指定,然后优化网络 // sleep(3); - worker_->_sendHeartBeat(); + // worker_->_sendHeartBeat(); // } reregister(); } @@ -111,14 +110,14 @@ bool BlockManager::reportBlockStatus(string blockId) { // 向master发送blocks的信息,当收到master的回应的时候 bool BlockManager::tryToReportBlockStatus(string blockId) { - worker_->_reportBlockStatus(blockId); + // worker_->_reportBlockStatus(blockId); return true; } void BlockManager::get(string blockId) { getLocal(blockId); } void* BlockManager::getLocal(string blockId) { - void* rt = 0; + void* rt = NULL; bool exists = false; map::iterator it_; it_ = blockInfoPool_.find(blockId); @@ -128,7 +127,7 @@ void* BlockManager::getLocal(string blockId) { if (exists) { // 如果存在就代表文件从hdfs上已经被拉到local了 if ((*it_).second->level_ == BlockManager::memory) { - // spark中的storageLevel是磁盘和内存中都有的,在storageLevel.scala中 + // 
spark中的storageLevel是磁盘和内存中都有的,在stor ageLevel.scala中 // 我们有那样的应用吗?todo: // 在此预留的序列化和反序列化接口,序列化与否也是 // 在storageLevel中的,是否备份也是在storageLevel中 @@ -179,7 +178,7 @@ bool BlockManager::put(string blockId, storageLevel level, void* value) { // else{ // hdfsFS fs=hdfsConnect(HDFS_N,9000); // hdfsFile -//readFile=hdfsOpenFile(fs,file_name.c_str(),O_RDONLY,0,0,0); +// readFile=hdfsOpenFile(fs,file_name.c_str(),O_RDONLY,0,0,0); // hdfsFileInfo *hdfsfile=hdfsGetPathInfo(fs,file_name.c_str()); //// char ///***path=hdfsGetHosts(fs,"/home/hayue/input/3_64m",0,201326592+12); @@ -193,7 +192,7 @@ bool BlockManager::put(string blockId, storageLevel level, void* value) { // ChunkInfo ci; // void *rt=malloc(CHUNK_SIZE); //newmalloc // tSize -//bytes_num=hdfsPread(fs,readFile,length,rt,CHUNK_SIZE); +// bytes_num=hdfsPread(fs,readFile,length,rt,CHUNK_SIZE); // cout<<"split interface: "<elog("Fail to open file [%s].Reason:%s", - chunk_id.partition_id.getPathAndName().c_str(), - strerror(errno)); + // logging_->elog("Fail to open file [%s].Reason:%s", + // chunk_id.partition_id.getPathAndName().c_str(), + // strerror(errno)); + ELOG(rLoadFromHdfsOpenFailed, + chunk_id.partition_id.getPathAndName().c_str()); hdfsDisconnect(fs); + lock.release(); return -1; - } else { - logging_->log("file [%s] is opened for offset[%d]\n", - chunk_id.partition_id.getPathAndName().c_str(), offset); + } //加错误码; + else { + // logging_->log("file [%s] is opened for offset[%d]\n", + // chunk_id.partition_id.getPathAndName().c_str(), offset); + DLOG(INFO) << "file [" << chunk_id.partition_id.getPathAndName().c_str() + << "] is opened for offset [" << offset << "]" << endl; } uint64_t start_pos = CHUNK_SIZE * offset; + if (start_pos < 0) assert(false); if (start_pos < hdfsfile->mSize) { ret = hdfsPread(fs, readFile, start_pos, desc, length); } else { @@ -280,28 +289,35 @@ int BlockManager::loadFromHdfs(const ChunkID& chunk_id, void* const& desc, ret = -1; } hdfsCloseFile(fs, readFile); - hdfsDisconnect(fs); + // 
hdfsDisconnect(fs); lock.release(); return ret; } -int BlockManager::loadFromDisk(const ChunkID& chunk_id, void* const& desc, + +int BlockManager::LoadFromDisk(const ChunkID& chunk_id, void* const& desc, const unsigned& length) const { - int ret; + int ret = 0; unsigned offset = chunk_id.chunk_off; int fd = FileOpen(chunk_id.partition_id.getPathAndName().c_str(), O_RDONLY); if (fd == -1) { - logging_->elog("Fail to open file [%s].Reason:%s", - chunk_id.partition_id.getPathAndName().c_str(), - strerror(errno)); + // logging_->elog("Fail to open file [%s].Reason:%s", + // chunk_id.partition_id.getPathAndName().c_str(), + // strerror(errno)); + ELOG(rLoadFromDiskOpenFailed, + chunk_id.partition_id.getPathAndName().c_str()); return -1; } else { - logging_->log("file [%s] is opened for offset[%d]\n", - chunk_id.partition_id.getPathAndName().c_str(), offset); + // logging_->log("file [%s] is opened for offset[%d]\n", + // chunk_id.partition_id.getPathAndName().c_str(), offset); + DLOG(INFO) << "file [" << chunk_id.partition_id.getPathAndName().c_str() + << "] is opened for offset [" << offset << "]" << endl; } long int file_length = lseek(fd, 0, SEEK_END); long start_pos = CHUNK_SIZE * offset; - logging_->log("start_pos=%ld**********\n", start_pos); + + // logging_->log("start_pos=%ld**********\n", start_pos); + DLOG(INFO) << "start_pos=" << start_pos << "*********" << endl; lseek(fd, start_pos, SEEK_SET); if (start_pos < file_length) { @@ -316,161 +332,73 @@ int BlockManager::loadFromDisk(const ChunkID& chunk_id, void* const& desc, BlockManagerId* BlockManager::getId() { return blockManagerId_; } string BlockManager::askForMatch(string filename, BlockManagerId bmi) { - if (!file_proj_.count(filename.c_str())) { - string rt = worker_->_askformatch(filename, bmi); - file_proj_[filename.c_str()] = rt; - } - return file_proj_[filename.c_str()]; + assert(false); + // if (!file_proj_.count(filename.c_str())) { + // string rt = worker_->_askformatch(filename, bmi); + // 
file_proj_[filename.c_str()] = rt; + // } + // return file_proj_[filename.c_str()]; } -bool BlockManager::containsPartition(const PartitionID& part) const { + +bool BlockManager::ContainsPartition(const PartitionID& part) const { boost::unordered_map::const_iterator it = partition_id_to_storage_.find(part); return !(it == partition_id_to_storage_.cend()); } -bool BlockManager::addPartition(const PartitionID& partition_id, + +bool BlockManager::AddPartition(const PartitionID& partition_id, const unsigned& number_of_chunks, const StorageLevel& desirable_storage_level) { + lock.acquire(); // test boost::unordered_map::const_iterator it = partition_id_to_storage_.find(partition_id); if (it != partition_id_to_storage_.cend()) { - partition_id_to_storage_[partition_id]->updateChunksWithInsertOrAppend( + partition_id_to_storage_[partition_id]->UpdateChunksWithInsertOrAppend( partition_id, number_of_chunks, desirable_storage_level); - logging_->log( - "Successfully updated partition[%s](desriable_storage_level = %d)!", - partition_id.getName().c_str(), desirable_storage_level); + // logging_->log( + // "Successfully updated partition[%s](desriable_storage_level = + // %d)!", + // partition_id.getName().c_str(), desirable_storage_level); + DLOG(INFO) << "Successfully updated partition[" + << partition_id.getName().c_str() + << "](desriable_storage_level =" << desirable_storage_level + << endl; + lock.release(); // test return true; } partition_id_to_storage_[partition_id] = new PartitionStorage( partition_id, number_of_chunks, desirable_storage_level); - logging_->log("Successfully added partition[%s](desriable_storage_level=%d)!", - partition_id.getName().c_str(), desirable_storage_level); + // logging_->log("Successfully added + // partition[%s](desriable_storage_level=%d)!", + // partition_id.getName().c_str(), desirable_storage_level); + DLOG(INFO) << "Successfully updated partition[" + << partition_id.getName().c_str() + << "](desriable_storage_level =" << 
desirable_storage_level + << endl; + lock.release(); // test return true; } -bool BlockManager::removePartition(const PartitionID& partition_id) { +bool BlockManager::RemovePartition(const PartitionID& partition_id) { boost::unordered_map::iterator it = partition_id_to_storage_.find(partition_id); if (it == partition_id_to_storage_.cend()) { - logging_->elog("Fail to unbinding partition [%s].", - partition_id.getName().c_str()); + // logging_->elog("Fail to unbinding partition [%s].", + // partition_id.getName().c_str()); + ELOG(rUnbindPartitionFailed, partition_id.getName().c_str()); return false; } - it->second->removeAllChunks(it->first); + it->second->RemoveAllChunks(it->first); partition_id_to_storage_.erase(it); return true; } -PartitionStorage* BlockManager::getPartitionHandle( +PartitionStorage* BlockManager::GetPartitionHandle( const PartitionID& partition_id) const { boost::unordered_map::const_iterator it = partition_id_to_storage_.find(partition_id); if (it == partition_id_to_storage_.cend()) { - return 0; + return NULL; } return it->second; } -BlockManager::BlockManagerWorkerActor::BlockManagerWorkerActor( - Theron::Framework* framework, const char* name, BlockManager* bm) - : Actor(*framework, name), bm_(bm) { - RegisterHandler(this, &BlockManagerWorkerActor::getBlock); - RegisterHandler(this, &BlockManagerWorkerActor::putBlock); - RegisterHandler(this, &BlockManagerWorkerActor::BindingPartition); - RegisterHandler(this, &BlockManagerWorkerActor::UnbindingPartition); -} - -BlockManager::BlockManagerWorkerActor::~BlockManagerWorkerActor() {} - -bool BlockManager::BlockManagerWorkerActor::_reigisterToMaster( - BlockManagerId* bMId) { - // cout<<"in the worker actor to register"<blockManagerId; - // StorageBudgetMessage rsm(receiverId_.c_str()); - // - // tor_=new TimeOutReceiver(endpoint_,receiverId_.c_str()); - // Theron::Catcher resultCatcher; - // tor_->RegisterHandler(&resultCatcher, - //&Theron::Catcher::Push); - // 
framework_->Send(rsm,tor_->GetAddress(),Theron::Address("blockManagerMasterActor")); - // // TimeOutWait(count,time_out),如果返回的数值小于count,那就是超时了 - // if(tor_->TimeOutWait(1,1000)==1){ - // cout<<"register respond"< resultCatcher; - // tor_->RegisterHandler(&resultCatcher, - //&Theron::Catcher::Push); - // framework_->Send(hbm,tor_->GetAddress(),Theron::Address("blockManagerMasterActor")); - // // TimeOutWait(count,time_out),如果返回的数值小于count,那就是超时了 - // if(tor_->TimeOutWait(1,1000)==1){ - // cout<<"heartbeat respond"< resultCatcher; - // tor_->RegisterHandler(&resultCatcher, - //&Theron::Catcher::Push); - // framework_->Send(bsm,tor_->GetAddress(),Theron::Address("blockManagerMasterActor")); - // // TimeOutWait(count,time_out),如果返回的数值小于count,那就是超时了 - // if(tor_->TimeOutWait(1,1000)==1){ - // cout<<"block status respond"< resultCatcher; - // tor_->RegisterHandler(&resultCatcher, - //&Theron::Catcher::Push); - // framework_->Send(mm,tor_->GetAddress(),Theron::Address("blockManagerMasterActor")); - // // TimeOutWait(count,time_out),如果返回的数值小于count,那就是超时了 - // cout<<"already send the message of matcher out"<TimeOutWait(1,1000)==1){ - // cout<<"matcher respond"<addPartition(message.partition_id, message.number_of_chunks, - message.storage_level); - Send(int(0), from); -} - -void BlockManager::BlockManagerWorkerActor::UnbindingPartition( - const PartitionUnbindingMessage& message, const Theron::Address from) { - bm_->removePartition(message.partition_id); - Send(int(0), from); -} diff --git a/storage/BlockManager.h b/storage/BlockManager.h index b5310b606..13ce8a294 100755 --- a/storage/BlockManager.h +++ b/storage/BlockManager.h @@ -1,43 +1,34 @@ - /* - * BlockManager.h - * - * Created on: 2013-10-11 - * Author: casa - */ +/* +* BlockManager.h +* +* Created on: 2013-10-11 +* Author: casa +*/ #ifndef BLOCKMANAGER_H_ #define BLOCKMANAGER_H_ -#include -#include #include #include #include #include #include -#ifdef DMALLOC -#include "dmalloc.h" -#endif -using namespace std; - 
#include - #include "hdfs.h" -#include "MemoryStore.h" +#include "MemoryManager.h" #include "DiskStore.h" #include "BlockManagerId.h" #include "PartitionStorage.h" - #include "../Debug.h" #include "../common/ids.h" #include "../common/Message.h" -#include "../common/TimeOutReceiver.h" #include "../common/Logging.h" #include "../utility/lock.h" +using namespace std; -struct ChunkInfo{ - ChunkID chunkId; - void * hook; +struct ChunkInfo { + ChunkID chunkId; + void *hook; }; // 在spark中,blockmanager中是含有成员变量blockmanagerMasterActor的,但是这里由于akka和theron的区别不能这么实现 @@ -45,110 +36,82 @@ struct ChunkInfo{ // 为什么要写为blockmanager呢?而不是column的manager呢?因为如果block为粒度更小一点 // 但是我们在交换的时候要以column的存储文件为粒度来交换,找到最近最久未使用的column而不是block -/* A better name may be StorageManager, as this module manages projections, chunks, and blocks.*/ -class BlockManager{ -public: - enum storageLevel{memory,disk,offline}; - - struct BlockInfo{ - // 现在blockinfo还只有一个属性 - // unsigned size_; - // 在磁盘上还是在哪里 - storageLevel level_; - BlockInfo(storageLevel level){ - level_=level; - } - }; - friend class BlockManagerWorkerActor; - class BlockManagerWorkerActor:public Theron::Actor{ - friend class BlockManager; - public: - BlockManagerWorkerActor(Theron::Framework *framework,const char *name,BlockManager* bm); - virtual ~BlockManagerWorkerActor(); - - bool _reigisterToMaster(BlockManagerId *bMId); - bool _sendHeartBeat(); - bool _reportBlockStatus(string blockId); - string _askformatch(string filename,BlockManagerId bmi); - private: - void getBlock(const Message256 &message,const Theron::Address from){}; - void putBlock(const Message256 &message,const Theron::Address from){}; - void BindingPartition(const PartitionBindingMessage& message,const Theron::Address from); - void UnbindingPartition(const PartitionUnbindingMessage& message, const Theron::Address from); - private: - TimeOutReceiver *tor_; - string receiverId_; - BlockManager* bm_; - }; -public: - static BlockManager *getInstance(); - MemoryChunkStore* 
getMemoryChunkStore()const; - virtual ~BlockManager(); - - - // 读各种配置文件,初始化,向master注册,然后启动worker - void initialize(); - // 刚启动时的注册 - void registerToMaster(BlockManagerId *blockManagerId); - // 心跳监听 - void heartBeat(); - // 同步安全的注册,一般是在put失败时 - void asyncReregister(); - // 在心跳失联时重新注册 - void reregister(); - // 报告所有的block,在重新注册之后 - void reportAllBlocks(); - // put单个block之后 - bool reportBlockStatus(string blockId); - // 在reportBlockStatus中调用 - bool tryToReportBlockStatus(string blockId); - - // todo:返回的是一个block一个数据结构,肯定不是一个void - void get(string blockId); - // 从本地获得数据,现在统一都是从本地获得数据,会调用loadFromHdfs,有没有getRemote还需要讨论 - // 应该是scan的state中输入文件名,然后将这个文件名组成blockId,然后在open当中调用,见hdfsscan - void* getLocal(string blockId); - ChunkInfo loadFromHdfs(string file_name); - - int loadFromHdfs(const ChunkID&, void* const &desc,const unsigned&); - - int loadFromDisk(const ChunkID&, void* const &desc,const unsigned&)const; - // 将这个blockId所代表的数据存进内存或者磁盘,所以其中有个参数肯定是storagelevel - bool put(string blockId,storageLevel level, void *value); - - /* poc测试 */ - BlockManagerId *getId(); - string askForMatch(string filename, BlockManagerId bmi); - bool containsPartition(const PartitionID& part)const; - bool addPartition(const PartitionID&, const unsigned & number_of_chunks,const StorageLevel& desirable_storage_level); - bool removePartition(const PartitionID&); - PartitionStorage* getPartitionHandle(const PartitionID& partition_id)const; -private: - BlockManager(); -private: - static BlockManager *blockmanager_; - // 这里blockmanager只是管理的是block的id,这个block到底是由memorystore管理 - // 还是diskstore,在blockmanager中再去划分 - map blockInfoPool_; - - BlockManagerWorkerActor *worker_; - BlockManagerId *blockManagerId_; - - MemoryChunkStore *memstore_; - DiskStore *diskstore_; - - - - /* poc测试 filename和projectid的映射*/ - map file_proj_; - - Theron::Framework *framework_; - Theron::Actor *actor_; - - boost::unordered_map partition_id_to_storage_; - Logging* logging_; - Lock lock; - +/* A better name may be StorageManager, 
as this module manages projections, + * chunks, and blocks.*/ +class BlockManager { + public: + enum storageLevel { memory, disk, offline }; + + struct BlockInfo { + // 现在blockinfo还只有一个属性 + // unsigned size_; + // 在磁盘上还是在哪里 + storageLevel level_; + BlockInfo(storageLevel level) { level_ = level; } + }; + + public: + static BlockManager *getInstance(); + MemoryChunkStore *getMemoryChunkStore() const; + virtual ~BlockManager(); + + // 读各种配置文件,初始化,向master注册,然后启动worker + void initialize(); + // 刚启动时的注册 + void registerToMaster(BlockManagerId *blockManagerId); + // 心跳监听 + void heartBeat(); + // 同步安全的注册,一般是在put失败时 + void asyncReregister(); + // 在心跳失联时重新注册 + void reregister(); + // 报告所有的block,在重新注册之后 + void reportAllBlocks(); + // put单个block之后 + bool reportBlockStatus(string blockId); + // 在reportBlockStatus中调用 + bool tryToReportBlockStatus(string blockId); + + // todo:返回的是一个block一个数据结构,肯定不是一个void + void get(string blockId); + // 从本地获得数据,现在统一都是从本地获得数据,会调用loadFromHdfs,有没有getRemote还需要讨论 + // 应该是scan的state中输入文件名,然后将这个文件名组成blockId,然后在open当中调用,见hdfsscan + void *getLocal(string blockId); + ChunkInfo loadFromHdfs(string file_name); + + int LoadFromHdfs(const ChunkID &, void *const &desc, const unsigned &); + int LoadFromDisk(const ChunkID &, void *const &desc, const unsigned &) const; + + // 将这个blockId所代表的数据存进内存或者磁盘,所以其中有个参数肯定是storagelevel + bool put(string blockId, storageLevel level, void *value); + + /* poc测试 */ + BlockManagerId *getId(); + string askForMatch(string filename, BlockManagerId bmi); + bool ContainsPartition(const PartitionID &part) const; + bool AddPartition(const PartitionID &, const unsigned &number_of_chunks, + const StorageLevel &desirable_storage_level); + bool RemovePartition(const PartitionID &); + PartitionStorage *GetPartitionHandle(const PartitionID &partition_id) const; + + private: + BlockManager(); + + private: + static BlockManager *blockmanager_; + // 这里blockmanager只是管理的是block的id,这个block到底是由memorystore管理 + // 还是diskstore,在blockmanager中再去划分 + map 
blockInfoPool_; + BlockManagerId *blockManagerId_; + MemoryChunkStore *memstore_; + DiskStore *diskstore_; + /* poc测试 filename和projectid的映射*/ + map file_proj_; + + boost::unordered_map + partition_id_to_storage_; + Logging *logging_; + Lock lock; }; #endif /* BLOCKMANAGER_H_ */ diff --git a/storage/BlockManagerMaster.cpp b/storage/BlockManagerMaster.cpp index ec2b2f053..c4d0d0841 100755 --- a/storage/BlockManagerMaster.cpp +++ b/storage/BlockManagerMaster.cpp @@ -9,91 +9,24 @@ #include "BlockManagerMaster.h" #include "../Environment.h" -#include "../common/TimeOutReceiver.h" #include "../common/Message.h" #include "../utility/print_tool.h" -BlockManagerMaster *BlockManagerMaster::master_=0; -BlockManagerMaster::BlockManagerMaster() { +#include "caf/io/all.hpp" +#include "caf/all.hpp" +#include "../node_manager/base_node.h" +using caf::after; +using caf::io::remote_actor; +using claims::BindingAtom; +using claims::OkAtom; +using claims::UnBindingAtom; +BlockManagerMaster *BlockManagerMaster::master_ = 0; - framework_=new Theron::Framework(*Environment::getInstance()->getEndPoint()); - actor_=new BlockManagerMasterActor(framework_,"blockManagerMasterActor"); - logging_=new StorageManagerMasterLogging(); -} - -BlockManagerMaster::~BlockManagerMaster() { - master_=0; - delete actor_; - delete framework_; - delete logging_; -} - -void BlockManagerMaster::initialize(){ - abi_=AllBlockInfo::getInstance(); - testForPoc(); -} - -void BlockManagerMaster::testForPoc(){ - bm_=BlanceMatcher::getInstance(); -// string file_name="/home/hayue/input/3_64m"; - string file_name="/home/claims/cj_/prj2/hs"; - list projs; - projs.push_back("/home/claims/cj_/prj2/hs_1"); - projs.push_back("/home/claims/cj_/prj2/hs_2"); - projs.push_back("/home/claims/cj_/prj2/hs_3"); - projs.push_back("/home/claims/cj_/prj2/hs_4"); - projs.push_back("/home/claims/cj_/prj2/hs_5"); - bm_->projectsInput(file_name.c_str(),projs); -} - 
-BlockManagerMaster::BlockManagerMasterActor::BlockManagerMasterActor(Theron::Framework* framework,const char *name) -:Actor(*(framework),name){ - RegisterHandler(this,&BlockManagerMasterActor::workerRegister); - RegisterHandler(this,&BlockManagerMasterActor::heartbeatReceiver); - RegisterHandler(this,&BlockManagerMasterActor::blockStatusReceiver); - RegisterHandler(this,&BlockManagerMasterActor::matcherReceiver); -// cout<<"the workerRegister is ready"<put(from.AsString(),message.mText); -} - -void BlockManagerMaster::BlockManagerMasterActor::matcherReceiver(const MatcherMessage &message,const Theron::Address from){ - cout<<"I want the proj "<matcher(message.filenameText,*bmi); - MatcherRespond resp(res.c_str()); - cout<<"I will send the proj "<getEndPoint()); - - Theron::Catcher resultCatcher; - receiver.RegisterHandler(&resultCatcher, &Theron::Catcher::Push); - - PartitionBindingMessage message(partition_id,number_of_chunks,desirable_storage_level); - logging_->log("Sending the binding message to [%s]",generateSlaveActorName(target).c_str()); - framework_->Send(message,receiver.GetAddress(),Theron::Address(generateSlaveActorName(target).c_str())); - if(receiver.TimeOutWait(1,200000)==0){ - logging_->elog("The node[%s] fails to receiver the partition binding message! target actor name=%s",NodeTracker::GetInstance()->GetNodeIP(target).c_str(),generateSlaveActorName(target).c_str()); - } - - return true; +bool BlockManagerMaster::SendBindingMessage( + const PartitionID &partition_id, const unsigned &number_of_chunks, + const StorageLevel &desirable_storage_level, const NodeID &target) const { + caf::scoped_actor self; + try { + auto target_actor = + Environment::getInstance()->get_master_node()->GetNodeActorFromId( + target); + self->sync_send(target_actor, BindingAtom::value, partition_id, + number_of_chunks, desirable_storage_level) + .await( + + [=](OkAtom) { + LOG(INFO) << "sending binding message is OK!!" 
<< endl; + }, + after(std::chrono::seconds(30)) >> + [=]() { + LOG(WARNING) << "sending binding message, but timeout 30s!!" + << endl; + return false; + } + + ); + } catch (caf::network_error &e) { + LOG(WARNING) + << "cann't connect to remote actor when sending binding message!"; + return false; + } + return true; } /* - * As opposed to SendBindingMessage, - * except this method isn't used in updating chunk number + * As opposed to SendBindingMessage, except this method isn't used in updating + * chunk number */ -bool BlockManagerMaster::SendUnbindingMessage(const PartitionID &partition_id, NodeID &target) const -{ - TimeOutReceiver receiver(Environment::getInstance()->getEndPoint()); - - Theron::Catcher resultCatcher; - receiver.RegisterHandler(&resultCatcher, &Theron::Catcher::Push); - - PartitionUnbindingMessage message(partition_id); - logging_->log("Sending the unbinding message to [%s]",generateSlaveActorName(target).c_str()); - framework_->Send(message,receiver.GetAddress(),Theron::Address(generateSlaveActorName(target).c_str())); - if(receiver.TimeOutWait(1,200000)==0){ - logging_->elog("The node[%s] fails to receive the partition unbinding message! target actor name=%s",NodeTracker::GetInstance()->GetNodeIP(target).c_str(),generateSlaveActorName(target).c_str()); - } - return true; +bool BlockManagerMaster::SendUnbindingMessage(const PartitionID &partition_id, + NodeID &target) const { + caf::scoped_actor self; + try { + auto target_actor = + Environment::getInstance()->get_master_node()->GetNodeActorFromId( + target); + self->sync_send(target_actor, UnBindingAtom::value, partition_id).await( + [=](OkAtom) { + LOG(INFO) << "sending unbinding message is OK!!" << endl; + }, + after(std::chrono::seconds(30)) >> + [=]() { + LOG(WARNING) << "sending unbinding message, but timeout 30s!!" 
+ << endl; + return false; + } + + ); + } catch (caf::network_error &e) { + LOG(WARNING) + << "cann't connect to remote actor when sending unbinding message!"; + return false; + } + return true; } diff --git a/storage/BlockManagerMaster.h b/storage/BlockManagerMaster.h index ee36b30b0..7752db0b3 100755 --- a/storage/BlockManagerMaster.h +++ b/storage/BlockManagerMaster.h @@ -7,80 +7,50 @@ /* consider to be merged with Catalog.binding*/ #ifndef BLOCKMANAGERMASTER_H_ #define BLOCKMANAGERMASTER_H_ -/* - * 和blockmanager一样尚未完成的带有actor的类 - * */ -#include -#include #include #include -#ifdef DMALLOC -#include "dmalloc.h" -#endif - #include "BlockManagerId.h" #include "AllBlockInfo.h" #include "BlanceMatcher.h" -#include "../common/Logging.h" #include "../common/ids.h" #include "../common/Message.h" using namespace std; - +/* + * just for sending binding and unbinding message + */ class BlockManagerMaster { -public: - enum storageLevel{memory,disk}; - struct BlockInfo{ - string blockId_; - storageLevel level_; - }; - class BlockManagerMasterActor:public Theron::Actor{ - friend class BlockManagerMaster; - public: - BlockManagerMasterActor(Theron::Framework *framework,const char *name); - virtual ~BlockManagerMasterActor(); - - void workerRegister(const StorageBudgetMessage &message,const Theron::Address from); - void heartbeatReceiver(const HeartBeatMessage &message,const Theron::Address from); - void blockStatusReceiver(const BlockStatusMessage &message,const Theron::Address from); - void matcherReceiver(const MatcherMessage &message,const Theron::Address from); - private: - - - }; - - - static BlockManagerMaster* getInstance(){ - if(master_==0){ - - master_=new BlockManagerMaster(); - - } - return master_; - } - virtual ~BlockManagerMaster(); - - void initialize(); - - /* 共测试使用 */ - void testForPoc(); - - // 这个函数返回的是blockmanagerId -// BlockManagerId getLocations(string blockId){}; - bool SendBindingMessage(const PartitionID&, const unsigned& number_of_chunks, const 
StorageLevel&,const NodeID&)const; - bool SendUnbindingMessage(const PartitionID &partition_id, NodeID &target) const; -private: - BlockManagerMaster(); - std::string generateSlaveActorName(const NodeID &)const; -private: - static BlockManagerMaster *master_; - // 将blockMessage收到之后,首先看他是什么消息,然后传给BlockManagerMasterActor处理 - AllBlockInfo *abi_; - BlanceMatcher *bm_; - Theron::Framework *framework_; - BlockManagerMasterActor* actor_; - Logging* logging_; - + public: + enum storageLevel { memory, disk }; + struct BlockInfo { + string blockId_; + storageLevel level_; + }; + + static BlockManagerMaster *getInstance() { + if (master_ == 0) { + master_ = new BlockManagerMaster(); + } + return master_; + } + virtual ~BlockManagerMaster(); + + void initialize(); + + // 这个函数返回的是blockmanagerId + // BlockManagerId getLocations(string blockId){}; + bool SendBindingMessage(const PartitionID &, const unsigned &number_of_chunks, + const StorageLevel &, const NodeID &) const; + bool SendUnbindingMessage(const PartitionID &partition_id, + NodeID &target) const; + + private: + BlockManagerMaster(); + + private: + static BlockManagerMaster *master_; + // 将blockMessage收到之后,首先看他是什么消息,然后传给BlockManagerMasterActor处理 + AllBlockInfo *abi_; + BlanceMatcher *bm_; }; - #endif /* BLOCKMANAGERMASTER_H_ */ diff --git a/storage/BlockMessage.cpp b/storage/BlockMessage.cpp deleted file mode 100755 index a1210ce4d..000000000 --- a/storage/BlockMessage.cpp +++ /dev/null @@ -1,16 +0,0 @@ -/* - * BlockMessage.cpp - * - * Created on: 2013-10-20 - * Author: casa - */ - -#include "BlockMessage.h" - -BlockMessage::BlockMessage() { - -} - -BlockMessage::~BlockMessage() { - -} diff --git a/storage/BlockMessage.h b/storage/BlockMessage.h deleted file mode 100755 index dc857ce6d..000000000 --- a/storage/BlockMessage.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * BlockMessage.h - * - * Created on: 2013-10-20 - * Author: casa - */ - -#ifndef BLOCKMESSAGE_H_ -#define BLOCKMESSAGE_H_ -#ifdef DMALLOC -#include "dmalloc.h" 
-#endif -/* - * 此类将被外层message吸收 - * */ -class BlockMessage { -public: - BlockMessage(); - virtual ~BlockMessage(); -}; - -#endif /* BLOCKMESSAGE_H_ */ diff --git a/storage/ChunkStorage.cpp b/storage/ChunkStorage.cpp index db5305e39..e67048566 100755 --- a/storage/ChunkStorage.cpp +++ b/storage/ChunkStorage.cpp @@ -1,19 +1,50 @@ /* - * ChunkStorage.cpp + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/storage/ChunkStorage.cpp + * + * Created on: NOV 14, 2013 + * Modified on: NOV 29, 2015 + * Author: Hanzhang,wangli + * Email: + * + * Description: * - * Created on: Nov 14, 2013 - * Author: wangli */ #include #include "ChunkStorage.h" +#include "../common/file_handle/hdfs_connector.h" #include "BlockManager.h" - #include "../Debug.h" #include "../utility/warmup.h" #include "../utility/rdtsc.h" #include "../Config.h" +#include "../common/error_define.h" +#include "../common/error_no.h" -bool ChunkReaderIterator::nextBlock() { +using claims::common::CStrError; +using claims::common::rUnkownStroageLevel; +using claims::common::rFailOpenFileInDiskChunkReaderIterator; +using claims::common::rFailReadOneBlockInDiskChunkReaderIterator; +using claims::common::rFailOpenHDFSFileInStorage; +using claims::common::rFailSetStartOffsetInStorage; +using claims::common::HdfsConnector; +bool ChunkReaderIterator::NextBlock() { lock_.acquire(); if (this->cur_block_ >= this->number_of_blocks_) { lock_.release(); @@ -23,35 +54,44 @@ bool ChunkReaderIterator::nextBlock() { lock_.release(); return true; } + ChunkStorage::ChunkStorage(const ChunkID& chunk_id, const unsigned& block_size, - const StorageLevel& desirable_level) + const StorageLevel& desirable_storage_level) : chunk_id_(chunk_id), block_size_(block_size), - desirable_storage_level_(desirable_level), + desirable_storage_level_(desirable_storage_level), current_storage_level_(HDFS), - chunk_size_(CHUNK_SIZE) { - // printf("CHUNKSTORAGE****:level=%d\n",desirable_storage_level_); -} + chunk_size_(CHUNK_SIZE) {} ChunkStorage::~ChunkStorage() { - // TODO Auto-generated destructor stub + // TODO(wangli): Auto-generated destructor stub } - -ChunkReaderIterator* ChunkStorage::createChunkReaderIterator() { - // printf("level value:%d\n",current_storage_level_); - ChunkReaderIterator* ret; +/** + * The function create the chunk iterator. 
Meantime, according to the storage + * level, create the chunk reader iterator in which storage level. It is a + * optimization that memory store data as a buffer. The granularity of reading + * file is chunk. + */ +ChunkReaderIterator* ChunkStorage::CreateChunkReaderIterator() { lock_.acquire(); + ChunkReaderIterator* ret; + HdfsInMemoryChunk chunk_info; + if (current_storage_level_ == MEMORY && + !BlockManager::getInstance()->getMemoryChunkStore()->GetChunk( + chunk_id_, chunk_info)) { + current_storage_level_ = HDFS; + DLOG(INFO) << "clean dirty data" << endl; + DLOG(INFO) << "the chunk has be freed : " << chunk_id_.chunk_off << endl; + } switch (current_storage_level_) { case MEMORY: { - // printf("current storage level: MEMORY\n"); - HdfsInMemoryChunk chunk_info; - if (BlockManager::getInstance()->getMemoryChunkStore()->getChunk( + if (BlockManager::getInstance()->getMemoryChunkStore()->GetChunk( chunk_id_, chunk_info)) ret = new InMemoryChunkReaderItetaor(chunk_info.hook, chunk_info.length, chunk_info.length / block_size_, block_size_, chunk_id_); else - ret = 0; + ret = NULL; break; } case DISK: { @@ -60,73 +100,66 @@ ChunkReaderIterator* ChunkStorage::createChunkReaderIterator() { break; } case HDFS: { - // printf("%lx current storage level for %d %d: - // HDFS\n",this,this->chunk_id_.partition_id.partition_off,this->chunk_id_.chunk_off); if (desirable_storage_level_ == MEMORY) { - HdfsInMemoryChunk chunk_info; chunk_info.length = CHUNK_SIZE; - if (BlockManager::getInstance()->getMemoryChunkStore()->applyChunk( + if (BlockManager::getInstance()->getMemoryChunkStore()->ApplyChunk( chunk_id_, chunk_info.hook)) { /* there is enough memory storage space, so the storage level can be * shifted.*/ if (Config::local_disk_mode) { - chunk_info.length = BlockManager::getInstance()->loadFromDisk( + chunk_info.length = BlockManager::getInstance()->LoadFromDisk( chunk_id_, chunk_info.hook, chunk_info.length); } else { - chunk_info.length = 
BlockManager::getInstance()->loadFromHdfs( + chunk_info.length = BlockManager::getInstance()->LoadFromHdfs( chunk_id_, chunk_info.hook, chunk_info.length); } if (chunk_info.length <= 0) { - /*chunk_info.length<=0 means that either the file does not exist or - * the current chunk_id exceeds the actual size of the file. - * * + /** + * chunk_info.length<=0 means that either the file does not exist or + * the current chunk_id exceeds the actual size of the file. */ - BlockManager::getInstance()->getMemoryChunkStore()->returnChunk( + BlockManager::getInstance()->getMemoryChunkStore()->ReturnChunk( chunk_id_); - ret = 0; + ret = NULL; break; - // return 0; } - // BlockManager::getInstance()->getMemoryChunkStore()->putChunk(chunk_id_,chunk_info); current_storage_level_ = MEMORY; /* update the chunk info in the Chunk store in case that the * chunk_info is updated.*/ - BlockManager::getInstance()->getMemoryChunkStore()->updateChunkInfo( + BlockManager::getInstance()->getMemoryChunkStore()->UpdateChunkInfo( chunk_id_, chunk_info); - // printf("%lx current is set to - // memory!\n"); ret = new InMemoryChunkReaderItetaor( chunk_info.hook, chunk_info.length, chunk_info.length / block_size_, block_size_, chunk_id_); break; } else { - /* - * The storage memory is full, some swap algorithm is needed here. - * TODO: swap algorithm. - */ - printf("Failed to get memory chunk budege!\n"); + /*The storage memory is full, some swap algorithm is needed here. + * TODO: swap algorithm. I finish in applychunk().*/ + LOG(WARNING) << "Failed to get memory chunk budege!" 
<< endl; assert(false); } } - // return new - // HDFSChunkReaderIterator(chunk_id_,chunk_size_,block_size_); + ret = new DiskChunkReaderIteraror(chunk_id_, chunk_size_, block_size_); break; } - default: { printf("current storage level: unknown!\n"); } + default: { WLOG(rUnkownStroageLevel, "current storage level: unknown!"); } } lock_.release(); return ret; } -std::string ChunkStorage::printCurrentStorageLevel() const { return ""; } + +std::string ChunkStorage::PrintCurrentStorageLevel() const { return ""; } + InMemoryChunkReaderItetaor::InMemoryChunkReaderItetaor( void* const& start, const unsigned& chunk_size, const unsigned& number_of_blocks, const unsigned& block_size, const ChunkID& chunk_id) : start_(start), ChunkReaderIterator(chunk_id, block_size, chunk_size, number_of_blocks) {} -bool InMemoryChunkReaderItetaor::nextBlock(BlockStreamBase*& block) { + +bool InMemoryChunkReaderItetaor::NextBlock(BlockStreamBase*& block) { lock_.acquire(); if (cur_block_ >= number_of_blocks_) { lock_.release(); @@ -134,7 +167,7 @@ bool InMemoryChunkReaderItetaor::nextBlock(BlockStreamBase*& block) { } cur_block_++; lock_.release(); - // printf("Read Block:%d:%d\n",chunk_id_.chunk_off,cur_block_); + /* calculate the block start address.*/ const char* block_start_address = (char*)start_ + cur_block_ * block_size_; @@ -142,12 +175,9 @@ bool InMemoryChunkReaderItetaor::nextBlock(BlockStreamBase*& block) { Block temp_block(block_size_, block_start_address); /*construct the block stream from temp_block. In the current version, the - * memory copy - * is used for simplicity. - * TODO: avoid memory copy. - */ + * memory copy is used for simplicity. 
+ * TODO(wangli): avoid memory copy.*/ block->constructFromBlock(temp_block); - return true; } InMemoryChunkReaderItetaor::~InMemoryChunkReaderItetaor() {} @@ -159,14 +189,14 @@ DiskChunkReaderIteraror::DiskChunkReaderIteraror(const ChunkID& chunk_id, block_buffer_ = new Block(block_size_); fd_ = FileOpen(chunk_id_.partition_id.getPathAndName().c_str(), O_RDONLY); if (fd_ == -1) { - printf("Failed to open file [%s], reason:%s\n", - chunk_id_.partition_id.getPathAndName().c_str(), strerror(errno)); + ELOG(rFailOpenFileInDiskChunkReaderIterator, + chunk_id_.partition_id.getPathAndName().c_str()); number_of_blocks_ = 0; } else { const unsigned start_pos = CHUNK_SIZE * chunk_id_.chunk_off; - const long length = lseek(fd_, 0, SEEK_END); + const unsigned long length = lseek(fd_, 0, SEEK_END); - if (length < 0 && length <= start_pos) { + if (length <= start_pos) { printf("fails to set the start offset %d for [%s]\n", start_pos, chunk_id.partition_id.getName().c_str()); number_of_blocks_ = 0; @@ -178,7 +208,8 @@ DiskChunkReaderIteraror::DiskChunkReaderIteraror(const ChunkID& chunk_id, number_of_blocks_ = CHUNK_SIZE / block_size_; } else { number_of_blocks_ = (length - start_pos) / block_size_; - printf("This chunk has only %d blocks!\n", number_of_blocks_); + LOG(INFO) << "This chunk has only" << number_of_blocks_ << "blocks!" + << endl; } } } @@ -187,56 +218,56 @@ DiskChunkReaderIteraror::~DiskChunkReaderIteraror() { block_buffer_->~Block(); FileClose(fd_); } -bool DiskChunkReaderIteraror::nextBlock(BlockStreamBase*& block) { + +bool DiskChunkReaderIteraror::NextBlock(BlockStreamBase*& block) { lock_.acquire(); if (cur_block_ >= number_of_blocks_) { lock_.release(); return false; } const unsigned posistion = lseek(fd_, 0, SEEK_CUR); - // printf("***** the data is read from position:[ %d MB %d KB - //]*******\n",posistion/1024/1024,(posistion/1024)%1024); - // sleep(1); /* * the read function will automatically move the read position, so the lseek * is not needed here. 
*/ tSize bytes_num = read(fd_, block_buffer_->getBlock(), block_buffer_->getsize()); - // printf("Tuple - // count=%d\n",*(int*)((char*)block_buffer_->getBlock()+65532)); if (bytes_num == block_size_) { cur_block_++; - // lseek(fd_,64*1024,SEEK_CUR); block->constructFromBlock(*block_buffer_); lock_.release(); return true; } else { cur_block_++; - printf("failed to read one block, only %d bytes are read!,error=%s\n", - bytes_num, strerror(errno)); + ELOG(rFailReadOneBlockInDiskChunkReaderIterator, + "failed to read one block"); lock_.release(); return false; } } + HDFSChunkReaderIterator::HDFSChunkReaderIterator(const ChunkID& chunk_id, unsigned& chunk_size, const unsigned& block_size) : ChunkReaderIterator(chunk_id, block_size, chunk_size) { block_buffer_ = new Block(block_size_); - fs_ = hdfsConnect(Config::hdfs_master_ip.c_str(), Config::hdfs_master_port); + // fs_ = hdfsConnect(Config::hdfs_master_ip.c_str(), + // Config::hdfs_master_port); + + fs_ = HdfsConnector::Instance(); hdfs_fd_ = hdfsOpenFile(fs_, chunk_id.partition_id.getName().c_str(), O_RDONLY, 0, 0, 0); if (!hdfs_fd_) { - printf("fails to open HDFS file [%s]\n", - chunk_id.partition_id.getName().c_str()); + ELOG(rFailOpenHDFSFileInStorage, chunk_id.partition_id.getName().c_str()); number_of_blocks_ = 0; } const unsigned start_pos = start_pos + CHUNK_SIZE * chunk_id_.chunk_off; if (hdfsSeek(fs_, hdfs_fd_, start_pos) == -1) { - printf("fails to set the start offset %d for [%s]\n", start_pos, - chunk_id.partition_id.getName().c_str()); + LOG(WARNING) << "[" << rFailSetStartOffsetInStorage << " , " + << CStrError(rFailSetStartOffsetInStorage) << "]" + << "fails to set the start offset" << start_pos << "for " + << chunk_id.partition_id.getName().c_str() << endl; number_of_blocks_ = 0; } hdfsFileInfo* file_info = hdfsGetPathInfo( @@ -251,9 +282,9 @@ HDFSChunkReaderIterator::HDFSChunkReaderIterator(const ChunkID& chunk_id, HDFSChunkReaderIterator::~HDFSChunkReaderIterator() { block_buffer_->~Block(); 
hdfsCloseFile(fs_, hdfs_fd_); - hdfsDisconnect(fs_); + // hdfsDisconnect(fs_); } -bool HDFSChunkReaderIterator::nextBlock(BlockStreamBase*& block) { +bool HDFSChunkReaderIterator::NextBlock(BlockStreamBase*& block) { if (cur_block_ >= number_of_blocks_) { lock_.acquire(); return false; @@ -272,22 +303,11 @@ bool HDFSChunkReaderIterator::nextBlock(BlockStreamBase*& block) { return false; } } - -// bool InMemoryChunkReaderItetaor::getNextBlockAccessor(block_accessor & ba) { -// if(cur_block_>=number_of_blocks_){ -// lock_.release(); -// return false; -// } -// ba.target_block_start_address=(char*)start_+cur_block_*block_size_; -// ba.block_size=block_size_; -// return true; -// -//} -// -// void ChunkReaderIterator::getBlock(const block_accessor& ba) const { -//} - -bool InMemoryChunkReaderItetaor::getNextBlockAccessor(block_accessor*& ba) { +/** + * Generate the block_accessor and get information from block_accessor to + * acquire blocks. + */ +bool InMemoryChunkReaderItetaor::GetNextBlockAccessor(block_accessor*& ba) { lock_.acquire(); if (cur_block_ >= number_of_blocks_) { lock_.release(); @@ -298,12 +318,18 @@ bool InMemoryChunkReaderItetaor::getNextBlockAccessor(block_accessor*& ba) { lock_.release(); ba = new InMemeryBlockAccessor(); InMemeryBlockAccessor* imba = (InMemeryBlockAccessor*)ba; - imba->setBlockSize(block_size_); - imba->setTargetBlockStartAddress((char*)start_ + cur_block * block_size_); + + imba->SetBlockSize(block_size_); + + imba->SetTargetBlockStartAddress((char*)start_ + cur_block * block_size_); return true; } -bool DiskChunkReaderIteraror::getNextBlockAccessor(block_accessor*& ba) { +/** + * Generate the block_accessor and get information from block_accessor to + * acquire blocks. 
+ */ +bool DiskChunkReaderIteraror::GetNextBlockAccessor(block_accessor*& ba) { lock_.acquire(); if (cur_block_ >= number_of_blocks_) { lock_.release(); @@ -314,14 +340,20 @@ bool DiskChunkReaderIteraror::getNextBlockAccessor(block_accessor*& ba) { lock_.release(); ba = new InDiskBlockAccessor(); InDiskBlockAccessor* idba = (InDiskBlockAccessor*)ba; - idba->setBlockCur(cur_block); - idba->setBlockSize(block_size_); - idba->setChunkId(chunk_id_); - idba->setBlockSize(chunk_size_); + idba->SetBlockCur(cur_block); + + idba->SetBlockSize(block_size_); + idba->SetChunkId(chunk_id_); + idba->SetBlockSize(chunk_size_); + return true; } -bool HDFSChunkReaderIterator::getNextBlockAccessor(block_accessor*& ba) { +/** + * Generate the block_accessor and get information from block_accessor to + * acquire blocks. + */ +bool HDFSChunkReaderIterator::GetNextBlockAccessor(block_accessor*& ba) { lock_.acquire(); if (cur_block_ >= number_of_blocks_) { lock_.release(); @@ -332,14 +364,15 @@ bool HDFSChunkReaderIterator::getNextBlockAccessor(block_accessor*& ba) { lock_.release(); ba = new InHDFSBlockAccessor(); InHDFSBlockAccessor* ihba = (InHDFSBlockAccessor*)ba; - ihba->setBlockCur(cur_block); - ihba->setBlockSize(block_size_); - ihba->setChunkId(chunk_id_); - ihba->setBlockSize(chunk_size_); + ihba->SetBlockCur(cur_block); + ihba->SetBlockSize(block_size_); + ihba->SetChunkId(chunk_id_); + ihba->SetBlockSize(chunk_size_); return true; } -void ChunkReaderIterator::InMemeryBlockAccessor::getBlock( +void ChunkReaderIterator::InMemeryBlockAccessor::GetBlock( + BlockStreamBase*& block) const { //#define MEMORY_COPY #ifdef MEMORY_COPY @@ -354,9 +387,9 @@ void ChunkReaderIterator::InMemeryBlockAccessor::getBlock( * avoiding the memory copy here */ block->setIsReference(true); - block->setBlock(target_block_start_address); + block->setBlock(target_block_start_address_); int tuple_count = - *(unsigned*)((char*)target_block_start_address + + *(unsigned*)((char*)target_block_start_address_ + 
block->getSerializedBlockSize() - sizeof(unsigned)); dynamic_cast(block)->setTuplesInBlock(tuple_count); // ((BlockStreamFix*)block)->free_ = @@ -366,13 +399,14 @@ void ChunkReaderIterator::InMemeryBlockAccessor::getBlock( #endif } -void ChunkReaderIterator::InDiskBlockAccessor::getBlock( +void ChunkReaderIterator::InDiskBlockAccessor::GetBlock( BlockStreamBase*& block) const { printf("InDiskBlockAccessor::getBlock() is not implemented!\n"); assert(false); } -void ChunkReaderIterator::InHDFSBlockAccessor::getBlock( +void ChunkReaderIterator::InHDFSBlockAccessor::GetBlock( + BlockStreamBase*& block) const { printf("InHDFSBlockAccessor::getBlock() is not implemented!\n"); assert(false); diff --git a/storage/ChunkStorage.h b/storage/ChunkStorage.h index 7549256c3..9aca62f45 100755 --- a/storage/ChunkStorage.h +++ b/storage/ChunkStorage.h @@ -1,106 +1,143 @@ /* - * ChunkStorage.h + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/storage/ChunkStorage.h + * + * Created on: NOV 14, 2013 + * Modified on: NOV 29, 2015 + * Author: Hanzhang,wangli + * Email: + * + * Description: * - * Created on: Nov 14, 2013 - * Author: wangli */ #ifndef CHUNKSTORAGE_H_ #define CHUNKSTORAGE_H_ #include #include -#ifdef DMALLOC -#include "dmalloc.h" -#endif -#include "StorageLevel.h" +#include "./StorageLevel.h" #include "../utility/lock.h" #include "../common/ids.h" #include "../common/Block/BlockStream.h" + +/** + * Design by the iterator pattern. ChunkReaderIterator gets blocks and handle + * the container of chunk. + */ class ChunkReaderIterator { public: /** - * This structure maintains all the information needed to access - * a block in in-memory chunk, in-disk chunk, or in-hdfs chunk. - * - * The underlying reason for using this structure is to. + * This structure maintains all the information needed to access a block in + * in-memory chunk, in-disk chunk, or in-hdfs chunk. The underlying reason for + * using this structure is to traverse each blocks. */ class block_accessor { public: block_accessor() {} + ~block_accessor() {} - virtual void getBlock(BlockStreamBase*& block) const { assert(false); } - unsigned getBlockSize() const { return block_size; } - void setBlockSize(unsigned blockSize) { - block_size = blockSize; - } + virtual void GetBlock(BlockStreamBase*& block) const { assert(false); } - ; + unsigned GetBlockSize() const { return block_size_; } + + void SetBlockSize(unsigned block_size) { block_size_ = block_size; } protected: - unsigned block_size; + unsigned block_size_; }; + + /** + * The structure of block_accessor in memory just need the address of blocks. 
+ */ class InMemeryBlockAccessor : public block_accessor { public: - InMemeryBlockAccessor() { target_block_start_address = NULL; } + InMemeryBlockAccessor() : target_block_start_address_(NULL){}; + ~InMemeryBlockAccessor() {} - void getBlock(BlockStreamBase*& block) const; - void* getTargetBlockStartAddress() const { - return target_block_start_address; + + void GetBlock(BlockStreamBase*& block) const; + + void* GetTargetBlockStartAddress() const { + return target_block_start_address_; } - void setTargetBlockStartAddress(void* targetBlockStartAddress) { - target_block_start_address = targetBlockStartAddress; + void SetTargetBlockStartAddress(void* target_block_start_address) { + target_block_start_address_ = target_block_start_address; } private: - void* target_block_start_address; + void* target_block_start_address_; }; class InDiskBlockAccessor : public block_accessor { public: - void getBlock(BlockStreamBase*& block) const; + void GetBlock(BlockStreamBase*& block) const; - unsigned getBlockCur() const { return block_cur; } + unsigned GetBlockCur() const { return block_cur_; } - void setBlockCur(unsigned blockCur) { block_cur = blockCur; } + void SetBlockCur(unsigned block_cur) { block_cur_ = block_cur; } - const ChunkID& getChunkId() const { return chunk_id; } + const ChunkID& GetChunkId() const { return chunk_id_; } - void setChunkId(const ChunkID& chunkId) { chunk_id = chunkId; } + void SetChunkId(const ChunkID& chunk_id) { chunk_id_ = chunk_id; } - unsigned getChunkSize() const { return chunk_size; } + unsigned GetChunkSize() const { return chunk_size_; } - void setChunkSize(unsigned chunkSize) { chunk_size = chunkSize; } + void SetChunkSize(unsigned chunk_size) { chunk_size_ = chunk_size; } private: - unsigned chunk_size; - ChunkID chunk_id; - unsigned block_cur; + unsigned chunk_size_; + ChunkID chunk_id_; + unsigned block_cur_; }; class InHDFSBlockAccessor : public block_accessor { public: - void getBlock(BlockStreamBase*& block) const; + void 
GetBlock(BlockStreamBase*& block) const; - unsigned getBlockCur() const { return block_cur; } + unsigned GetBlockCur() const { return block_cur_; } - void setBlockCur(unsigned blockCur) { block_cur = blockCur; } + void SetBlockCur(unsigned block_cur) { block_cur_ = block_cur; } - const ChunkID& getChunkId() const { return chunk_id; } + const ChunkID& GetChunkId() const { return chunk_id_; } - void setChunkId(const ChunkID& chunkId) { chunk_id = chunkId; } + void SetChunkId(const ChunkID& chunk_id) { chunk_id_ = chunk_id; } - unsigned getChunkSize() const { return chunk_size; } + unsigned GetChunkSize() const { return chunk_size_; } - void setChunkSize(unsigned chunkSize) { chunk_size = chunkSize; } + void SetChunkSize(unsigned chunk_size) { chunk_size_ = chunk_size; } private: - unsigned chunk_size; - ChunkID chunk_id; - unsigned block_cur; + unsigned chunk_size_; + ChunkID chunk_id_; + unsigned block_cur_; }; + /** + * @brief Method description: construct the iterator of chunk by using the + * initialization list. + * @param :ChunkID: the id of chunk. + * @param: block_size: the size of block. + * @param: chunk_size: the size of chunk. + * @param: number_of_blocks: how many blocks in the chunk. + */ ChunkReaderIterator(const ChunkID& chunk_id, unsigned block_size, unsigned chunk_size, const unsigned& number_of_blocks = 0) : chunk_id_(chunk_id), @@ -108,19 +145,35 @@ class ChunkReaderIterator { cur_block_(0), block_size_(block_size), chunk_size_(chunk_size){}; - virtual bool nextBlock(BlockStreamBase*& block) = 0; - virtual bool getNextBlockAccessor(block_accessor*& ba) = 0; - bool nextBlock(); + + virtual bool NextBlock(BlockStreamBase*& block) = 0; + + virtual bool GetNextBlockAccessor(block_accessor*& ba) = 0; + + /** + * @brief Method description: Just judge whether the cursor of block arrived + * at the end of block. It is a logical information of the cursor of block. + * @return true: Has block in the chunk. + * @return false: nothing left in the chunk. 
+ */ + bool NextBlock(); + virtual ~ChunkReaderIterator(){}; public: ChunkID chunk_id_; unsigned number_of_blocks_; + // the cursor of block in the chunk to make sure the operating position. unsigned cur_block_; Lock lock_; unsigned block_size_; unsigned chunk_size_; }; + +/** + * Inherit ChunkReaderIterator to manager the chunk in the memory. Different + * from other Chunk iterator, it just need the point of blocks. + */ class InMemoryChunkReaderItetaor : public ChunkReaderIterator { public: InMemoryChunkReaderItetaor(void* const& start, const unsigned& chunk_size, @@ -128,49 +181,88 @@ class InMemoryChunkReaderItetaor : public ChunkReaderIterator { const unsigned& block_size, const ChunkID& chunk_id); virtual ~InMemoryChunkReaderItetaor(); - bool nextBlock(BlockStreamBase*& block); - bool getNextBlockAccessor(block_accessor*& ba); + + /** + * @brief Method description: Just judge whether the cursor of block arrived + * at the end of block. It is a logical information of the cursor of block. + * @return true: Has block in the chunk. + * @return false: nothing left in the chunk. + */ + bool NextBlock(BlockStreamBase*& block); + + bool GetNextBlockAccessor(block_accessor*& ba); private: void* start_; }; +/** + * the iterator creates a buffer and allocates its memory such that the query + * processing can just use the Block without the concern the memory allocation + * and deallocation. + */ + class DiskChunkReaderIteraror : public ChunkReaderIterator { public: + /** + * @brief Method description: By call back read() and lseek() to construct, + * chunk reader iterator can read chunk which in disk. + * @param: ChunkID: the ID of chunk. + * @param: chunk_size: the size of chunk. + * @param: block_size: the size of block. 
+ */ DiskChunkReaderIteraror(const ChunkID& chunk_id, unsigned& chunk_size, const unsigned& block_size); + virtual ~DiskChunkReaderIteraror(); - bool nextBlock(BlockStreamBase*& block); - bool getNextBlockAccessor(block_accessor*& ba); - private: - // unsigned number_of_blocks_; - // unsigned cur_block_; - /*the iterator creates a buffer and allocates its memory such that the query - * processing - * can just use the Block without the concern the memory allocation and - * deallocation. + /** + * @brief Method description: Just judge whether the cursor of block arrived + * at the end of block. It is a logical information of the cursor of block. + * @param: block + * @return true: Has block in the chunk. + * @return false: nothing left in the chunk. */ + bool NextBlock(BlockStreamBase*& block); + + bool GetNextBlockAccessor(block_accessor*& ba); + + private: Block* block_buffer_; int fd_; }; +/** + * the iterator creates a buffer and allocates its memory such that the query + * processing can just use the Block without the concern the memory allocation + * and deallocation. + */ class HDFSChunkReaderIterator : public ChunkReaderIterator { public: + /** + * @brief Method description: By call back the c++ api of hdfs to construct, + * chunk reader iterator can read chunk which in hdfs. + * @param: ChunkID: the ID of chunk. + * @param: chunk_size: the size of chunk. + * @param: block_size: the size of block. + */ HDFSChunkReaderIterator(const ChunkID& chunk_id, unsigned& chunk_size, const unsigned& block_size); + virtual ~HDFSChunkReaderIterator(); - bool nextBlock(BlockStreamBase*& block); - bool getNextBlockAccessor(block_accessor*& ba); - private: - // unsigned number_of_blocks_; - // unsigned cur_block_; - /*the iterator creates a buffer and allocates its memory such that the query - * processing - * can just use the Block without the concern the memory allocation and - * deallocation. 
+ /** + * @brief Method description: Just judge whether the cursor of block arrived + * at the end of block. It is a logical information of the cursor of block. + * @param: block + * @return true: Has block in the chunk. + * @return false: nothing left in the chunk. */ + bool NextBlock(BlockStreamBase*& block); + + bool GetNextBlockAccessor(block_accessor*& ba); + + private: Block* block_buffer_; hdfsFS fs_; hdfsFile hdfs_fd_; @@ -178,19 +270,27 @@ class HDFSChunkReaderIterator : public ChunkReaderIterator { class ChunkStorage { public: - /* considering that how block size effects the performance is to be tested, - * here we leave - * a parameter block_size for the performance test concern. + /** + * Considering that how block size effects the performance is to be tested, + * here we leave a parameter block_size for the performance test concern. */ ChunkStorage(const ChunkID& chunk_id, const unsigned& block_size, - const StorageLevel& desirable_level); + const StorageLevel& desirable_storage_level); + virtual ~ChunkStorage(); - ChunkReaderIterator* createChunkReaderIterator(); - std::string printCurrentStorageLevel() const; - ChunkID getChunkID() { return chunk_id_; } - void setCurrentStorageLevel(const StorageLevel& current_level) { - current_storage_level_ = current_level; + /** + * @brief Method description: Generate the iterator for chunk storage to + * handle chunks. 
+ */ + ChunkReaderIterator* CreateChunkReaderIterator(); + + std::string PrintCurrentStorageLevel() const; + + ChunkID GetChunkID() { return chunk_id_; } + + void SetCurrentStorageLevel(const StorageLevel& current_storage_level) { + current_storage_level_ = current_storage_level; } private: @@ -199,7 +299,6 @@ class ChunkStorage { StorageLevel desirable_storage_level_; StorageLevel current_storage_level_; ChunkID chunk_id_; - Lock lock_; }; diff --git a/storage/Makefile.am b/storage/Makefile.am index 97371b299..5607d4df5 100644 --- a/storage/Makefile.am +++ b/storage/Makefile.am @@ -1,8 +1,7 @@ AM_CPPFLAGS= -fPIC -fpermissive\ --I${HADOOP_HOME}/src/c++/libhdfs\ +-I${HADOOP_HOME}/include\ -I${JAVA_HOME}/include\ -I${JAVA_HOME}/include/linux \ --I${THERON_HOME}/Include \ -I${BOOST_HOME} \ -I${BOOST_HOME}/boost/serialization @@ -17,22 +16,21 @@ LDADD = ../BufferManager/libbuffermanager.a \ ../common/libcommon.a \ ../common/Block/libblock.a \ ../utility/libutility.a \ - ${THERON_HOME}/Lib/libtherond.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.a \ ${BOOST_HOME}/stage/lib/libboost_serialization.a noinst_LIBRARIES=libstorage.a + libstorage_a_SOURCES = \ AllBlockInfo.cpp AllBlockInfo.h \ - BlanceMatcher.cpp BlanceMatcher.h \ + BlanceMatcher.cpp BlanceMatcher.h \ BlockManager.cpp BlockManager.h \ - BlockManagerId.cpp BlockManagerId.h \ + BlockManagerId.cpp BlockManagerId.h \ BlockManagerMaster.cpp BlockManagerMaster.h \ - BlockMessage.cpp BlockMessage.h \ - BlockStore.cpp BlockStore.h \ + BlockStore.cpp BlockStore.h \ + MemoryManager.cpp MemoryManager.h \ + PartitionStorage.cpp PartitionStorage.h \ ChunkStorage.cpp ChunkStorage.h \ - DiskStore.cpp DiskStore.h \ - MemoryStore.cpp MemoryStore.h \ + DiskStore.cpp DiskStore.h \ PartitionReaderIterator.cpp PartitionReaderIterator.h \ - PartitionStorage.cpp PartitionStorage.h \ StorageLevel.h diff --git a/storage/MemoryManager.cpp b/storage/MemoryManager.cpp new file mode 100644 index 000000000..76309f205 --- /dev/null +++ 
b/storage/MemoryManager.cpp @@ -0,0 +1,207 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/storage/MemoryManager.cpp + * + * Created on: 2015年11月19日 + * Author: Han,casa + * Email: + * + * Description: + * + */ + +#include +#include +#include +#include +#include +#include +#include "MemoryManager.h" +#include "../configure.h" +#include "../Resource/BufferManager.h" +#include "../common/error_define.h" +#include "../common/error_no.h" +using namespace std; +using claims::common::rSuccess; +using claims::common::rNoEnoughMemory; +using claims::common::rMemoryPoolMallocFail; +using claims::common::rReturnFailFindTargetChunkId; + +// namespace claims { +// namespace stroage { + +MemoryChunkStore* MemoryChunkStore::instance_ = NULL; +MemoryChunkStore::MemoryChunkStore() + : chunk_pool_(CHUNK_SIZE), block_pool_(BLOCK_SIZE), fc_(NULL) {} + +MemoryChunkStore::~MemoryChunkStore() { + chunk_pool_.purge_memory(); + block_pool_.purge_memory(); + if (NULL != fc_) { + delete fc_; + fc_ = NULL; + } +} + +bool MemoryChunkStore::IsExist(ChunkID& chunk_id) { + boost::unordered_map::iterator it = + chunk_list_.find(chunk_id); + WasteTime(); + if (it != chunk_list_.cend()) { + 
DLOG(INFO) << "chunk id already exists (chunk id = " << chunk_id.chunk_off + << ")" << endl; + it->second.lifetime_ = 0; + return true; + } else { + return false; + } +} + +bool MemoryChunkStore::ApplyChunk(ChunkID chunk_id, void*& start_address) { + lock_.acquire(); + if (true == IsExist(chunk_id)) { + lock_.release(); + return false; + } + + if (rSuccess != HasEnoughMemory()) { + fc_->WayOfFreeChunk(); + DLOG(INFO) << "not enough memory!!" << std::endl; + } + + if (NULL != (start_address = chunk_pool_.malloc())) { + chunk_list_[chunk_id] = HdfsInMemoryChunk(start_address, CHUNK_SIZE); + lock_.release(); + return true; + } else { + ELOG(rMemoryPoolMallocFail, "Error occurs when memalign!"); + lock_.release(); + return false; + } +} + +void MemoryChunkStore::ReturnChunk(const ChunkID& chunk_id) { + lock_.acquire(); + boost::unordered_map::iterator it = + chunk_list_.find(chunk_id); + + if (it == chunk_list_.cend()) { + WLOG(rReturnFailFindTargetChunkId, + "return fail to find the target chunk id !"); + lock_.release(); + return; + } + HdfsInMemoryChunk chunk_info = it->second; + chunk_pool_.free(chunk_info.hook); + chunk_list_.erase(it); + BufferManager::getInstance()->returnStorageBudget(CHUNK_SIZE); + lock_.release(); +} + +bool MemoryChunkStore::GetChunk(const ChunkID& chunk_id, + HdfsInMemoryChunk& chunk_info) { + lock_.acquire(); + boost::unordered_map::const_iterator it = + chunk_list_.find(chunk_id); + if (it != chunk_list_.cend()) { + chunk_info = it->second; + lock_.release(); + return true; + } + lock_.release(); + return false; +} +bool MemoryChunkStore::UpdateChunkInfo(const ChunkID& chunk_id, + const HdfsInMemoryChunk& chunk_info) { + lock_.acquire(); + boost::unordered_map::iterator it = + chunk_list_.find(chunk_id); + if (it == chunk_list_.cend()) { + lock_.release(); + return false; + } + it->second = chunk_info; + lock_.release(); + return true; +} + +bool MemoryChunkStore::PutChunk(const ChunkID& chunk_id, + HdfsInMemoryChunk& chunk_info) { + 
lock_.acquire(); + boost::unordered_map::const_iterator it = + chunk_list_.find(chunk_id); + if (it != chunk_list_.cend()) { + LOG(INFO) << "The memory chunk is already existed!" << endl; + lock_.release(); + return false; + } + chunk_list_[chunk_id] = chunk_info; + lock_.release(); + return true; +} + +// TODO(han):LIRS is the optimization of LRU. +void MemoryChunkStore::FreeChunkLRU::WayOfFreeChunk() { + boost::unordered_map::iterator target_ = + MemoryChunkStore::GetInstance()->chunk_list_.begin(); + for (boost::unordered_map::iterator mei_ = + target_; + mei_ != MemoryChunkStore::GetInstance()->chunk_list_.end(); mei_++) { + if (mei_->second.lifetime_ >= target_->second.lifetime_) { + target_ = mei_; + } + } + LOG(INFO) << "The way is LRU: the free chunk: " << target_->first.chunk_off + << endl; + MemoryChunkStore::GetInstance()->chunk_pool_.free(target_->second.hook); + MemoryChunkStore::GetInstance()->chunk_list_.erase(target_); +} + +void MemoryChunkStore::FreeChunkRandom::WayOfFreeChunk() { + boost::unordered_map::iterator it_ = + MemoryChunkStore::GetInstance()->chunk_list_.begin(); + int count = (int)MemoryChunkStore::GetInstance()->chunk_list_.size(); + srand((unsigned)time(NULL)); + int size = rand() % count; + for (int i = 0; i < size; i++) it_++; + LOG(INFO) << "The way is Random: the free chunk: " << it_->first.chunk_off + << endl; + MemoryChunkStore::GetInstance()->chunk_pool_.free(it_->second.hook); + MemoryChunkStore::GetInstance()->chunk_list_.erase(it_); +} + +MemoryChunkStore* MemoryChunkStore::GetInstance() { + if (NULL == instance_) { + instance_ = new MemoryChunkStore(); + } + return instance_; +} +/** + * @brief: By adjusting the parameters to judge whether system has enough + * memory. 
+ */ +RetCode MemoryChunkStore::HasEnoughMemory() { + if (!BufferManager::getInstance()->applyStorageDedget(CHUNK_SIZE)) { + return rNoEnoughMemory; + } else + return rSuccess; +} + +//} // namespace stroage +//} // namespace claims diff --git a/storage/MemoryManager.h b/storage/MemoryManager.h new file mode 100644 index 000000000..c33b76ad9 --- /dev/null +++ b/storage/MemoryManager.h @@ -0,0 +1,250 @@ +/* + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/storage/MemoryManager.h + * + * Created on: 2015年11月19日 + * Author: Han + * Email: + * + * Description: + * + */ + +#ifndef STORAGE_MEMORYMANAGER_H_ +#define STORAGE_MEMORYMANAGER_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "../common/error_define.h" +#include "../common/error_no.h" +#include "BlockStore.h" +#include "../common/rename.h" +#include "../Debug.h" +#include "../utility/lock.h" + +using std::cout; +using std::vector; +using std::map; +using std::list; +using std::endl; +using boost::pool; + +/** + * @brief Method description: the struct of chunk, include the start_address, + * the length of chunk, and the life time in the memory. + */ +typedef struct HdfsBlock { + HdfsBlock() : hook(NULL), length(0), lifetime_(0) {} + HdfsBlock(void* add, int length) : hook(add), length(length), lifetime_(0) {} + void* hook; + // record every block that is the length of file. + int length; + // whether is serialized + // the time stay in memory,this value is used to LIRS. + int lifetime_; +} HdfsInMemoryChunk; + +/** + * @brief Method description: memorystore only responsible for access data, + * but this is noting to data management and how to store. when accessing the + * data, it must check out how many space in memory. if no enough memory, we + * should choose the policy to remove the space. There are a lot of strategy + * selections, so design memorystore in the pattern of singleton. + */ +class MemoryChunkStore { + public: + /** + * @brief: the implement of the singleton pattern + */ + static MemoryChunkStore* GetInstance(); + MemoryChunkStore(); + virtual ~MemoryChunkStore(); + + /** + * @brief Method description: judge whether the chunk in the chunk_list_ + * @param ChunkID: the only token. + * @return True: the chunk in the chunk_list_. False: or not. 
+ */ + bool IsExist(ChunkID& chunk_id); + + /** + * @brief Method description: apply the space of memory for chunk .if the + * system is no enough memory, it will free other chunk before malloc the new + * chunk in the pool of memory. the policy is decided before construct the + * partition storage. + * @param ChunkID: the only token. + * @param start_address: if the start_address is null, it will assign the new + * space for it. if not, just skip the step of malloc. + * @return True: apply successful. False: Error occurs when mmealign. it + * happened in the step of malloc, you should check out the reminder of + * operation system's memory. + */ + bool ApplyChunk(ChunkID chunk_id, void*& start_address); + + /** + * @brief Method description: return the resource which be occupied by each of + * chunk_list_ + * @param ChunkID: the only token. + */ + void ReturnChunk(const ChunkID& chunk_id); + + /** + * @brief Method description: update the information of chunk info, and avoid + * the waste of resource. + * @param ChunkID: the only token. + * @param chunk_info: use for replace. + */ + bool UpdateChunkInfo(const ChunkID& chunk_id, + const HdfsInMemoryChunk& chunk_info); + + /** + * @brief: the method is that increasing the time component. + */ + void WasteTime() { + for (auto& i : chunk_list_) i.second.lifetime_++; + }; + + /** + * @brief: the base class for the method of free chunk.Aimed to polymorphic. + * the implement of the strategy pattern. + */ + class FreeChunk { + public: + FreeChunk(){}; + virtual ~FreeChunk(){}; + virtual void WayOfFreeChunk(){}; + }; + class FreeChunkLRU : public FreeChunk { + public: + FreeChunkLRU(){}; + ~FreeChunkLRU(){}; + void WayOfFreeChunk() override; + }; + + class FreeChunkRandom : public FreeChunk { + public: + FreeChunkRandom(){}; + ~FreeChunkRandom(){}; + void WayOfFreeChunk() override; + }; + /** + * @brief Method description: Currently according to the partition apply the + * space, we choose the best policy to remove chunk. 
+ * @param flag:0:Random;1:LRU; + */ + // TODO(han): Add new algorithm in the future; + void SetFreeAlgorithm(int flag) { + if (NULL != fc_) { + delete fc_; + fc_ = NULL; + } + if (flag == 1) + fc_ = new FreeChunkLRU(); + else + fc_ = new FreeChunkRandom(); + } + + RetCode HasEnoughMemory(); + + /* todo:这里还有可能是直接存储对象或者存储将对象序列化之后的结果两种 + * 在spark中要估计结果,所以有一个hdfsBlock中的length变量,在此留接口 + * 如果我们要估计结果,要写一个类来做估计 + * */ + bool putValue(string chunkId, void* value) { + // todo: + // 在此如果放不下了怎么办?采用什么样的交换策略,留接口,tryToPut中应该还有个接口是估计出来的值 + tryToPut(chunkId, value); + return true; + }; + void* getChunk(string blockId) { + map::iterator it_; + it_ = bufferpool_.find(blockId); + if (it_ != bufferpool_.end()) { + return it_->second.hook; + } + }; + bool GetChunk(const ChunkID& chunk_id, HdfsInMemoryChunk& chunk_info); + + bool PutChunk(const ChunkID& chunk_id, HdfsInMemoryChunk& chunk_info); + /* + * 将block为单位放到buffer pool中 + */ + bool tryToPut(string chunkId, void* value) { + if (ensureFreeSpace()) { + lock_.acquire(); + HdfsBlock chunkin; + chunkin.hook = (char*)value; + // todo:接口,就先等于这个吧 + chunkin.length = CHUNK_SIZE; + bufferpool_[chunkId.c_str()] = chunkin; + lock_.release(); + } else { + } + return true; + } //这个是以block为单位维护消息表? --han + + bool ensureFreeSpace() { + // todo: 基于LRU的column-based交换 + return true; + } + /* 有这个函数提供一个文件到block的映射,这个地方可以用iterator模式将其从 + * master端获取,因为做iterator的节zcl点肯定不是主节点,下面为调试用 + * */ + vector getFileLocation(string partition_file_name) { + vector block_set; + block_set.push_back("/home/casa/storage/data/1"); + block_set.push_back("/home/casa/storage/data/2"); + return block_set; + } + + private: + static MemoryChunkStore* instance_; + /** + * @brief: The logical struct about the chunk in the memory + */ + boost::unordered_map chunk_list_; + /** + * @brief: when you store in the pool, you should lock to avoid the deadlock. + */ + Lock lock_; + + /** + * @brief: Instantiate the fc_ in different way to choose the policy. 
+ */ + FreeChunk* fc_; + + /** + * @brief: the memory pool is used to the strategy of remove the chunk. + */ + pool<> chunk_pool_; + map bufferpool_; + pool<> block_pool_; +}; + +#endif // STORAGE_MEMORYMANAGER_H_ diff --git a/storage/MemoryStore.cpp b/storage/MemoryStore.cpp deleted file mode 100755 index 00afb2701..000000000 --- a/storage/MemoryStore.cpp +++ /dev/null @@ -1,107 +0,0 @@ -/* - * MemoryStore.cpp - * - * Created on: 2013-10-11 - * Author: casa - */ - -#include -#include -#include "MemoryStore.h" -#include "../configure.h" -#include "../Resource/BufferManager.h" -using namespace std; -MemoryChunkStore* MemoryChunkStore::instance_=0; -MemoryChunkStore::MemoryChunkStore():chunk_pool_(CHUNK_SIZE),block_pool_(BLOCK_SIZE){ -// cout<<"in the memorystroage initialize"<::const_iterator it=chunk_list_.find(chunk_id); - if(it!=chunk_list_.cend()){ - printf("chunk id already exists (chunk id =%d)!\n",chunk_id.chunk_off); - lock_.release(); - return false; - } - if(!BufferManager::getInstance()->applyStorageDedget(CHUNK_SIZE)){ - printf("not enough memory!!\n"); - lock_.release(); - return false; - } - if((start_address=chunk_pool_.malloc())!=0){ - chunk_list_[chunk_id]=HdfsInMemoryChunk(start_address,CHUNK_SIZE); - lock_.release(); - return true; - } - else{ - printf("Error occurs when memalign!\n"); - lock_.release(); - return false; - } -} - -void MemoryChunkStore::returnChunk(const ChunkID& chunk_id){ - lock_.acquire(); - boost::unordered_map::const_iterator it=chunk_list_.find(chunk_id); - if(it==chunk_list_.cend()){ - printf("return fail to find the target chunk id !\n"); - lock_.release(); - return; - } - HdfsInMemoryChunk chunk_info=it->second; - - chunk_pool_.free(chunk_info.hook); - chunk_list_.erase(it); - BufferManager::getInstance()->returnStorageBudget(chunk_info.length); - lock_.release(); -} - -bool MemoryChunkStore::getChunk(const ChunkID& chunk_id,HdfsInMemoryChunk& chunk_info){ - lock_.acquire(); - boost::unordered_map::const_iterator 
it=chunk_list_.find(chunk_id); - if(it!=chunk_list_.cend()){ - chunk_info=it->second; - lock_.release(); - return true; - } - lock_.release(); - return false; -} -bool MemoryChunkStore::updateChunkInfo(const ChunkID & chunk_id, const HdfsInMemoryChunk & chunk_info){ - lock_.acquire(); - boost::unordered_map::iterator it=chunk_list_.find(chunk_id); - if(it==chunk_list_.cend()){ - lock_.release(); - return false; - } - it->second=chunk_info; - lock_.release(); - return true; - - -} - -bool MemoryChunkStore::putChunk(const ChunkID& chunk_id,HdfsInMemoryChunk& chunk_info){ - lock_.acquire(); - boost::unordered_map::const_iterator it=chunk_list_.find(chunk_id); - if(it!=chunk_list_.cend()){ - printf("The memory chunk is already existed!\n"); - lock_.release(); - return false; - } - chunk_list_[chunk_id]=chunk_info; - lock_.release(); - return true; -} -MemoryChunkStore* MemoryChunkStore::getInstance(){ - if(instance_==0){ - instance_=new MemoryChunkStore(); - } - return instance_; -} diff --git a/storage/MemoryStore.h b/storage/MemoryStore.h deleted file mode 100755 index d6303e8f3..000000000 --- a/storage/MemoryStore.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * MemoryStore.h - * - * Created on: 2013-10-11 - * Author: casa - */ - -#ifndef MEMORYSTORE_H_ -#define MEMORYSTORE_H_ -/* - * 是否应该有个手动清楚内存的方式? 
- */ -#ifdef DMALLOC -#include "dmalloc.h" -#endif -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -using namespace std; - -#include "BlockStore.h" -#include "../common/rename.h" -#include "../Debug.h" -#include "../utility/lock.h" - -using boost::pool; - -/* 在此的block内的模式信息以及有多少个tuple是不会存储在这里的,那个就 - * 是catalog来管理的,所以这个地方只是负责存取,知道那些信息也没用,那 - * 些信息是做优化的 - * */ -typedef struct HdfsBlock{ -// /*可以直接使用hdfs的blockid*/ -// unsigned blockId; - HdfsBlock():hook(0),length(0){} - HdfsBlock(void* add,int length):hook(add),length(length){} - /*是将block mmap操作之后返回的内存地址*/ - void *hook; - /*记录每个block大小也就是文件长度*/ - int length; - // 是否被序列化过 -}HdfsInMemoryChunk; - -//typedef HdfsBlock HdfsInMemoryChunk; - -/* - * memorystore只是负责数据的存取,而和数据的管理和为什么存储是没有关系的, - * 在负责数据存取的同时,put的时候还要看看内存够不够,如果不够就要开始内存 - * 空间的移除,在此有很多的策略选择,要将memstore写成单例模式 - * */ -class MemoryChunkStore{ -public: - static MemoryChunkStore* getInstance(); - MemoryChunkStore(); - virtual ~MemoryChunkStore(); - - /* todo:这里还有可能是直接存储对象或者存储将对象序列化之后的结果两种 - * 在spark中要估计结果,所以有一个hdfsBlock中的length变量,在此留接口 - * 如果我们要估计结果,要写一个类来做估计 - * */ - bool putValue(string chunkId,void *value){ - // todo: 在此如果放不下了怎么办?采用什么样的交换策略,留接口,tryToPut中应该还有个接口是估计出来的值 - tryToPut(chunkId,value); - return true; - }; - - - bool applyChunk(ChunkID chunk_id,void*& start_address); - - void returnChunk(const ChunkID& chunk_id); - - bool updateChunkInfo(const ChunkID& chunk_id, const HdfsInMemoryChunk& chunk_info); - - void *getChunk(string blockId){ - map::iterator it_; - it_=bufferpool_.find(blockId); - if(it_!=bufferpool_.end()){ - return it_->second.hook; - } - }; - bool getChunk(const ChunkID& chunk_id,HdfsInMemoryChunk& chunk_info); - - bool putChunk(const ChunkID& chunk_id,HdfsInMemoryChunk& chunk_info); - - bool remove(string blockId){ - return true; - }; - - bool contains(string blockId){ - return false; - }; - - unsigned getSize(string blockId){ - return 0; - }; - - /* - * 将block为单位放到buffer pool中 - 
* */ - bool tryToPut(string chunkId,void *value){ - if(ensureFreeSpace()){ - lock_.acquire(); - HdfsBlock chunkin; - chunkin.hook=(char *)value; - // todo:接口,就先等于这个吧 - chunkin.length=CHUNK_SIZE; - bufferpool_[chunkId.c_str()]=chunkin; - lock_.release(); - } - else{ - - } - return true; - } - - bool ensureFreeSpace(){ - // todo: 基于LRU的column-based交换 - return true; - } - - /* 有这个函数提供一个文件到block的映射,这个地方可以用iterator模式将其从 - * master端获取,因为做iterator的节点肯定不是主节点,下面为调试用 - * */ - vector getFileLocation(string partition_file_name){ - vector block_set; - block_set.push_back("/home/casa/storage/data/1"); - block_set.push_back("/home/casa/storage/data/2"); - return block_set; - } - - -private: - map bufferpool_; - boost::unordered_map chunk_list_; - // 本节点能使用的最大的内存,以兆为单位 - long maxMemory_; - // 现在使用了多少内存?以兆为单位 - long currentMemory_; - // 在存储进去buffer pool的时候要枷锁 - Lock lock_; - - pool<> chunk_pool_; - pool<> block_pool_; - - static MemoryChunkStore* instance_; -}; - -#endif /* MEMORYSTORE_H_ */ diff --git a/storage/PartitionReaderIterator.cpp b/storage/PartitionReaderIterator.cpp old mode 100755 new mode 100644 index 4538a370d..f41a8b97d --- a/storage/PartitionReaderIterator.cpp +++ b/storage/PartitionReaderIterator.cpp @@ -1,10 +1,86 @@ /* - * PartitionReaderIterator.cpp + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/storage/PartitionReaderIterator.cpp + * + * Created on: NOV 19, 2015 + * Author: Hanzhang,wangli + * Email: + * + * Description: * - * Created on: Nov 17, 2013 - * Author: wangli */ - -#include "PartitionReaderIterator.h" - - +#include "./PartitionReaderIterator.h" +// namespace claims { +// namespace storage { +// PartitionReaderIterator::PartitionReaderIterator( +// PartitionStorage* partition_storage) +// : ps_(partition_storage), chunk_cur_(0), chunk_it_(NULL) {} +// +// PartitionReaderIterator::~PartitionReaderIterator() {} +// +// ChunkReaderIterator* PartitionReaderIterator::NextChunk() { +// if (chunk_cur_ < ps_->number_of_chunks_) +// return ps_->chunk_list_[chunk_cur_++]->CreateChunkReaderIterator(); +// else +// return NULL; +//} +// +// AtomicPartitionReaderIterator::~AtomicPartitionReaderIterator() {} +// +// ChunkReaderIterator* AtomicPartitionReaderIterator::NextChunk() { +// ChunkReaderIterator* ret = NULL; +// if (chunk_cur_ < ps_->number_of_chunks_) +// ret = ps_->chunk_list_[chunk_cur_++]->CreateChunkReaderIterator(); +// else +// ret = NULL; +// return ret; +//} +// +// bool PartitionReaderIterator::NextBlock(BlockStreamBase*& block) { +// assert(false); +// if (chunk_it_ > 0 && chunk_it_->NextBlock(block)) { +// return true; +// } else { +// if ((chunk_it_ = NextChunk()) > 0) { +// return NextBlock(block); +// } else { +// return false; +// } +// } +//} +// +// bool AtomicPartitionReaderIterator::NextBlock(BlockStreamBase*& block) { +// lock_.acquire(); +// ChunkReaderIterator::block_accessor* ba = NULL; +// if 
(chunk_it_ != 0 && chunk_it_->GetNextBlockAccessor(ba)) { +// lock_.release(); +// ba->GetBlock(block); +// return true; +// } else { +// if ((chunk_it_ = PartitionReaderIterator::NextChunk()) > 0) { +// lock_.release(); +// return NextBlock(block); +// } else { +// lock_.release(); +// return false; +// } +// } +//} //这个函数是关键,我需要考虑清楚,在决定如何动,这块~~~ --han 1123 +//} // namespace storage +//} // namespace claims diff --git a/storage/PartitionReaderIterator.h b/storage/PartitionReaderIterator.h old mode 100755 new mode 100644 index 9f820c60b..5423ff261 --- a/storage/PartitionReaderIterator.h +++ b/storage/PartitionReaderIterator.h @@ -1,14 +1,97 @@ /* - * PartitionReaderIterator.h + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /Claims/storage/PartitionReaderIderator.h + * + * Created on: Nov 19 ,2015 + * Author: Han,wangli + * Email: + * + * Description: It belongs to design patterns of iterator. This is a iterator of + * PartitionStorage. 
* - * Created on: Nov 17, 2013 - * Author: wangli */ -#ifndef PARTITIONREADERITERATOR_H_ -#define PARTITIONREADERITERATOR_H_ -#include "PartitionStorage.h" -#include "ChunkStorage.h" +#ifndef STORAGE_PARTITIONREADERITERATOR_H_ +#define STORAGE_PARTITIONREADERITERATOR_H_ + +#include "./ChunkStorage.h" +#include "./StorageLevel.h" +#include "./PartitionReaderIterator.h" +#include "./PartitionStorage.h" +#include "../utility/lock.h" +// class PartitionStorage; +// +// class PartitionReaderIterator { +// public: +// /** +// * @brief Method description: construct the partition iterator. +// * @param PartitionStorage: include the point of container, the cursor of +// * chunk, and chunk_list_. +// */ +// PartitionReaderIterator(PartitionStorage* partition_storage); +// +// virtual ~PartitionReaderIterator(); +// +// /** +// * @brief Method description: According the iterator to call chunk list and +// * create chunk iterator. +// * @return ret: NULL: create the chunk iterator failed. NOt NULL: succeed. +// */ +// virtual ChunkReaderIterator* NextChunk(); +// +// virtual bool NextBlock(BlockStreamBase*& block); +// +// protected: +// PartitionStorage* ps_; +// unsigned chunk_cur_; +// ChunkReaderIterator* chunk_it_; +//}; +// +// class AtomicPartitionReaderIterator : public PartitionReaderIterator { +// public: +// /** +// * @brief Method description: Construct the partition iterator. Different +// from +// * PartitionReaderiterator, it don't copy next block one by one, just using +// * the block_accessor that store the point of block to assign. +// * @param PartitionStorage: include the point of container, the cursor of +// * chunk, and chunk_list_. +// */ +// AtomicPartitionReaderIterator(PartitionStorage* partition_storage) +// : PartitionReaderIterator(partition_storage) {} +// +// virtual ~AtomicPartitionReaderIterator() override; +// +// /** +// * @brief Method description: According the iterator to call chunk list and +// * create chunk iterator. 
Different from PartitionReaderiterator, it don't +// * copy next block one by one, just using the block_accessor that store the +// * point of block to assign. +// * @return ret: NULL: create the chunk iterator failed. NOt NULL: succeed. +// */ +// ChunkReaderIterator* NextChunk() override; +// +// virtual bool NextBlock(BlockStreamBase*& block); +// +// private: +// Lock lock_; +//}; -#endif /* PARTITIONREADERITERATOR_H_ */ +#endif // STORAGE_PARTITIONREADERITERATOR_H_ diff --git a/storage/PartitionStorage.cpp b/storage/PartitionStorage.cpp index 6a57178b8..6729a8f55 100755 --- a/storage/PartitionStorage.cpp +++ b/storage/PartitionStorage.cpp @@ -1,24 +1,60 @@ /* - * ProjectionStorage.cpp + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /CLAIMS/storage/PartitionStorage.cpp + * + * Created on: NOV 14 ,2013 + * Modified on: NOV 28, 2015 + * Author: Hanzhang, wangli + * Email: + * + * Description: * - * Created on: Nov 14, 2013 - * Author: wangli */ #include "PartitionStorage.h" #include "../Debug.h" -#include "MemoryStore.h" +#include "./MemoryManager.h" #include "../Config.h" -PartitionStorage::PartitionStorage(const PartitionID &partition_id, - const unsigned &number_of_chunks, +#include "../Resource/BufferManager.h" + +/** + * According to number_of_chunks, construct chunk from partition and add into + * the chunk_list_. Meantime, you can get specific information about chunk. when + * number_of_chunks more than storage_max_bugdege, you should choose the random + * way to remove the chunk which in the memory. or not, choose LRU. + */ +PartitionStorage::PartitionStorage(const PartitionID& partition_id, + const unsigned& number_of_chunks, + const StorageLevel& storage_level) : partition_id_(partition_id), number_of_chunks_(number_of_chunks), desirable_storage_level_(storage_level) { + if (number_of_chunks_ * CHUNK_SIZE / 1024 / 1024 > + BufferManager::getInstance()->getStorageMemoryBudegeInMilibyte() * + Config::memory_utilization / 100) + MemoryChunkStore::GetInstance()->SetFreeAlgorithm(0); + else + MemoryChunkStore::GetInstance()->SetFreeAlgorithm(1); for (unsigned i = 0; i < number_of_chunks_; i++) { - chunk_list_.push_back( - new ChunkStorage(ChunkID(partition_id_, i), BLOCK_SIZE, - desirable_storage_level_)); + chunk_list_.push_back(new ChunkStorage( + ChunkID(partition_id_, i), BLOCK_SIZE, desirable_storage_level_)); } } @@ -29,26 +65,20 @@ PartitionStorage::~PartitionStorage() { chunk_list_.clear(); } -void PartitionStorage::addNewChunk() { - number_of_chunks_++; -} +void PartitionStorage::AddNewChunk() { number_of_chunks_++; } -void PartitionStorage::updateChunksWithInsertOrAppend( - const PartitionID &partition_id, const unsigned &number_of_chunks, +/** + * when appending data, the 
last chunk may be dirty so set storage level as HDFS + * to make sure the data will be reload from HDFS. actually, DISK is not used, + * only HDFS and MEMORY is used. + */ +void PartitionStorage::UpdateChunksWithInsertOrAppend( + const PartitionID& partition_id, const unsigned& number_of_chunks, const StorageLevel& storage_level) { if (!chunk_list_.empty()) { - /* - * when appending data, the last chunk may be dirty - * so set storage level as HDFS - * to make sure the data will be reload from HDFS - */ - MemoryChunkStore::getInstance()->returnChunk( - chunk_list_.back()->getChunkID()); -// if (Config::local_disk_mode == 0) - // actually, DISK is not used, only HDFS and MEMORY is used - chunk_list_.back()->setCurrentStorageLevel(HDFS); -// else -// chunk_list_.back()->setCurrentStorageLevel(DISK); + MemoryChunkStore::GetInstance()->ReturnChunk( + chunk_list_.back()->GetChunkID()); + chunk_list_.back()->SetCurrentStorageLevel(HDFS); } for (unsigned i = number_of_chunks_; i < number_of_chunks; i++) chunk_list_.push_back( @@ -56,111 +86,93 @@ void PartitionStorage::updateChunksWithInsertOrAppend( number_of_chunks_ = number_of_chunks; } -void PartitionStorage::removeAllChunks(const PartitionID &partition_id) { +/** + * By searching in chunk_list_ to get chunk address(physical information), and + * free the memory. This function is a logical process of delete the chunk, and + * call back actual method. 
+ */ +void PartitionStorage::RemoveAllChunks(const PartitionID& partition_id) { if (!chunk_list_.empty()) { vector::iterator iter = chunk_list_.begin(); - MemoryChunkStore* mcs = MemoryChunkStore::getInstance(); + MemoryChunkStore* mcs = MemoryChunkStore::GetInstance(); for (; iter != chunk_list_.end(); iter++) { - mcs->returnChunk((*iter)->getChunkID()); + mcs->ReturnChunk((*iter)->GetChunkID()); } chunk_list_.clear(); number_of_chunks_ = 0; } } -PartitionStorage::PartitionReaderItetaor* PartitionStorage::createReaderIterator() { - return new PartitionReaderItetaor(this); +PartitionStorage::PartitionReaderIterator* +PartitionStorage::CreateReaderIterator() { + return new PartitionReaderIterator(this); } -PartitionStorage::PartitionReaderItetaor* PartitionStorage::createAtomicReaderIterator() { +PartitionStorage::PartitionReaderIterator* +PartitionStorage::CreateAtomicReaderIterator() { return new AtomicPartitionReaderIterator(this); } -PartitionStorage::PartitionReaderItetaor::PartitionReaderItetaor( +PartitionStorage::PartitionReaderIterator::PartitionReaderIterator( PartitionStorage* partition_storage) - : ps(partition_storage), - chunk_cur_(0), - chunk_it_(0) { - -} + : ps_(partition_storage), chunk_cur_(0), chunk_it_(NULL) {} -//PartitionStorage::PartitionReaderItetaor::PartitionReaderItetaor():chunk_cur_(0){ -// -//} -PartitionStorage::PartitionReaderItetaor::~PartitionReaderItetaor() { +PartitionStorage::PartitionReaderIterator::~PartitionReaderIterator() {} -} -ChunkReaderIterator* PartitionStorage::PartitionReaderItetaor::nextChunk() { - if (chunk_cur_ < ps->number_of_chunks_) - return ps->chunk_list_[chunk_cur_++]->createChunkReaderIterator(); +ChunkReaderIterator* PartitionStorage::PartitionReaderIterator::NextChunk() { + if (chunk_cur_ < ps_->number_of_chunks_) + return ps_->chunk_list_[chunk_cur_++]->CreateChunkReaderIterator(); else - return 0; + return NULL; } 
-//PartitionStorage::AtomicPartitionReaderIterator::AtomicPartitionReaderIterator():PartitionReaderItetaor(){ -// -//} -PartitionStorage::AtomicPartitionReaderIterator::~AtomicPartitionReaderIterator() { -} -ChunkReaderIterator* PartitionStorage::AtomicPartitionReaderIterator::nextChunk() { -// lock_.acquire(); - ChunkReaderIterator* ret; - if (chunk_cur_ < ps->number_of_chunks_) - ret = ps->chunk_list_[chunk_cur_++]->createChunkReaderIterator(); +PartitionStorage::AtomicPartitionReaderIterator:: + ~AtomicPartitionReaderIterator() {} + +ChunkReaderIterator* +PartitionStorage::AtomicPartitionReaderIterator::NextChunk() { + ChunkReaderIterator* ret = NULL; + if (chunk_cur_ < ps_->number_of_chunks_) + ret = ps_->chunk_list_[chunk_cur_++]->CreateChunkReaderIterator(); else - ret = 0; -// lock_.release(); + ret = NULL; return ret; } -bool PartitionStorage::PartitionReaderItetaor::nextBlock( +bool PartitionStorage::PartitionReaderIterator::NextBlock( BlockStreamBase*& block) { assert(false); - if (chunk_it_ > 0 && chunk_it_->nextBlock(block)) { + if (chunk_it_ > 0 && chunk_it_->NextBlock(block)) { return true; - } - else { - if ((chunk_it_ = nextChunk()) > 0) { - return nextBlock(block); - } - else { + } else { + if ((chunk_it_ = NextChunk()) > 0) { + return NextBlock(block); + } else { return false; } } } -bool PartitionStorage::AtomicPartitionReaderIterator::nextBlock( +bool PartitionStorage::AtomicPartitionReaderIterator::NextBlock( BlockStreamBase*& block) { -//// lock_.acquire(); -// if(chunk_it_>0&&chunk_it_->nextBlock(block)){ -//// lock_.release(); -// return true; -// } -// else{ -// lock_.acquire(); -// if((chunk_it_=nextChunk())>0){ -// lock_.release(); -// return nextBlock(block); -// } -// else{ -// lock_.release(); -// return false; -// } -// } - // lock_.acquire(); - lock_.acquire(); - ChunkReaderIterator::block_accessor* ba; - if (chunk_it_ != 0 && chunk_it_->getNextBlockAccessor(ba)) { + ChunkReaderIterator::block_accessor* ba = NULL; + if (NULL != 
chunk_it_ && chunk_it_->GetNextBlockAccessor(ba)) { lock_.release(); - ba->getBlock(block); + ba->GetBlock(block); + if (NULL != ba) { + delete ba; + ba = NULL; + } return true; - } - else { - if ((chunk_it_ = PartitionReaderItetaor::nextChunk()) > 0) { - lock_.release(); - return nextBlock(block); + } else { + if (NULL != chunk_it_) { + delete chunk_it_; + chunk_it_ = NULL; } - else { + if ((chunk_it_ = PartitionReaderIterator::NextChunk()) > 0) { + lock_.release(); + return NextBlock(block); + } else { lock_.release(); return false; } diff --git a/storage/PartitionStorage.h b/storage/PartitionStorage.h index c228a7bf9..babfd4133 100755 --- a/storage/PartitionStorage.h +++ b/storage/PartitionStorage.h @@ -1,59 +1,157 @@ /* - * ProjectionStorage.h + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * /Claims/storage/PartitionReaderIterator.cpp * * Created on: Nov 14, 2013 - * Author: wangli + * Modified on: Nov 28, 2015 + * Author: wangli, Hanzhang + * Email: + * + * Description: + * */ #ifndef PARTITIONSTORAGE_H_ #define PARTITIONSTORAGE_H_ -#ifdef DMALLOC -#include "dmalloc.h" -#endif #include "ChunkStorage.h" #include "StorageLevel.h" -#include "PartitionReaderIterator.h" +#include "./PartitionReaderIterator.h" #include "../utility/lock.h" - +// namespace claims { +// namespace storage { +/** + * Design by The Iterator pattern. It is a container of partition. We should + * create iterator in the container and use the iterator to operator this + * container. By operating the partition iterator, it can generate chunk. + */ class PartitionStorage { -public: - class PartitionReaderItetaor{ - public: -// PartitionReaderItetaor(); - PartitionReaderItetaor(PartitionStorage* partition_storage); - virtual ~PartitionReaderItetaor(); - virtual ChunkReaderIterator* nextChunk(); - virtual bool nextBlock(BlockStreamBase* &block); - protected: - PartitionStorage* ps; - unsigned chunk_cur_; - ChunkReaderIterator* chunk_it_; - }; - class AtomicPartitionReaderIterator:public PartitionReaderItetaor{ - public: -// AtomicPartitionReaderIterator(); - AtomicPartitionReaderIterator(PartitionStorage* partition_storage):PartitionReaderItetaor(partition_storage){}; - virtual ~AtomicPartitionReaderIterator(); - ChunkReaderIterator* nextChunk(); - virtual bool nextBlock(BlockStreamBase* &block); - private: - Lock lock_; - }; - - friend class PartitionReaderItetaor; - PartitionStorage(const PartitionID &partition_id,const unsigned &number_of_chunks,const StorageLevel&); - virtual ~PartitionStorage(); - void addNewChunk(); - void updateChunksWithInsertOrAppend(const PartitionID &partition_id, const unsigned &number_of_chunks, const StorageLevel& storage_level); - void removeAllChunks(const PartitionID &partition_id); - PartitionReaderItetaor* createReaderIterator(); - 
PartitionReaderItetaor* createAtomicReaderIterator(); -protected: - PartitionID partition_id_; - unsigned number_of_chunks_; - std::vector chunk_list_; - StorageLevel desirable_storage_level_; + friend class PartitionReaderIterator; + + public: + class PartitionReaderIterator { + public: + /** + * @brief Method description: construct the partition iterator. + * @param PartitionStorage: include the point of container, the cursor of + * chunk, and chunk_list_. + */ + PartitionReaderIterator(PartitionStorage* partition_storage); + + virtual ~PartitionReaderIterator(); + + /** + * @brief Method description: According the iterator to call chunk list and + * create chunk iterator. + * @return ret: NULL: create the chunk iterator failed. NOt NULL: succeed. + */ + virtual ChunkReaderIterator* NextChunk(); + + virtual bool NextBlock(BlockStreamBase*& block); + + protected: + PartitionStorage* ps_; + unsigned chunk_cur_; + ChunkReaderIterator* chunk_it_; + }; + + class AtomicPartitionReaderIterator : public PartitionReaderIterator { + public: + /** + * @brief Method description: Construct the partition iterator. Different + * from + * PartitionReaderiterator, it don't copy next block one by one, just using + * the block_accessor that store the point of block to assign. + * @param PartitionStorage: include the point of container, the cursor of + * chunk, and chunk_list_. + */ + AtomicPartitionReaderIterator(PartitionStorage* partition_storage) + : PartitionReaderIterator(partition_storage) {} + + virtual ~AtomicPartitionReaderIterator() override; + + /** + * @brief Method description: According the iterator to call chunk list and + * create chunk iterator. Different from PartitionReaderiterator, it don't + * copy next block one by one, just using the block_accessor that store the + * point of block to assign. + * @return ret: NULL: create the chunk iterator failed. NOt NULL: succeed. 
+ */ + ChunkReaderIterator* NextChunk() override; + + virtual bool NextBlock(BlockStreamBase*& block); + + private: + Lock lock_; + }; + + /** + * @brief Method description: construct the partition container. + * @param :PartitionID: identify which partition is our require. + * @param :number_of_chunks: get how many chunks in the partition. + * @param :storage_level: the information of chunk in which storage level. + */ + PartitionStorage(const PartitionID& partition_id, + const unsigned& number_of_chunks, const StorageLevel&); + + /** + * @brief Method description: Destruction. Release all chunk_list_. + */ + virtual ~PartitionStorage(); + + void AddNewChunk(); + + /** + * @brief Method description: Expand the container of partition + * @param :PartitionID: identify which partition is our require. + * @param :number_of_chunks: get how many chunks in the partition. + * @param :storage_level: the information of chunk in which storage level. + */ + void UpdateChunksWithInsertOrAppend(const PartitionID& partition_id, + const unsigned& number_of_chunks, + const StorageLevel& storage_level); + + /** + * @brief Method description: Clear the container of partition + * @param :PartitionID: Choose the partition what we need to delete. + */ + void RemoveAllChunks(const PartitionID& partition_id); + + /** + * @brief Method description: Generate the iterator in iterator pattern. + */ + PartitionStorage::PartitionReaderIterator* CreateReaderIterator(); + + /** + * @brief Method description: Generate the iterator in iterator + * pattern. Different from PartitionReaderiterator, it don't copy next block + * one by one, just using the block_accessor that store the point of block. 
+ */ + PartitionStorage::PartitionReaderIterator* CreateAtomicReaderIterator(); + + protected: + PartitionID partition_id_; + unsigned number_of_chunks_; + std::vector chunk_list_; + StorageLevel desirable_storage_level_; }; +//} // namespace storage +//} // namespace claims #endif /* PARTITIONSTORAGE_H_ */ diff --git a/storage/StorageLevel.h b/storage/StorageLevel.h old mode 100755 new mode 100644 index 8f169ce55..19ccb3e87 --- a/storage/StorageLevel.h +++ b/storage/StorageLevel.h @@ -1,15 +1,36 @@ /* - * StorageLevel.h + * Copyright [2012-2015] DaSE@ECNU + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * /CLAIMS/storage/StorageLevel.h * * Created on: Nov 16, 2013 - * Author: wangli + * Modified on: Nov 13, 2015 + * Author: Hanzhang, wangli + * Email: + * + * Description: The level of storage is divided into three level. Data is aimed + * to put into memory. 
+ * */ -#ifndef STORAGELEVEL_H_ -#define STORAGELEVEL_H_ - +#ifndef STORAGE_STORAGELEVEL_H_ +#define STORAGE_STORAGELEVEL_H_ -//enum StorageLevel{MEMORY,DISK,HDFS}; typedef unsigned StorageLevel; #define MEMORY 0 #define DISK 1 @@ -17,4 +38,4 @@ typedef unsigned StorageLevel; #define DESIRIABLE_STORAGE_LEVEL MEMORY -#endif /* STORAGELEVEL_H_ */ +#endif // STORAGE_STORAGELEVEL_H_ diff --git a/utility/ThreadPool.cpp b/utility/ThreadPool.cpp new file mode 100644 index 000000000..cfafb7186 --- /dev/null +++ b/utility/ThreadPool.cpp @@ -0,0 +1,181 @@ +/* + * ThreadPool.cpp + * + * Created on: 2014-8-17 + * Author: yukai + * + * TODO: if task is more than thread, create some thread that execute + *function once + * 可以试着添加负载均衡 + * 如果每个线程需要分配内存,为了节省内存,可以设置线程第一次运行才分配内存,优先让已经分配到内存的线程运行 + * + */ +#include "thread_pool.h" +#include +#include + +#define __USE_GNU // 启用CPU_ZERO等相关的宏 + +ThreadPool::ThreadPool() {} + +ThreadPool::~ThreadPool() {} + +bool ThreadPool::Thread_Pool_init(int thread_count_in_pool_) { + bool success = true; + thread_count = thread_count_in_pool_; + free_thread_count = 0; + undo_task_count = 0; + + pthread_mutex_init(&free_thread_count_lock, NULL); + pthread_mutex_init(&undo_task_count_lock, NULL); + pthread_mutex_init(&task_queue_lock, NULL); + + sem_init(&undo_task_sem, 0, 0); // init semaphore + + thread_list_ = (pthread_t *)malloc(thread_count_in_pool_ * sizeof(pthread_t)); + while (!task_queue_.empty()) { + task_queue_.pop(); + } + + for (int i = 0; i < thread_count; ++i) { + if (pthread_create(&thread_list_[i], NULL, thread_exec, this) != + 0) { // if any failed, return false + cout << "ERROR: create pthread failed!" 
<< endl; + success = false; + break; + } + ++free_thread_count; + } + + assert(free_thread_count == thread_count); + // ThreadPoolLogging::log("thread pool init %d free thread\n", + // free_thread_count); + return success; +} + +// TODO: +// 可以把f与a封装为一个类对象,比如Task,不同的任务可以继承Task,Task中有run函数,Task由智能指针管理销毁 +void ThreadPool::add_task(void *(*f)(void *), void *a, bool e) { + Task *t = new Task(f, a, e); + pthread_mutex_lock(&task_queue_lock); + task_queue_.push(t); + pthread_mutex_unlock(&task_queue_lock); + + sem_post(&undo_task_sem); +} + +void *ThreadPool::thread_exec(void *arg) { + ThreadPool *thread_pool = (ThreadPool *)arg; + Task *task = NULL; + + thread_pool->bind_cpu(); + + // every thread execute a endless loop, waiting for task, and exit when + // receive a task with end member of 'true' + while (1) { + sem_wait(&(thread_pool->undo_task_sem)); + + pthread_mutex_lock(&(thread_pool->task_queue_lock)); + if (!thread_pool->task_queue_.empty()) { + task = thread_pool->task_queue_.front(); + thread_pool->task_queue_.pop(); + } + pthread_mutex_unlock(&(thread_pool->task_queue_lock)); + + if (task != NULL) { + if (task->end) // it means destory this thread + break; + ThreadPoolLogging::log( + "thread (id=%ld,offset=%lx) in thread pool is executing..\n", + syscall(__NR_gettid), pthread_self()); + (*(task->func))(task->arg); + ThreadPoolLogging::log( + "thread (id=%ld,offset=%lx) in thread pool finished executing..\n", + syscall(__NR_gettid), pthread_self()); + + Task::destroy_task(task); // TODO: consider whether destroy task + task = NULL; + } + + // sem_post(&task_sem); + } + pthread_exit(NULL); + return NULL; +} +/* +void *ThreadPool::thread_exec_with_cond(void *arg){ + ThreadPool *thread_pool = (ThreadPool*)arg; + Task *task = new Task(); + + while (1){ + pthread_mutex_lock(&cond_lock); + while (free_thread_count == 0){ + pthread_cond_wait(&free_thread_cond, &cond_lock); + } + + pthread_mutex_lock(&free_thread_count_lock); + --free_thread_count; + 
pthread_mutex_unlock(&free_thread_count_lock); + + pthread_mutex_unlock(&cond_lock); + + pthread_mutex_lock(&task_queue_lock); + if (!thread_pool->task_queue_.empty()){ + task = thread_pool->task_queue_.pop(); + } + pthread_mutex_unlock(&task_queue_lock); + + (*(task->func))(task->args); + + pthread_mutex_lock(&free_thread_count_lock); + ++free_thread_count; + pthread_mutex_unlock(&free_thread_count_lock); + } + +} +*/ + +void ThreadPool::bind_cpu() { + //将该子线程的状态设置为detached,则该线程运行结束后会自动释放所有资源,不要使父线程因为调用pthread_join而阻塞 + pthread_detach(pthread_self()); + + static volatile int current_cpu = 0; + int cpu_count = sysconf(_SC_NPROCESSORS_CONF); + int insert_cpu = __sync_fetch_and_add(¤t_cpu, 1) % cpu_count; + + cpu_set_t mask; + CPU_ZERO(&mask); + CPU_SET(insert_cpu, &mask); + int ret = pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask); + if (ret == -1) { + ThreadPoolLogging::elog("thread %ld bind cpu failed,ret = %d. %s\n", + syscall(__NR_gettid), ret, strerror(errno)); + } else { + ThreadPoolLogging::log( + "thread (tid=%ld offset=%lx) stiffened cpu=%ld (start=%ld end=%ld)\n", + syscall(__NR_gettid), pthread_self(), insert_cpu, 0, cpu_count); + } +} + +void ThreadPool::destroy_pool(ThreadPool *tp) { + // destory every thread + for (int i = 0; i < tp->thread_count; + ++i) { // send destory task to every thread + tp->add_task(NULL, NULL, true); + } + for (int i = 0; i < tp->thread_count; ++i) { + pthread_join(tp->thread_list_[i], NULL); + } + while (!tp->task_queue_.empty()) { + Task *temp = tp->task_queue_.front(); + tp->task_queue_.pop(); + Task::destroy_task(temp); // TODO: consider whether destroy task + } + + sem_destroy(&tp->undo_task_sem); + pthread_mutex_destroy(&tp->free_thread_count_lock); + pthread_mutex_destroy(&tp->undo_task_count_lock); + pthread_mutex_destroy(&tp->task_queue_lock); + + delete tp->thread_list_; +}