diff --git a/.github/workflows/pika.yml b/.github/workflows/pika.yml index f24ff43c33..0543b19d48 100644 --- a/.github/workflows/pika.yml +++ b/.github/workflows/pika.yml @@ -30,7 +30,7 @@ jobs: - name: install Deps if: ${{ steps.cache.output.cache-hit != 'true' }} run: | - sudo apt install autoconf libprotobuf-dev protobuf-compiler clang-tidy -y + sudo apt install autoconf libprotobuf-dev protobuf-compiler -y - name: Configure CMake # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make. @@ -41,12 +41,64 @@ jobs: # Build your program with the given configuration run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} - - name: Clang-tidy Check + - name: Test working-directory: ${{github.workspace}}/build - run: make clang-tidy + # Execute tests defined by the CMake configuration. + # See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail + run: ctest -C ${{env.BUILD_TYPE}} + + build_on_centos: + # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac. + # You can convert this to a matrix build if you need cross-platform coverage. + # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix + runs-on: ubuntu-latest + container: + image: centos:7 + + steps: + - name: Install deps + # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make. 
+ # See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type + run: | + yum install -y wget git autoconf centos-release-scl + yum install -y devtoolset-10-gcc devtoolset-10-gcc-c++ devtoolset-10-make devtoolset-10-bin-util + source /opt/rh/devtoolset-10/enable + gcc --version + make --version + + + - name: Install cmake + run: | + wget https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-linux-x86_64.sh + bash ./cmake-3.26.4-linux-x86_64.sh --skip-license --prefix=/usr + cmake --version + + - name: checkout + working-directory: ${{github.workspace}} + run: | + echo Fetching $GITHUB_REPOSITORY@$GITHUB_SHA + git init + git fetch --depth 1 https://github.com/$GITHUB_REPOSITORY $GITHUB_SHA + git checkout $GITHUB_SHA + + - name: Configure CMake + # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make. + # See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type + run: | + source /opt/rh/devtoolset-10/enable + cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DUSE_PIKA_TOOLS=ON + + - name: Build + # Build your program with the given configuration + run: | + cd ${{github.workspace}} + source /opt/rh/devtoolset-10/enable + cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} - name: Test - working-directory: ${{github.workspace}}/build # Execute tests defined by the CMake configuration. 
# See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail - run: ctest -C ${{env.BUILD_TYPE}} \ No newline at end of file + run: | + cd ${{github.workspace}}/build + source /opt/rh/devtoolset-10/enable + ctest -C ${{env.BUILD_TYPE}} \ No newline at end of file diff --git a/.github/workflows/pika_exporter.yml b/.github/workflows/pika_exporter.yml deleted file mode 100644 index 771b343b0d..0000000000 --- a/.github/workflows/pika_exporter.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Pika_exporter - -on: - push: - branches: [ "unstable" ] - pull_request: - branches: [ "unstable" ] - paths: - - 'tools/pika_exporter/**' - -jobs: - - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: 1.19 - - - name: Build - run: | - cd tools/pika_exporter && make -j - - name: Test - run: | - cd tools/pika_exporter && make -j diff --git a/.github/workflows/tools_go.yml b/.github/workflows/tools_go.yml new file mode 100644 index 0000000000..82d88e2ecc --- /dev/null +++ b/.github/workflows/tools_go.yml @@ -0,0 +1,47 @@ +name: Tools_go_build + +on: + push: + branches: [ "unstable" ] + paths: + - 'tools/**' + pull_request: + branches: [ "unstable" ] + paths: + - 'tools/**' + +jobs: + + build_pika_exporter: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: 1.19 + + - name: Build + run: | + cd tools/pika_exporter && make -j + - name: Test + run: | + cd tools/pika_exporter && make -j + + build_codis2pika: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: 1.19 + + - name: Build + run: | + cd tools/codis2pika && sh build.sh + - name: Test + run: | + cd tools/codis2pika && sh build.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index b9d2de44eb..f178f3eb78 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -89,9 +89,6 @@ 
if (${AUTOCONF} MATCHES AUTOCONF-NOTFOUND) message(FATAL_ERROR "not find autoconf on localhost") endif() -set(CLANG_SEARCH_PATH "/usr/local/bin" "/usr/bin" "/usr/local/opt/llvm/bin" "/usr/local/opt/llvm@12/bin" - "/usr/local/Cellar/llvm/12.0.1/bin" "/opt/homebrew/opt/llvm@12/bin/") - find_program(CLANG_TIDY_BIN NAMES clang-tidy clang-tidy-12 HINTS ${CLANG_SEARCH_PATH}) @@ -263,7 +260,11 @@ else() set(LIB_GLOG libglog.a) endif() -set(GLOG_LIBRARY ${INSTALL_LIBDIR}/${LIB_GLOG}) +if(${OS_VERSION} MATCHES "CentOS") + set(GLOG_LIBRARY ${INSTALL_LIBDIR_64}/${LIB_GLOG}) +else() + set(GLOG_LIBRARY ${INSTALL_LIBDIR}/${LIB_GLOG}) +endif() set(GLOG_INCLUDE_DIR ${INSTALL_INCLUDEDIR}) ExternalProject_Add(snappy @@ -756,7 +757,7 @@ target_link_libraries(${PROJECT_NAME} storage net pstd - ${LIB_GLOG} + ${GLOG_LIBRARY} librocksdb.a ${LIB_PROTOBUF} ${LIB_GFLAGS} @@ -768,21 +769,6 @@ target_link_libraries(${PROJECT_NAME} ${LIBUNWIND_LIBRARY} ${JEMALLOC_LIBRARY}) -option(ENABLE_IPO "enable interprocedural optimization" ON) -if (ENABLE_IPO) - include(CheckIPOSupported) - check_ipo_supported(RESULT ipo_result OUTPUT ipo_output LANGUAGES CXX) - - if (ipo_result) - set_property(TARGET ${PROJECT_NAME} PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE) - if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - target_link_libraries(${PROJECT_NAME} PUBLIC "-fuse-ld=lld") - endif () - else () - message(WARNING "IPO is not supported: ${ipo_output}") - endif () -endif () - option(USE_SSL "Enable SSL support" OFF) add_custom_target( clang-tidy diff --git a/format_code.sh b/format_code.sh index ccfe19bce6..a24d8084a9 100755 --- a/format_code.sh +++ b/format_code.sh @@ -1,6 +1,6 @@ #!/bin/bash -find include src pika-tools -regex '.*\.\(cpp\|hpp\|c\|h\|cc\)' | xargs clang-format -i +find include src tools -regex '.*\.\(cpp\|hpp\|c\|h\|cc\)' | xargs clang-format -i # If you want to automatically format your code before git commit diff --git a/include/pika_admin.h b/include/pika_admin.h index b855d239dd..1ebdac6182 
100644 --- a/include/pika_admin.h +++ b/include/pika_admin.h @@ -22,8 +22,8 @@ class SlaveofCmd : public Cmd { public: SlaveofCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SlaveofCmd(*this); } private: @@ -42,8 +42,8 @@ class DbSlaveofCmd : public Cmd { public: DbSlaveofCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new DbSlaveofCmd(*this); } private: @@ -66,8 +66,8 @@ class AuthCmd : public Cmd { public: AuthCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new AuthCmd(*this); } private: @@ -79,8 +79,8 @@ class BgsaveCmd : public Cmd { public: BgsaveCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new BgsaveCmd(*this); } private: @@ -93,8 +93,8 
@@ class CompactCmd : public Cmd { public: CompactCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new CompactCmd(*this); } private: @@ -111,8 +111,8 @@ class PurgelogstoCmd : public Cmd { public: PurgelogstoCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PurgelogstoCmd(*this); } private: @@ -125,8 +125,8 @@ class PingCmd : public Cmd { public: PingCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PingCmd(*this); } private: @@ -137,8 +137,8 @@ class SelectCmd : public Cmd { public: SelectCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SelectCmd(*this); } private: @@ -151,8 +151,8 @@ class FlushallCmd : public Cmd { public: FlushallCmd(const 
std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new FlushallCmd(*this); } private: @@ -165,8 +165,8 @@ class FlushdbCmd : public Cmd { public: FlushdbCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new FlushdbCmd(*this); } private: @@ -181,8 +181,8 @@ class ClientCmd : public Cmd { void Do(std::shared_ptr partition = nullptr) override; const static std::string CLIENT_LIST_S; const static std::string CLIENT_KILL_S; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ClientCmd(*this); } private: @@ -210,8 +210,8 @@ class InfoCmd : public Cmd { InfoCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new InfoCmd(*this); } private: @@ -255,8 +255,8 @@ class ShutdownCmd : public Cmd { public: ShutdownCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr 
partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ShutdownCmd(*this); } private: @@ -267,8 +267,8 @@ class ConfigCmd : public Cmd { public: ConfigCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ConfigCmd(*this); } private: @@ -284,8 +284,8 @@ class MonitorCmd : public Cmd { public: MonitorCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new MonitorCmd(*this); } private: @@ -296,8 +296,8 @@ class DbsizeCmd : public Cmd { public: DbsizeCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new DbsizeCmd(*this); } private: @@ -308,8 +308,8 @@ class TimeCmd : public Cmd { public: TimeCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, 
const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new TimeCmd(*this); } private: @@ -320,8 +320,8 @@ class DelbackupCmd : public Cmd { public: DelbackupCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new DelbackupCmd(*this); } private: @@ -332,8 +332,8 @@ class EchoCmd : public Cmd { public: EchoCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new EchoCmd(*this); } private: @@ -345,8 +345,8 @@ class ScandbCmd : public Cmd { public: ScandbCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ScandbCmd(*this); } private: @@ -360,8 +360,8 @@ class SlowlogCmd : public Cmd { enum SlowlogCondition { kGET, kLEN, kRESET }; SlowlogCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) 
override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SlowlogCmd(*this); } private: @@ -378,8 +378,8 @@ class PaddingCmd : public Cmd { public: PaddingCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PaddingCmd(*this); } private: @@ -392,8 +392,8 @@ class PKPatternMatchDelCmd : public Cmd { public: PKPatternMatchDelCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PKPatternMatchDelCmd(*this); } private: @@ -407,8 +407,8 @@ class DummyCmd : public Cmd { DummyCmd() : Cmd("", 0, 0) {} DummyCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new DummyCmd(*this); } private: @@ -419,8 +419,8 @@ class QuitCmd : public Cmd { public: QuitCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void 
Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new QuitCmd(*this); } private: @@ -431,8 +431,8 @@ class HelloCmd : public Cmd { public: HelloCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HelloCmd(*this); } private: diff --git a/include/pika_binlog.h b/include/pika_binlog.h index f0fad9db56..bcc130d1b3 100644 --- a/include/pika_binlog.h +++ b/include/pika_binlog.h @@ -11,15 +11,16 @@ #include "pstd/include/env.h" #include "pstd/include/pstd_mutex.h" #include "pstd/include/pstd_status.h" +#include "pstd/include/noncopyable.h" #include "include/pika_define.h" std::string NewFileName(const std::string& name, uint32_t current); -class Version { +class Version final : public pstd::noncopyable { public: - Version(pstd::RWFile* save); + Version(const std::shared_ptr& save); ~Version(); pstd::Status Init(); @@ -38,15 +39,13 @@ class Version { std::shared_lock l(rwlock_); printf("Current pro_num %u pro_offset %llu\n", pro_num_, pro_offset_); } - // No copying allowed; - Version(const Version&) = delete; - void operator=(const Version&) = delete; private: - pstd::RWFile* save_ = nullptr; + // shared with versionfile_ + std::shared_ptr save_; }; -class Binlog { +class Binlog : public pstd::noncopyable { public: Binlog(std::string Binlog_path, int file_size = 100 * 1024 * 1024); ~Binlog(); @@ -84,10 +83,6 @@ class Binlog { void Close(); - // No copying allowed - Binlog(const Binlog&) = delete; - void operator=(const Binlog&) = delete; - private: pstd::Status Put(const char* item, int len); static pstd::Status 
AppendPadding(pstd::WritableFile* file, uint64_t* len); @@ -103,9 +98,10 @@ class Binlog { std::atomic opened_; - Version* version_ = nullptr; - pstd::WritableFile* queue_ = nullptr; - pstd::RWFile* versionfile_ = nullptr; + std::unique_ptr version_; + std::unique_ptr queue_; + // versionfile_ can only be used as a shared_ptr, and it will be used as a variable version_ in the ~Version() function. + std::shared_ptr versionfile_; pstd::Mutex mutex_; diff --git a/include/pika_binlog_reader.h b/include/pika_binlog_reader.h index ab84f8d2c1..9a9d41ad0d 100644 --- a/include/pika_binlog_reader.h +++ b/include/pika_binlog_reader.h @@ -16,13 +16,16 @@ #include "include/pika_binlog.h" +using pstd::Slice; +using pstd::Status; class PikaBinlogReader { public: PikaBinlogReader(uint32_t cur_filenum, uint64_t cur_offset); PikaBinlogReader(); - ~PikaBinlogReader(); - pstd::Status Get(std::string* scratch, uint32_t* filenum, uint64_t* offset); + ~PikaBinlogReader() {}; + + Status Get(std::string* scratch, uint32_t* filenum, uint64_t* offset); int Seek(const std::shared_ptr& logger, uint32_t filenum, uint64_t offset); bool ReadToTheEnd(); void GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset); @@ -39,10 +42,10 @@ class PikaBinlogReader { uint64_t last_record_offset_ = 0; std::shared_ptr logger_; - pstd::SequentialFile* queue_ = nullptr; + std::unique_ptr queue_; - char* const backing_store_; - pstd::Slice buffer_; + std::unique_ptr const backing_store_; + Slice buffer_; }; #endif // PIKA_BINLOG_READER_H_ diff --git a/include/pika_bit.h b/include/pika_bit.h index da458e8535..8afa557167 100644 --- a/include/pika_bit.h +++ b/include/pika_bit.h @@ -23,8 +23,8 @@ class BitGetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* 
Clone() override { return new BitGetCmd(*this); } private: @@ -46,8 +46,8 @@ class BitSetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new BitSetCmd(*this); } private: @@ -71,8 +71,8 @@ class BitCountCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new BitCountCmd(*this); } private: @@ -98,8 +98,8 @@ class BitPosCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new BitPosCmd(*this); } private: @@ -124,8 +124,8 @@ class BitOpCmd : public Cmd { public: BitOpCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag){}; void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new BitOpCmd(*this); } private: diff --git a/include/pika_client_processor.h b/include/pika_client_processor.h index d18ac6b68e..59059c3a42 100644 --- a/include/pika_client_processor.h +++ b/include/pika_client_processor.h @@ -8,7 +8,7 @@ #include #include - +#include #include 
"net/include/bg_thread.h" #include "net/include/thread_pool.h" @@ -23,7 +23,7 @@ class PikaClientProcessor { size_t ThreadPoolCurQueueSize(); private: - net::ThreadPool* pool_ = nullptr; - std::vector bg_threads_; + std::unique_ptr pool_; + std::vector> bg_threads_; }; #endif // PIKA_CLIENT_PROCESSOR_H_ diff --git a/include/pika_cmd_table_manager.h b/include/pika_cmd_table_manager.h index cbf8538100..e996953093 100644 --- a/include/pika_cmd_table_manager.h +++ b/include/pika_cmd_table_manager.h @@ -15,7 +15,7 @@ class PikaCmdTableManager { public: PikaCmdTableManager(); - virtual ~PikaCmdTableManager(); + virtual ~PikaCmdTableManager(){}; std::shared_ptr GetCmd(const std::string& opt); uint32_t DistributeKey(const std::string& key, uint32_t partition_num); @@ -25,9 +25,9 @@ class PikaCmdTableManager { void InsertCurrentThreadDistributionMap(); bool CheckCurrentThreadDistributionMapExist(const std::thread::id& tid); - CmdTable* cmds_ = nullptr; + std::unique_ptr cmds_; std::shared_mutex map_protector_; - std::unordered_map thread_distribution_map_; + std::unordered_map> thread_distribution_map_; }; #endif diff --git a/include/pika_command.h b/include/pika_command.h index a9e145d23f..6d9b7238bd 100644 --- a/include/pika_command.h +++ b/include/pika_command.h @@ -8,6 +8,7 @@ #include #include +#include #include "net/include/net_conn.h" #include "net/include/redis_conn.h" @@ -383,7 +384,7 @@ class Cmd : public std::enable_shared_from_this { ProcessArg() = default; ProcessArg(std::shared_ptr _partition, std::shared_ptr _sync_partition, HintKeys _hint_keys) - : partition(std::move(std::move(_partition))), sync_partition(std::move(std::move(_sync_partition))), hint_keys(std::move(std::move(_hint_keys))) {} + : partition(std::move(_partition)), sync_partition(std::move(_sync_partition)), hint_keys(std::move(_hint_keys)) {} std::shared_ptr partition; std::shared_ptr sync_partition; HintKeys hint_keys; @@ -413,6 +414,7 @@ class Cmd : public std::enable_shared_from_this { 
bool is_admin_require() const; bool is_single_partition() const; bool is_multi_partition() const; + bool is_classic_mode() const; bool HashtagIsConsistent(const std::string& lhs, const std::string& rhs) const; uint64_t GetDoDuration() const { return do_duration_; }; @@ -464,12 +466,11 @@ class Cmd : public std::enable_shared_from_this { Cmd& operator=(const Cmd&); }; -using CmdTable = std::unordered_map; +using CmdTable = std::unordered_map>; // Method for Cmd Table void InitCmdTable(CmdTable* cmd_table); Cmd* GetCmdFromTable(const std::string& opt, const CmdTable& cmd_table); -void DestoryCmdTable(CmdTable* cmd_table); void RedisAppendContent(std::string& str, const std::string& value) { str.append(value.data(), value.size()); diff --git a/include/pika_conf.h b/include/pika_conf.h index 99dd93b92c..c7d9b26a73 100644 --- a/include/pika_conf.h +++ b/include/pika_conf.h @@ -26,7 +26,7 @@ class PikaConf : public pstd::BaseConf { public: PikaConf(const std::string& path); - ~PikaConf() override; + ~PikaConf() override {} // Getter int port() { @@ -141,6 +141,7 @@ class PikaConf : public pstd::BaseConf { std::shared_lock l(rwlock_); return user_blacklist_; } + bool classic_mode() { return classic_mode_.load(); } int databases() { std::shared_lock l(rwlock_); return databases_; @@ -290,7 +291,7 @@ class PikaConf : public pstd::BaseConf { bool daemonize() { return daemonize_; } std::string pidfile() { return pidfile_; } int binlog_file_size() { return binlog_file_size_; } - PikaMeta* local_meta() { return local_meta_; } + PikaMeta* local_meta() { return local_meta_.get(); } std::vector compression_per_level(); static rocksdb::CompressionType GetCompression(const std::string& value); @@ -575,7 +576,7 @@ class PikaConf : public pstd::BaseConf { int64_t blob_cache_ = 0; int64_t blob_num_shard_bits_ = 0; - PikaMeta* local_meta_ = nullptr; + std::unique_ptr local_meta_; std::shared_mutex rwlock_; }; diff --git a/include/pika_consensus.h b/include/pika_consensus.h index 
e6061add23..d7552f7985 100644 --- a/include/pika_consensus.h +++ b/include/pika_consensus.h @@ -7,8 +7,6 @@ #include -#include - #include "include/pika_binlog_transverter.h" #include "include/pika_client_conn.h" #include "include/pika_define.h" @@ -16,10 +14,9 @@ #include "include/pika_stable_log.h" #include "pstd/include/env.h" -class Context { +class Context : public pstd::noncopyable { public: Context(std::string path); - ~Context(); pstd::Status Init(); // RWLock should be held when access members. @@ -39,13 +36,10 @@ class Context { tmp_stream << " Applied window " << applied_win_.ToStringStatus(); return tmp_stream.str(); } - // No copying allowed; - Context(const Context&) = delete; - void operator=(const Context&) = delete; private: std::string path_; - pstd::RWFile* save_ = nullptr; + std::unique_ptr save_; }; class SyncProgress { @@ -75,7 +69,7 @@ class MemLog { struct LogItem { LogItem(const LogOffset& _offset, std::shared_ptr _cmd_ptr, std::shared_ptr _conn_ptr, std::shared_ptr _resp_ptr) - : offset(_offset), cmd_ptr(std::move(std::move(_cmd_ptr))), conn_ptr(std::move(std::move(_conn_ptr))), resp_ptr(std::move(std::move(_resp_ptr))) {} + : offset(_offset), cmd_ptr(std::move(_cmd_ptr)), conn_ptr(std::move(_conn_ptr)), resp_ptr(std::move(_resp_ptr)) {} LogOffset offset; std::shared_ptr cmd_ptr; std::shared_ptr conn_ptr; @@ -157,7 +151,7 @@ class ConsensusCoordinator { // redis parser cb struct CmdPtrArg { - CmdPtrArg(std::shared_ptr ptr) : cmd_ptr(std::move(std::move(ptr))) {} + CmdPtrArg(std::shared_ptr ptr) : cmd_ptr(std::move(ptr)) {} std::shared_ptr cmd_ptr; }; static int InitCmd(net::RedisParser* parser, const net::RedisCmdArgsType& argv); diff --git a/include/pika_define.h b/include/pika_define.h index bb7ab166fc..0fcb6f26d6 100644 --- a/include/pika_define.h +++ b/include/pika_define.h @@ -183,7 +183,7 @@ const std::string BinlogSyncStateMsg[] = {"NotSync", "ReadFromCache", "ReadFromF struct BinlogChip { LogOffset offset_; std::string binlog_; - 
BinlogChip(const LogOffset& offset, std::string binlog) : offset_(offset), binlog_(std::move(std::move(binlog))) {} + BinlogChip(const LogOffset& offset, std::string binlog) : offset_(offset), binlog_(std::move(binlog)) {} BinlogChip(const BinlogChip& binlog_chip) { offset_ = binlog_chip.offset_; binlog_ = binlog_chip.binlog_; diff --git a/include/pika_geo.h b/include/pika_geo.h index f21f4ab639..055c1bfd25 100644 --- a/include/pika_geo.h +++ b/include/pika_geo.h @@ -57,8 +57,8 @@ class GeoAddCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new GeoAddCmd(*this); } private: @@ -76,8 +76,8 @@ class GeoPosCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new GeoPosCmd(*this); } private: @@ -95,8 +95,8 @@ class GeoDistCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new GeoDistCmd(*this); } private: @@ -113,8 +113,8 @@ class GeoHashCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* 
Clone() override { return new GeoHashCmd(*this); } private: @@ -127,8 +127,8 @@ class GeoRadiusCmd : public Cmd { public: GeoRadiusCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new GeoRadiusCmd(*this); } private: @@ -152,8 +152,8 @@ class GeoRadiusByMemberCmd : public Cmd { public: GeoRadiusByMemberCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new GeoRadiusByMemberCmd(*this); } private: diff --git a/include/pika_geohash.h b/include/pika_geohash.h index 696c623493..2971351f68 100644 --- a/include/pika_geohash.h +++ b/include/pika_geohash.h @@ -46,12 +46,13 @@ extern "C" { #define GEO_STEP_MAX 26 /* 26*2 = 52 bits. 
*/ /* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ -#define GEO_LAT_MIN (-85.05112878) -#define GEO_LAT_MAX 85.05112878 -#define GEO_LONG_MIN (-180) -#define GEO_LONG_MAX 180 +constexpr double GEO_LAT_MIN{-85.05112878}; +constexpr double GEO_LAT_MAX{85.05112878}; +constexpr int64_t GEO_LONG_MIN{-180}; +constexpr int64_t GEO_LONG_MAX{180}; -using GeoDirection = enum { + +enum GeoDirection { GEOHASH_NORTH = 0, GEOHASH_EAST, GEOHASH_WEST, @@ -67,18 +68,18 @@ struct GeoHashBits { uint8_t step; }; -using GeoHashRange = struct { +struct GeoHashRange { double min; double max; }; -using GeoHashArea = struct { +struct GeoHashArea { GeoHashBits hash; GeoHashRange longitude; GeoHashRange latitude; }; -using GeoHashNeighbors = struct { +struct GeoHashNeighbors { GeoHashBits north; GeoHashBits east; GeoHashBits west; diff --git a/include/pika_geohash_helper.h b/include/pika_geohash_helper.h index a1743206ba..8478ea60ac 100644 --- a/include/pika_geohash_helper.h +++ b/include/pika_geohash_helper.h @@ -53,7 +53,7 @@ int geohashBoundingBox(double longitude, double latitude, double radius_meters, GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters); GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters); GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude, double radius_meters); -GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits &hash); +GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits& hash); double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d); int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double* distance); int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double* distance); diff --git a/include/pika_hash.h b/include/pika_hash.h index 6e46e11423..48f6025de9 100644 --- a/include/pika_hash.h +++ b/include/pika_hash.h @@ -23,8 +23,8 
@@ class HDelCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HDelCmd(*this); } private: @@ -42,8 +42,8 @@ class HGetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HGetCmd(*this); } private: @@ -60,8 +60,8 @@ class HGetallCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HGetallCmd(*this); } private: @@ -78,8 +78,8 @@ class HSetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HSetCmd(*this); } private: @@ -96,8 +96,8 @@ class HExistsCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HExistsCmd(*this); } private: @@ -114,8 +114,8 @@ class HIncrbyCmd : 
public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HIncrbyCmd(*this); } private: @@ -133,8 +133,8 @@ class HIncrbyfloatCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HIncrbyfloatCmd(*this); } private: @@ -151,8 +151,8 @@ class HKeysCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HKeysCmd(*this); } private: @@ -169,8 +169,8 @@ class HLenCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HLenCmd(*this); } private: @@ -187,8 +187,8 @@ class HMgetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HMgetCmd(*this); } private: @@ -206,8 +206,8 @@ class HMsetCmd : 
public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HMsetCmd(*this); } private: @@ -225,8 +225,8 @@ class HSetnxCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HSetnxCmd(*this); } private: @@ -243,8 +243,8 @@ class HStrlenCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HStrlenCmd(*this); } private: @@ -261,8 +261,8 @@ class HValsCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HValsCmd(*this); } private: @@ -279,8 +279,8 @@ class HScanCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HScanCmd(*this); } private: @@ -302,8 +302,8 @@ class HScanxCmd : public Cmd { 
return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new HScanxCmd(*this); } private: @@ -326,8 +326,8 @@ class PKHScanRangeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PKHScanRangeCmd(*this); } private: @@ -353,8 +353,8 @@ class PKHRScanRangeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PKHRScanRangeCmd(*this); } private: diff --git a/include/pika_hyperloglog.h b/include/pika_hyperloglog.h index 9405c866ea..54ae14f25b 100644 --- a/include/pika_hyperloglog.h +++ b/include/pika_hyperloglog.h @@ -21,8 +21,8 @@ class PfAddCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PfAddCmd(*this); } private: @@ -36,8 +36,8 @@ class PfCountCmd : public Cmd { public: PfCountCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& 
hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PfCountCmd(*this); } private: @@ -50,8 +50,8 @@ class PfMergeCmd : public Cmd { public: PfMergeCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PfMergeCmd(*this); } private: diff --git a/include/pika_kv.h b/include/pika_kv.h index 45dc32e643..15b00dc992 100644 --- a/include/pika_kv.h +++ b/include/pika_kv.h @@ -24,8 +24,8 @@ class SetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SetCmd(*this); } private: @@ -54,8 +54,8 @@ class GetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new GetCmd(*this); } private: @@ -87,8 +87,8 @@ class IncrCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new 
IncrCmd(*this); } private: @@ -106,8 +106,8 @@ class IncrbyCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new IncrbyCmd(*this); } private: @@ -125,8 +125,8 @@ class IncrbyfloatCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new IncrbyfloatCmd(*this); } private: @@ -144,8 +144,8 @@ class DecrCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new DecrCmd(*this); } private: @@ -163,8 +163,8 @@ class DecrbyCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new DecrbyCmd(*this); } private: @@ -182,8 +182,8 @@ class GetsetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new 
GetsetCmd(*this); } private: @@ -201,8 +201,8 @@ class AppendCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new AppendCmd(*this); } private: @@ -230,8 +230,8 @@ class KeysCmd : public Cmd { public: KeysCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag), type_(storage::DataType::kAll) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new KeysCmd(*this); } private: @@ -250,8 +250,8 @@ class SetnxCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SetnxCmd(*this); } private: @@ -272,8 +272,8 @@ class SetexCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SetexCmd(*this); } private: @@ -294,8 +294,8 @@ class PsetexCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const 
HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PsetexCmd(*this); } private: @@ -316,8 +316,8 @@ class DelvxCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new DelvxCmd(*this); } private: @@ -351,8 +351,8 @@ class MsetnxCmd : public Cmd { public: MsetnxCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new MsetnxCmd(*this); } private: @@ -370,8 +370,8 @@ class GetrangeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new GetrangeCmd(*this); } private: @@ -390,8 +390,8 @@ class SetrangeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SetrangeCmd(*this); } private: @@ -410,8 +410,8 @@ class StrlenCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) 
override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new StrlenCmd(*this); } private: @@ -443,8 +443,8 @@ class ExpireCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ExpireCmd(*this); } private: @@ -464,8 +464,8 @@ class PexpireCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PexpireCmd(*this); } private: @@ -485,8 +485,8 @@ class ExpireatCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ExpireatCmd(*this); } private: @@ -504,8 +504,8 @@ class PexpireatCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PexpireatCmd(*this); } private: @@ -525,8 +525,8 @@ class TtlCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) 
override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new TtlCmd(*this); } private: @@ -543,8 +543,8 @@ class PttlCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PttlCmd(*this); } private: @@ -561,8 +561,8 @@ class PersistCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PersistCmd(*this); } private: @@ -579,8 +579,8 @@ class TypeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new TypeCmd(*this); } private: @@ -592,8 +592,8 @@ class ScanCmd : public Cmd { public: ScanCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag), pattern_("*") {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ScanCmd(*this); } private: @@ -611,8 +611,8 @@ class ScanxCmd : public Cmd { public: ScanxCmd(const std::string& name, int arity, uint16_t 
flag) : Cmd(name, arity, flag), pattern_("*") {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ScanxCmd(*this); } private: @@ -636,8 +636,8 @@ class PKSetexAtCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PKSetexAtCmd(*this); } private: @@ -658,8 +658,8 @@ class PKScanRangeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PKScanRangeCmd(*this); } private: @@ -687,8 +687,8 @@ class PKRScanRangeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PKRScanRangeCmd(*this); } private: diff --git a/include/pika_list.h b/include/pika_list.h index e88065d7f1..765047051f 100644 --- a/include/pika_list.h +++ b/include/pika_list.h @@ -23,8 +23,8 @@ class LIndexCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr 
partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LIndexCmd(*this); } private: @@ -43,8 +43,8 @@ class LInsertCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LInsertCmd(*this); } private: @@ -64,8 +64,8 @@ class LLenCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LLenCmd(*this); } private: @@ -82,8 +82,8 @@ class LPopCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LPopCmd(*this); } private: @@ -100,8 +100,8 @@ class LPushCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LPushCmd(*this); } private: @@ -120,8 +120,8 @@ class LPushxCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const 
HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LPushxCmd(*this); } private: @@ -139,8 +139,8 @@ class LRangeCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LRangeCmd(*this); } private: @@ -159,8 +159,8 @@ class LRemCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LRemCmd(*this); } private: @@ -179,8 +179,8 @@ class LSetCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LSetCmd(*this); } private: @@ -199,8 +199,8 @@ class LTrimCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new LTrimCmd(*this); } private: @@ -219,8 +219,8 @@ class RPopCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) 
override {}; + void Merge() override {}; Cmd* Clone() override { return new RPopCmd(*this); } private: @@ -237,8 +237,8 @@ class RPopLPushCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new RPopLPushCmd(*this); } private: @@ -256,8 +256,8 @@ class RPushCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new RPushCmd(*this); } private: @@ -276,8 +276,8 @@ class RPushxCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new RPushxCmd(*this); } private: diff --git a/include/pika_meta.h b/include/pika_meta.h index e810ad5b0f..252d527e62 100644 --- a/include/pika_meta.h +++ b/include/pika_meta.h @@ -7,13 +7,14 @@ #define PIKA_META #include + #include "pstd/include/env.h" #include "pstd/include/pstd_mutex.h" #include "include/pika_define.h" -class PikaMeta { +class PikaMeta : public pstd::noncopyable { public: PikaMeta() = default; ~PikaMeta() = default; @@ -23,9 +24,6 @@ class PikaMeta { pstd::Status StableSave(const std::vector& table_structs); pstd::Status ParseMeta(std::vector* table_structs); - // No copying allowed; - PikaMeta(const PikaMeta&) = delete; - void operator=(const PikaMeta&) = delete; private: std::shared_mutex rwlock_; 
std::string local_meta_path_; diff --git a/include/pika_partition.h b/include/pika_partition.h index c09c02d134..27e7c9c584 100644 --- a/include/pika_partition.h +++ b/include/pika_partition.h @@ -7,7 +7,9 @@ #define PIKA_PARTITION_H_ #include + #include "pstd/include/scope_record_lock.h" + #include "storage/backupable.h" #include "storage/storage.h" @@ -24,10 +26,8 @@ struct KeyScanInfo { int32_t duration = -3; std::vector key_infos; // the order is strings, hashes, lists, zsets, sets bool key_scaning_ = false; - KeyScanInfo() - : + KeyScanInfo() : s_start_time("0"), - key_infos({{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}) {} }; @@ -62,7 +62,7 @@ class Partition : public std::enable_shared_from_this { void DbRWLockReader(); void DbRWUnLock(); - pstd::lock::LockMgr* LockMgr(); + std::shared_ptr LockMgr(); void PrepareRsync(); bool TryUpdateMasterOffset(); @@ -103,7 +103,8 @@ class Partition : public std::enable_shared_from_this { bool opened_ = false; std::shared_mutex db_rwlock_; - pstd::lock::LockMgr* lock_mgr_ = nullptr; + // class may be shared, using shared_ptr would be a better choice + std::shared_ptr lock_mgr_; std::shared_ptr db_; bool full_sync_ = false; @@ -122,7 +123,7 @@ class Partition : public std::enable_shared_from_this { void FinishBgsave(); BgSaveInfo bgsave_info_; pstd::Mutex bgsave_protector_; - storage::BackupEngine* bgsave_engine_; + std::shared_ptr bgsave_engine_; // key scan info use void InitKeyScan(); @@ -130,3 +131,4 @@ class Partition : public std::enable_shared_from_this { }; #endif + diff --git a/include/pika_pubsub.h b/include/pika_pubsub.h index 61ccb6571e..2cc206adeb 100644 --- a/include/pika_pubsub.h +++ b/include/pika_pubsub.h @@ -15,8 +15,8 @@ class PublishCmd : public Cmd { public: PublishCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; 
- void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PublishCmd(*this); } private: @@ -29,8 +29,8 @@ class SubscribeCmd : public Cmd { public: SubscribeCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SubscribeCmd(*this); } private: @@ -41,8 +41,8 @@ class UnSubscribeCmd : public Cmd { public: UnSubscribeCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new UnSubscribeCmd(*this); } private: @@ -53,8 +53,8 @@ class PUnSubscribeCmd : public Cmd { public: PUnSubscribeCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PUnSubscribeCmd(*this); } private: @@ -65,8 +65,8 @@ class PSubscribeCmd : public Cmd { public: PSubscribeCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + 
void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PSubscribeCmd(*this); } private: @@ -77,8 +77,8 @@ class PubSubCmd : public Cmd { public: PubSubCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new PubSubCmd(*this); } private: diff --git a/include/pika_repl_client.h b/include/pika_repl_client.h index 14d2d315ff..9a2f9a70b9 100644 --- a/include/pika_repl_client.h +++ b/include/pika_repl_client.h @@ -29,7 +29,7 @@ struct ReplClientTaskArg { std::shared_ptr res; std::shared_ptr conn; ReplClientTaskArg(std::shared_ptr _res, std::shared_ptr _conn) - : res(std::move(std::move(_res))), conn(std::move(std::move(_conn))) {} + : res(std::move(_res)), conn(std::move(_conn)) {} }; struct ReplClientWriteBinlogTaskArg { @@ -37,9 +37,10 @@ struct ReplClientWriteBinlogTaskArg { std::shared_ptr conn; void* res_private_data; PikaReplBgWorker* worker; - ReplClientWriteBinlogTaskArg(std::shared_ptr _res, - std::shared_ptr _conn, void* _res_private_data, PikaReplBgWorker* _worker) - : res(std::move(_res)), conn(std::move(std::move(_conn))), res_private_data(_res_private_data), worker(_worker) {} + ReplClientWriteBinlogTaskArg(const std::shared_ptr& _res, + const std::shared_ptr& _conn, + void* _res_private_data, PikaReplBgWorker* _worker) + : res(std::move(_res)), conn(std::move(_conn)), res_private_data(_res_private_data), worker(_worker) {} }; struct ReplClientWriteDBTaskArg { @@ -88,10 +89,10 @@ class PikaReplClient { size_t GetHashIndex(const std::string& key, bool upper_half); void UpdateNextAvail() { next_avail_ = (next_avail_ + 1) % bg_workers_.size(); } - 
PikaReplClientThread* client_thread_; + std::unique_ptr client_thread_; int next_avail_ = 0; std::hash str_hash; - std::vector bg_workers_; + std::vector> bg_workers_; }; #endif diff --git a/include/pika_repl_client_conn.h b/include/pika_repl_client_conn.h index 838a8c343a..5d8fa7b1cd 100644 --- a/include/pika_repl_client_conn.h +++ b/include/pika_repl_client_conn.h @@ -43,7 +43,7 @@ class PikaReplClientConn : public net::PbConn { std::shared_ptr resp; std::shared_ptr conn; ReplRespArg(std::shared_ptr _resp, std::shared_ptr _conn) - : resp(std::move(std::move(_resp))), conn(std::move(std::move(_conn))) {} + : resp(std::move(_resp)), conn(std::move(_conn)) {} }; }; diff --git a/include/pika_repl_client_thread.h b/include/pika_repl_client_thread.h index b21ac50023..92c29ee0a2 100644 --- a/include/pika_repl_client_thread.h +++ b/include/pika_repl_client_thread.h @@ -45,7 +45,7 @@ class PikaReplClientThread : public net::ClientThread { } int CreateWorkerSpecificData(void** data) const override { return 0; } int DeleteWorkerSpecificData(void* data) const override { return 0; } - void DestConnectFailedHandle(const std::string &ip_port, const std::string &reason) const override {} + void DestConnectFailedHandle(const std::string& ip_port, const std::string& reason) const override {} }; ReplClientConnFactory conn_factory_; diff --git a/include/pika_repl_server.h b/include/pika_repl_server.h index 8cf79a6428..68daf1ee4d 100644 --- a/include/pika_repl_server.h +++ b/include/pika_repl_server.h @@ -20,7 +20,7 @@ struct ReplServerTaskArg { std::shared_ptr req; std::shared_ptr conn; ReplServerTaskArg(std::shared_ptr _req, std::shared_ptr _conn) - : req(std::move(std::move(_req))), conn(std::move(std::move(_conn))) {} + : req(std::move(_req)), conn(std::move(_conn)) {} }; class PikaReplServer { @@ -42,8 +42,8 @@ class PikaReplServer { void KillAllConns(); private: - net::ThreadPool* server_tp_ = nullptr; - PikaReplServerThread* pika_repl_server_thread_ = nullptr; + 
std::unique_ptr server_tp_ = nullptr; + std::unique_ptr pika_repl_server_thread_ = nullptr; std::shared_mutex client_conn_rwlock_; std::map client_conn_map_; diff --git a/include/pika_rm.h b/include/pika_rm.h index 6e455b7227..a7d77fb711 100644 --- a/include/pika_rm.h +++ b/include/pika_rm.h @@ -166,7 +166,7 @@ class SyncSlavePartition : public SyncPartition { class PikaReplicaManager { public: PikaReplicaManager(); - ~PikaReplicaManager(); + ~PikaReplicaManager(){}; friend Cmd; @@ -221,7 +221,7 @@ class PikaReplicaManager { pstd::Status LostConnection(const std::string& ip, int port); // Update binlog win and try to send next binlog - pstd::Status UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& range_start, const LogOffset& range_end); + pstd::Status UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& offset_start, const LogOffset& offset_end); pstd::Status WakeUpBinlogSync(); @@ -254,8 +254,8 @@ class PikaReplicaManager { // every host owns a queue, the key is "ip+port" std::unordered_map>> write_queues_; - PikaReplClient* pika_repl_client_ = nullptr; - PikaReplServer* pika_repl_server_ = nullptr; + std::unique_ptr pika_repl_client_; + std::unique_ptr pika_repl_server_; }; #endif // PIKA_RM_H diff --git a/include/pika_rsync_service.h b/include/pika_rsync_service.h index 6171205405..a5c2bdf1e7 100644 --- a/include/pika_rsync_service.h +++ b/include/pika_rsync_service.h @@ -10,7 +10,7 @@ class PikaRsyncService { public: - PikaRsyncService(std::string raw_path, int port); + PikaRsyncService(const std::string& raw_path, int port); ~PikaRsyncService(); int StartRsync(); bool CheckRsyncAlive(); diff --git a/include/pika_server.h b/include/pika_server.h index c43a50f42f..691a0fea08 100644 --- a/include/pika_server.h +++ b/include/pika_server.h @@ -110,7 +110,7 @@ static std::set ShardingModeNotSupportCommands{kCmdNameMsetnx, kCmdNameSlaveof, kCmdNameDbSlaveof}; -extern PikaConf* g_pika_conf; +extern std::unique_ptr g_pika_conf; enum TaskType { 
kCompactAll, @@ -126,7 +126,7 @@ enum TaskType { kBgSave, }; -class PikaServer { +class PikaServer : public pstd::noncopyable { public: PikaServer(); ~PikaServer(); @@ -160,8 +160,8 @@ class PikaServer { * Table use */ void InitTableStruct(); - pstd::Status AddTableStruct(const std::string &table_name, uint32_t num); - pstd::Status DelTableStruct(const std::string &table_name); + pstd::Status AddTableStruct(const std::string& table_name, uint32_t num); + pstd::Status DelTableStruct(const std::string& table_name); std::shared_ptr GetTable(const std::string& table_name); std::set GetTablePartitionIds(const std::string& table_name); bool IsBgSaving(); @@ -332,8 +332,6 @@ class PikaServer { friend class PikaReplClientConn; friend class PkClusterInfoCmd; - PikaServer(PikaServer& ps) = delete; - void operator=(const PikaServer& ps) = delete; private: /* * TimingTask use @@ -371,8 +369,8 @@ class PikaServer { * Communicate with the client used */ int worker_num_ = 0; - PikaClientProcessor* pika_client_processor_ = nullptr; - PikaDispatchThread* pika_dispatch_thread_ = nullptr; + std::unique_ptr pika_client_processor_; + std::unique_ptr pika_dispatch_thread_ = nullptr; /* * Slave used @@ -412,22 +410,22 @@ class PikaServer { /* * Monitor used */ - PikaMonitorThread* pika_monitor_thread_ = nullptr; + std::unique_ptr pika_monitor_thread_; /* * Rsync used */ - PikaRsyncService* pika_rsync_service_ = nullptr; + std::unique_ptr pika_rsync_service_; /* * Pubsub used */ - net::PubSubThread* pika_pubsub_thread_ = nullptr; + std::unique_ptr pika_pubsub_thread_; /* * Communication used */ - PikaAuxiliaryThread* pika_auxiliary_thread_ = nullptr; + std::unique_ptr pika_auxiliary_thread_; /* * Slowlog used diff --git a/include/pika_set.h b/include/pika_set.h index c5fa019d94..94c2e6fc75 100644 --- a/include/pika_set.h +++ b/include/pika_set.h @@ -1,4 +1,4 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. + // Copyright (c) 2015-present, Qihoo, Inc. 
All rights reserved. // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. @@ -21,8 +21,8 @@ class SAddCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SAddCmd(*this); } private: @@ -40,8 +40,8 @@ class SPopCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SPopCmd(*this); } private: @@ -59,8 +59,8 @@ class SCardCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SCardCmd(*this); } private: @@ -77,8 +77,8 @@ class SMembersCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SMembersCmd(*this); } private: @@ -95,13 +95,14 @@ class SScanCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const 
HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SScanCmd(*this); } private: std::string key_, pattern_ = "*"; - int64_t cursor_ = 0, count_ = 10; + int64_t cursor_ = 0; + int64_t count_ = 10; void DoInitial() override; void Clear() override { pattern_ = "*"; @@ -118,8 +119,8 @@ class SRemCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SRemCmd(*this); } private: @@ -132,8 +133,8 @@ class SUnionCmd : public Cmd { public: SUnionCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SUnionCmd(*this); } private: @@ -145,8 +146,8 @@ class SUnionstoreCmd : public Cmd { public: SUnionstoreCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SUnionstoreCmd(*this); } private: @@ -159,8 +160,8 @@ class SInterCmd : public Cmd { public: SInterCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void 
Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SInterCmd(*this); } private: @@ -172,8 +173,8 @@ class SInterstoreCmd : public Cmd { public: SInterstoreCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SInterstoreCmd(*this); } private: @@ -191,8 +192,8 @@ class SIsmemberCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SIsmemberCmd(*this); } private: @@ -204,8 +205,8 @@ class SDiffCmd : public Cmd { public: SDiffCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SDiffCmd(*this); } private: @@ -217,8 +218,8 @@ class SDiffstoreCmd : public Cmd { public: SDiffstoreCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr 
partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SDiffstoreCmd(*this); } private: @@ -231,8 +232,8 @@ class SMoveCmd : public Cmd { public: SMoveCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SMoveCmd(*this); } private: @@ -249,8 +250,8 @@ class SRandmemberCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new SRandmemberCmd(*this); } private: diff --git a/include/pika_table.h b/include/pika_table.h index edff3b451b..3deb19276a 100644 --- a/include/pika_table.h +++ b/include/pika_table.h @@ -7,12 +7,13 @@ #define PIKA_TABLE_H_ #include + #include "storage/storage.h" #include "include/pika_command.h" #include "include/pika_partition.h" -class Table : public std::enable_shared_from_this
{ +class Table : public std::enable_shared_from_this
, public pstd::noncopyable { public: Table(std::string table_name, uint32_t partition_num, const std::string& db_path, const std::string& log_path); virtual ~Table(); @@ -56,11 +57,6 @@ class Table : public std::enable_shared_from_this
{ pstd::Status MovetoToTrash(const std::string& path); pstd::Status Leave(); - /* - * No allowed copy and copy assign - */ - Table(const Table&) = delete; - void operator=(const Table&) = delete; private: std::string table_name_; uint32_t partition_num_ = 0; diff --git a/include/pika_zset.h b/include/pika_zset.h index 781781bd4c..181035a43a 100644 --- a/include/pika_zset.h +++ b/include/pika_zset.h @@ -23,8 +23,8 @@ class ZAddCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZAddCmd(*this); } private: @@ -42,8 +42,8 @@ class ZCardCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZCardCmd(*this); } private: @@ -60,8 +60,8 @@ class ZScanCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZScanCmd(*this); } private: @@ -83,8 +83,8 @@ class ZIncrbyCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZIncrbyCmd(*this); } private: @@ -114,8 +114,8 @@ 
class ZRangeCmd : public ZsetRangeParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRangeCmd(*this); } private: @@ -131,8 +131,8 @@ class ZRevrangeCmd : public ZsetRangeParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRevrangeCmd(*this); } private: @@ -167,8 +167,8 @@ class ZRangebyscoreCmd : public ZsetRangebyscoreParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRangebyscoreCmd(*this); } private: @@ -185,8 +185,8 @@ class ZRevrangebyscoreCmd : public ZsetRangebyscoreParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRevrangebyscoreCmd(*this); } private: @@ -203,8 +203,8 @@ class ZCountCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void 
Merge() override {}; Cmd* Clone() override { return new ZCountCmd(*this); } private: @@ -227,8 +227,8 @@ class ZRemCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRemCmd(*this); } private: @@ -256,8 +256,8 @@ class ZUnionstoreCmd : public ZsetUIstoreParentCmd { public: ZUnionstoreCmd(const std::string& name, int arity, uint16_t flag) : ZsetUIstoreParentCmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZUnionstoreCmd(*this); } private: @@ -268,8 +268,8 @@ class ZInterstoreCmd : public ZsetUIstoreParentCmd { public: ZInterstoreCmd(const std::string& name, int arity, uint16_t flag) : ZsetUIstoreParentCmd(name, arity, flag) {} void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZInterstoreCmd(*this); } private: @@ -294,8 +294,8 @@ class ZRankCmd : public ZsetRankParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRankCmd(*this); } private: @@ -311,8 +311,8 @@ class ZRevrankCmd : public 
ZsetRankParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRevrankCmd(*this); } private: @@ -328,8 +328,8 @@ class ZScoreCmd : public ZsetRankParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZScoreCmd(*this); } private: @@ -363,8 +363,8 @@ class ZRangebylexCmd : public ZsetRangebylexParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRangebylexCmd(*this); } private: @@ -380,8 +380,8 @@ class ZRevrangebylexCmd : public ZsetRangebylexParentCmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRevrangebylexCmd(*this); } private: @@ -398,8 +398,8 @@ class ZLexcountCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() 
override { return new ZLexcountCmd(*this); } private: @@ -418,8 +418,8 @@ class ZRemrangebyrankCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRemrangebyrankCmd(*this); } private: @@ -438,8 +438,8 @@ class ZRemrangebyscoreCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRemrangebyscoreCmd(*this); } private: @@ -460,8 +460,8 @@ class ZRemrangebylexCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZRemrangebylexCmd(*this); } private: @@ -481,8 +481,8 @@ class ZPopmaxCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) override {}; + void Merge() override {}; Cmd* Clone() override { return new ZPopmaxCmd(*this); } private: @@ -500,8 +500,8 @@ class ZPopminCmd : public Cmd { return res; } void Do(std::shared_ptr partition = nullptr) override; - void Split(std::shared_ptr partition, const HintKeys& hint_keys) override{}; - void Merge() override{}; + void Split(std::shared_ptr partition, const HintKeys& hint_keys) 
override {}; + void Merge() override {}; Cmd* Clone() override { return new ZPopminCmd(*this); } private: diff --git a/pikatests.sh b/pikatests.sh new file mode 100755 index 0000000000..84439a8191 --- /dev/null +++ b/pikatests.sh @@ -0,0 +1,10 @@ +#!/bin/bash +rm -rf ./log +rm -rf .db +cp output/pika src/redis-server +cp conf/pika.conf tests/assets/default.conf + +tclsh tests/test_helper.tcl --clients 1 --single unit/$1 +rm src/redis-server +rm -rf ./log +rm -rf ./db \ No newline at end of file diff --git a/src/net/examples/bg_thread.cc b/src/net/examples/bg_thread.cc index 14e3ce3fd1..a8bc75c2bc 100644 --- a/src/net/examples/bg_thread.cc +++ b/src/net/examples/bg_thread.cc @@ -14,12 +14,12 @@ using namespace std; static pstd::Mutex print_lock; void task(void* arg) { + std::unique_ptr int_arg(static_cast(arg)); { std::lock_guard l(print_lock); - std::cout << " task : " << *((int*)arg) << std::endl; + std::cout << " task : " << *int_arg << std::endl; } sleep(1); - delete (int*)arg; } struct TimerItem { diff --git a/src/net/examples/binlog_parser_test.cc b/src/net/examples/binlog_parser_test.cc index 83594eb9fa..9077db4c17 100644 --- a/src/net/examples/binlog_parser_test.cc +++ b/src/net/examples/binlog_parser_test.cc @@ -16,7 +16,7 @@ int main(int argc, char* argv[]) { std::string ip(argv[1]); int port = atoi(argv[2]); - NetCli* rcli = NewRedisCli(); + std::unique_ptr rcli(NewRedisCli()); rcli->set_connect_timeout(3000); Status s = rcli->Connect(ip, port, "127.0.0.1"); diff --git a/src/net/examples/http_server.cc b/src/net/examples/http_server.cc index 04d989b96e..634083a43f 100644 --- a/src/net/examples/http_server.cc +++ b/src/net/examples/http_server.cc @@ -96,8 +96,8 @@ int main(int argc, char* argv[]) { SignalSetup(); - ConnFactory* my_conn_factory = new MyConnFactory(); - ServerThread* st = NewDispatchThread(port, 4, my_conn_factory, 1000); + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(port, 4, 
my_conn_factory.get(), 1000)); if (st->StartThread() != 0) { printf("StartThread error happened!\n"); @@ -109,8 +109,5 @@ int main(int argc, char* argv[]) { } st->StopThread(); - delete st; - delete my_conn_factory; - return 0; } diff --git a/src/net/examples/https_server.cc b/src/net/examples/https_server.cc index 0a592cea04..7b7243a825 100644 --- a/src/net/examples/https_server.cc +++ b/src/net/examples/https_server.cc @@ -97,8 +97,8 @@ int main(int argc, char* argv[]) { SignalSetup(); - ConnFactory* my_conn_factory = new MyConnFactory(); - ServerThread* st = NewDispatchThread(port, 4, my_conn_factory, 1000); + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(port, 4, my_conn_factory.get(), 1000)); #if __ENABLE_SSL if (st->EnableSecurity("/complete_path_to/host.crt", "/complete_path_to/host.key") != 0) { @@ -117,8 +117,5 @@ int main(int argc, char* argv[]) { } st->StopThread(); - delete st; - delete my_conn_factory; - return 0; } diff --git a/src/net/examples/mydispatch_srv.cc b/src/net/examples/mydispatch_srv.cc index 1618c4718c..23fc591b91 100644 --- a/src/net/examples/mydispatch_srv.cc +++ b/src/net/examples/mydispatch_srv.cc @@ -75,8 +75,8 @@ static void SignalSetup() { int main() { SignalSetup(); - ConnFactory* my_conn_factory = new MyConnFactory(); - ServerThread* st = NewDispatchThread(9211, 10, my_conn_factory, 1000); + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(9211, 10, my_conn_factory.get(), 1000)); if (st->StartThread() != 0) { printf("StartThread error happened!\n"); @@ -88,8 +88,5 @@ int main() { } st->StopThread(); - delete st; - delete my_conn_factory; - return 0; } diff --git a/src/net/examples/myholy_srv.cc b/src/net/examples/myholy_srv.cc index e82a593f58..27607e9a4e 100644 --- a/src/net/examples/myholy_srv.cc +++ b/src/net/examples/myholy_srv.cc @@ -80,9 +80,9 @@ int main(int argc, char* argv[]) { SignalSetup(); - ConnFactory* conn_factory = new 
MyConnFactory(); + std::unique_ptr conn_factory = std::make_unique(); - ServerThread* my_thread = NewHolyThread(my_port, conn_factory); + std::unique_ptr my_thread(NewHolyThread(my_port, conn_factory.get())); if (my_thread->StartThread() != 0) { printf("StartThread error happened!\n"); exit(-1); @@ -93,8 +93,5 @@ int main(int argc, char* argv[]) { } my_thread->StopThread(); - delete my_thread; - delete conn_factory; - return 0; } diff --git a/src/net/examples/myholy_srv_chandle.cc b/src/net/examples/myholy_srv_chandle.cc index 4d64e24120..a6f6b6cd97 100644 --- a/src/net/examples/myholy_srv_chandle.cc +++ b/src/net/examples/myholy_srv_chandle.cc @@ -107,7 +107,7 @@ int main(int argc, char* argv[]) { MyConnFactory conn_factory; MyServerHandle handle; - ServerThread* my_thread = NewHolyThread(my_port, &conn_factory, 1000, &handle); + std::unique_ptr my_thread(NewHolyThread(my_port, &conn_factory, 1000, &handle)); if (my_thread->StartThread() != 0) { printf("StartThread error happened!\n"); exit(-1); @@ -118,7 +118,5 @@ int main(int argc, char* argv[]) { } my_thread->StopThread(); - delete my_thread; - return 0; } diff --git a/src/net/examples/myproto_cli.cc b/src/net/examples/myproto_cli.cc index 898a56eb94..881b2b4f74 100644 --- a/src/net/examples/myproto_cli.cc +++ b/src/net/examples/myproto_cli.cc @@ -2,6 +2,7 @@ #include #include #include +#include #include "myproto.pb.h" #include "net/include/net_cli.h" @@ -18,7 +19,7 @@ int main(int argc, char* argv[]) { std::string ip(argv[1]); int port = atoi(argv[2]); - NetCli* cli = NewPbCli(); + std::unique_ptr cli(NewPbCli()); Status s = cli->Connect(ip, port); if (!s.ok()) { @@ -48,6 +49,5 @@ int main(int argc, char* argv[]) { } cli->Close(); - delete cli; return 0; } diff --git a/src/net/examples/myredis_cli.cc b/src/net/examples/myredis_cli.cc index 80ca33fda2..2fb053d076 100644 --- a/src/net/examples/myredis_cli.cc +++ b/src/net/examples/myredis_cli.cc @@ -29,7 +29,7 @@ MyConn::MyConn(int fd, const std::string& 
ip_port, Thread* thread, void* worker_ // Handle worker_specific_data ... } -ClientThread* client; +std::unique_ptr client; int sendto_port; int MyConn::DealMessage(const RedisCmdArgsType& argv, std::string* response) { sleep(1); @@ -95,10 +95,11 @@ int main(int argc, char* argv[]) { SignalSetup(); - ConnFactory* conn_factory = new MyConnFactory(); + std::unique_ptr conn_factory = std::make_unique(); + //"handle" will be deleted within "client->StopThread()" ClientHandle* handle = new ClientHandle(); - client = new ClientThread(conn_factory, 3000, 60, handle, nullptr); + client = std::make_unique(conn_factory.get(), 3000, 60, handle, nullptr); if (client->StartThread() != 0) { printf("StartThread error happened!\n"); @@ -107,11 +108,10 @@ int main(int argc, char* argv[]) { running.store(true); while (running.load()) { sleep(1); - DoCronWork(client, sendto_port); + DoCronWork(client.get(), sendto_port); } client->StopThread(); - delete client; - delete conn_factory; + client.reset(); return 0; } diff --git a/src/net/examples/myredis_srv.cc b/src/net/examples/myredis_srv.cc index bedc6c9e08..6672a412bb 100644 --- a/src/net/examples/myredis_srv.cc +++ b/src/net/examples/myredis_srv.cc @@ -96,9 +96,10 @@ int main(int argc, char* argv[]) { SignalSetup(); - ConnFactory* conn_factory = new MyConnFactory(); + std::unique_ptr conn_factory = std::make_unique(); - ServerThread* my_thread = new HolyThread(my_port, conn_factory, 1000, nullptr, false); + std::unique_ptr my_thread = + std::make_unique(my_port, conn_factory.get(), 1000, nullptr, false); if (my_thread->StartThread() != 0) { printf("StartThread error happened!\n"); exit(-1); @@ -109,8 +110,5 @@ int main(int argc, char* argv[]) { } my_thread->StopThread(); - delete my_thread; - delete conn_factory; - return 0; } diff --git a/src/net/examples/performance/client.cc b/src/net/examples/performance/client.cc index 3f73577ef0..a408b03308 100644 --- a/src/net/examples/performance/client.cc +++ 
b/src/net/examples/performance/client.cc @@ -18,7 +18,7 @@ int main(int argc, char* argv[]) { std::string ip(argv[1]); int port = atoi(argv[2]); - NetCli* cli = NewPbCli(); + std::unique_ptr cli(NewPbCli()); Status s = cli->Connect(ip, port); if (!s.ok()) { @@ -44,6 +44,5 @@ int main(int argc, char* argv[]) { } cli->Close(); - delete cli; return 0; } diff --git a/src/net/examples/performance/server.cc b/src/net/examples/performance/server.cc index 245457bd16..5b7b65cbc7 100644 --- a/src/net/examples/performance/server.cc +++ b/src/net/examples/performance/server.cc @@ -84,7 +84,7 @@ int main(int argc, char* argv[]) { SignalSetup(); - ServerThread* st_thread = NewDispatchThread(ip, port, 24, &conn_factory, 1000); + std::unique_ptr st_thread(NewDispatchThread(ip, port, 24, &conn_factory, 1000)); st_thread->StartThread(); uint64_t st, ed; @@ -99,7 +99,5 @@ int main(int argc, char* argv[]) { } st_thread->StopThread(); - delete st_thread; - return 0; } diff --git a/src/net/examples/redis_cli_test.cc b/src/net/examples/redis_cli_test.cc index 6decf32080..c2b40c33dd 100644 --- a/src/net/examples/redis_cli_test.cc +++ b/src/net/examples/redis_cli_test.cc @@ -30,7 +30,7 @@ int main(int argc, char* argv[]) { ret = net::SerializeRedisCommand(vec, &str); printf(" 2. 
Serialize by vec return %d, (%s)\n", ret, str.c_str()); - NetCli* rcli = NewRedisCli(); + std::unique_ptr rcli(NewRedisCli()); rcli->set_connect_timeout(3000); // redis v3.2+ protect mode will block other ip diff --git a/src/net/examples/redis_parser_test.cc b/src/net/examples/redis_parser_test.cc index 0b3cd72522..90bee28692 100644 --- a/src/net/examples/redis_parser_test.cc +++ b/src/net/examples/redis_parser_test.cc @@ -15,7 +15,7 @@ int main(int argc, char* argv[]) { std::string ip(argv[1]); int port = atoi(argv[2]); - NetCli* rcli = NewRedisCli(); + std::unique_ptr rcli(NewRedisCli()); rcli->set_connect_timeout(3000); Status s = rcli->Connect(ip, port, "127.0.0.1"); diff --git a/src/net/examples/simple_http_server.cc b/src/net/examples/simple_http_server.cc index a6cd597a65..73751c95e3 100644 --- a/src/net/examples/simple_http_server.cc +++ b/src/net/examples/simple_http_server.cc @@ -76,8 +76,8 @@ int main(int argc, char* argv[]) { SignalSetup(); - ConnFactory* my_conn_factory = new MyConnFactory(); - ServerThread* st = NewDispatchThread(port, 4, my_conn_factory, 1000); + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(port, 4, my_conn_factory.get(), 1000)); if (st->StartThread() != 0) { printf("StartThread error happened!\n"); @@ -89,8 +89,5 @@ int main(int argc, char* argv[]) { } st->StopThread(); - delete st; - delete my_conn_factory; - return 0; } diff --git a/src/net/examples/thread_pool_test.cc b/src/net/examples/thread_pool_test.cc index d40de4a9fc..d220b7f695 100644 --- a/src/net/examples/thread_pool_test.cc +++ b/src/net/examples/thread_pool_test.cc @@ -24,13 +24,13 @@ uint64_t NowMicros() { static pstd::Mutex print_lock; void task(void* arg) { + std::unique_ptr int_arg(static_cast(arg)); { std::lock_guard l(print_lock); - std::cout << " task : " << *((int*)arg) << " time(micros) " << NowMicros() << " thread id: " << pthread_self() + std::cout << " task : " << *int_arg << " time(micros) " << NowMicros() << 
" thread id: " << pthread_self() << std::endl; } sleep(1); - delete (int*)arg; } int main() { diff --git a/src/net/include/client_thread.h b/src/net/include/client_thread.h index 21011ec59c..6a6ee6c5ac 100644 --- a/src/net/include/client_thread.h +++ b/src/net/include/client_thread.h @@ -93,7 +93,7 @@ class ClientHandle { /* * DestConnectFailedHandle(...) will run the invoker's logic when socket connect failed */ - virtual void DestConnectFailedHandle(const std::string &ip_port, const std::string &reason) const { + virtual void DestConnectFailedHandle(const std::string& ip_port, const std::string& reason) const { UNUSED(ip_port); UNUSED(reason); } diff --git a/src/net/include/http_conn.h b/src/net/include/http_conn.h index e3c278fdff..4cabc3f914 100644 --- a/src/net/include/http_conn.h +++ b/src/net/include/http_conn.h @@ -12,6 +12,7 @@ #include "pstd/include/pstd_status.h" #include "pstd/include/xdebug.h" +#include "pstd/include/noncopyable.h" #include "net/include/net_conn.h" #include "net/include/net_define.h" @@ -128,7 +129,7 @@ class HTTPResponse { bool SerializeHeader(); }; -class HTTPHandles { +class HTTPHandles : public pstd::noncopyable { public: // You need implement these handles. 
/* @@ -164,12 +165,6 @@ class HTTPHandles { HTTPHandles() = default; virtual ~HTTPHandles() = default; - /* - * No allowed copy and copy assign - */ - HTTPHandles(const HTTPHandles&) = delete; - void operator=(const HTTPHandles&) = delete; - protected: /* * Assigned in ServerHandle's CreateWorkerSpecificData diff --git a/src/net/include/net_cli.h b/src/net/include/net_cli.h index 51f437787f..8fff4b9c4b 100644 --- a/src/net/include/net_cli.h +++ b/src/net/include/net_cli.h @@ -6,13 +6,15 @@ #ifndef NET_INCLUDE_NET_CLI_H_ #define NET_INCLUDE_NET_CLI_H_ +#include #include #include "pstd/include/pstd_status.h" +#include "pstd/include/noncopyable.h" namespace net { -class NetCli { +class NetCli : public pstd::noncopyable { public: explicit NetCli(const std::string& ip = "", int port = 0); virtual ~NetCli(); @@ -39,16 +41,13 @@ class NetCli { int set_recv_timeout(int recv_timeout); void set_connect_timeout(int connect_timeout); - NetCli(const NetCli&) = delete; - void operator=(const NetCli&) = delete; - protected: pstd::Status SendRaw(void* buf, size_t count); pstd::Status RecvRaw(void* buf, size_t* count); private: struct Rep; - Rep* rep_; + std::unique_ptr rep_; int set_tcp_nodelay(); }; diff --git a/src/net/include/net_conn.h b/src/net/include/net_conn.h index 365fb24a92..fab23f71b2 100644 --- a/src/net/include/net_conn.h +++ b/src/net/include/net_conn.h @@ -19,12 +19,13 @@ #include "net/include/server_thread.h" #include "net/src/net_multiplexer.h" #include "pstd/include/testutil.h" +#include "pstd/include/noncopyable.h" namespace net { class Thread; -class NetConn : public std::enable_shared_from_this { +class NetConn : public std::enable_shared_from_this, public pstd::noncopyable { public: NetConn(int fd, std::string ip_port, Thread* thread, NetMultiplexer* mpx = nullptr); #ifdef __ENABLE_SSL @@ -93,11 +94,6 @@ class NetConn : public std::enable_shared_from_this { bool security() { return ssl_ != nullptr; } #endif - /* - * No allowed copy and copy assign operator - 
*/ - NetConn(const NetConn&) = delete; - void operator=(const NetConn&) = delete; private: int fd_ = -1; diff --git a/src/net/include/net_pubsub.h b/src/net/include/net_pubsub.h index 5f3cee36b2..28ef98a764 100644 --- a/src/net/include/net_pubsub.h +++ b/src/net/include/net_pubsub.h @@ -62,7 +62,7 @@ class PubSubThread : public Thread { }; struct ConnHandle { - ConnHandle(std::shared_ptr pc, ReadyState state = kNotReady) : conn(std::move(std::move(std::move(std::move(pc))))), ready_state(state) {} + ConnHandle(std::shared_ptr pc, ReadyState state = kNotReady) : conn(std::move(pc)), ready_state(state) {} void UpdateReadyState(const ReadyState& state); bool IsReady(); std::shared_ptr conn; @@ -72,9 +72,6 @@ class PubSubThread : public Thread { void UpdateConnReadyState(int fd, const ReadyState& state); bool IsReady(int fd); - // No copying allowed - PubSubThread(const PubSubThread&) = delete; - void operator=(const PubSubThread&) = delete; private: void RemoveConn(const std::shared_ptr& conn); diff --git a/src/net/include/net_thread.h b/src/net/include/net_thread.h index 2b63ca960a..133a611dea 100644 --- a/src/net/include/net_thread.h +++ b/src/net/include/net_thread.h @@ -11,10 +11,11 @@ #include #include "pstd/include/pstd_mutex.h" +#include "pstd/include/noncopyable.h" namespace net { -class Thread { +class Thread : public pstd::noncopyable { public: Thread(); virtual ~Thread(); @@ -35,12 +36,6 @@ class Thread { void set_thread_name(const std::string& name) { thread_name_ = name; } - /* - * No allowed copy and copy assign - */ - Thread(const Thread&) = delete; - void operator=(const Thread&) = delete; - protected: std::atomic should_stop_; @@ -52,7 +47,6 @@ class Thread { bool running_{false}; pthread_t thread_id_{}; std::string thread_name_; - }; } // namespace net diff --git a/src/net/include/server_thread.h b/src/net/include/server_thread.h index 189594b0aa..c1dce29930 100644 --- a/src/net/include/server_thread.h +++ b/src/net/include/server_thread.h @@ -188,7 
+188,7 @@ class ServerThread : public Thread { */ int port_ = -1; std::set ips_; - std::vector server_sockets_; + std::vector> server_sockets_; std::set server_fds_; virtual int InitHandle(); diff --git a/src/net/include/thread_pool.h b/src/net/include/thread_pool.h index 5ac6f8f51c..466df487d0 100644 --- a/src/net/include/thread_pool.h +++ b/src/net/include/thread_pool.h @@ -32,7 +32,7 @@ struct TimeTask { bool operator<(const TimeTask& task) const { return exec_time > task.exec_time; } }; -class ThreadPool { +class ThreadPool : public pstd::noncopyable { public: class Worker { public: @@ -41,11 +41,6 @@ class ThreadPool { int start(); int stop(); - /* - * No allowed copy and copy assign - */ - Worker(const Worker&) = delete; - void operator=(const Worker&) = delete; private: pthread_t thread_id_; diff --git a/src/net/src/backend_thread.cc b/src/net/src/backend_thread.cc index bc12666601..eecb027d52 100644 --- a/src/net/src/backend_thread.cc +++ b/src/net/src/backend_thread.cc @@ -39,13 +39,13 @@ BackendThread::BackendThread(ConnFactory* conn_factory, int cron_interval, int k BackendThread::~BackendThread() = default; int BackendThread::StartThread() { - if (handle_ == nullptr) { + if (!handle_) { handle_ = new BackendHandle(); own_handle_ = true; } own_handle_ = false; int res = handle_->CreateWorkerSpecificData(&private_data_); - if (res != 0) { + if (res) { return res; } return Thread::StartThread(); @@ -54,7 +54,7 @@ int BackendThread::StartThread() { int BackendThread::StopThread() { if (private_data_ != nullptr) { int res = handle_->DeleteWorkerSpecificData(private_data_); - if (res != 0) { + if (res) { return res; } private_data_ = nullptr; @@ -100,7 +100,7 @@ Status BackendThread::Close(const int fd) { } Status BackendThread::ProcessConnectStatus(NetFiredEvent* pfe, int* should_close) { - if ((pfe->mask & kErrorEvent) != 0) { + if (pfe->mask & kErrorEvent) { *should_close = 1; return Status::Corruption("POLLERR or POLLHUP"); } @@ -111,7 +111,7 @@ Status 
BackendThread::ProcessConnectStatus(NetFiredEvent* pfe, int* should_close *should_close = 1; return Status::Corruption("Get Socket opt failed"); } - if (val != 0) { + if (val) { *should_close = 1; return Status::Corruption("Get socket error " + std::to_string(val)); } @@ -149,11 +149,11 @@ Status BackendThread::Connect(const std::string& dst_ip, const int dst_port, int hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; - if (fd == nullptr) { + if (!fd) { return Status::InvalidArgument("fd argument is nullptr"); } // We do not handle IPv6 - if ((rv = getaddrinfo(dst_ip.c_str(), cport, &hints, &servinfo)) != 0) { + if (rv = getaddrinfo(dst_ip.c_str(), cport, &hints, &servinfo)) { return Status::IOError("connect getaddrinfo error for ", dst_ip); } for (p = servinfo; p != nullptr; p = p->ai_next) { @@ -194,7 +194,7 @@ Status BackendThread::Connect(const std::string& dst_ip, const int dst_port, int freeaddrinfo(servinfo); return s; } - if (p == nullptr) { + if (!p) { s = Status::IOError(strerror(errno), "Can't create socket "); return s; } @@ -246,10 +246,10 @@ void BackendThread::DoCronTask() { net_multiplexer_->NetDelEvent(conn->fd(), 0); close(conn->fd()); handle_->FdTimeoutHandle(conn->fd(), conn->ip_port()); - if (conns_.count(conn->fd()) != 0U) { + if (conns_.count(conn->fd())) { conns_.erase(conn->fd()); } - if (connecting_fds_.count(conn->fd()) != 0U) { + if (connecting_fds_.count(conn->fd())) { connecting_fds_.erase(conn->fd()); } iter = conns_.erase(iter); @@ -309,7 +309,7 @@ void BackendThread::NotifyClose(const int fd) { } void BackendThread::ProcessNotifyEvents(const NetFiredEvent* pfe) { - if ((pfe->mask & kReadable) != 0) { + if (pfe->mask & kReadable) { char bb[2048]; int32_t nread = read(net_multiplexer_->NotifyReceiveFd(), bb, 2048); if (nread == 0) { @@ -322,7 +322,7 @@ void BackendThread::ProcessNotifyEvents(const NetFiredEvent* pfe) { std::lock_guard l(mu_); if (ti.notify_type() == kNotiWrite) { if (conns_.find(fd) == conns_.end()) { - // 
TODO(): need clean and notify? + // TODO: need clean and notify? continue; } else { // connection exist @@ -390,7 +390,7 @@ void* BackendThread::ThreadMain() { nfds = net_multiplexer_->NetPoll(timeout); for (int i = 0; i < nfds; i++) { pfe = (net_multiplexer_->FiredEvents()) + i; - if (pfe == nullptr) { + if (!pfe) { continue; } @@ -413,7 +413,7 @@ void* BackendThread::ThreadMain() { } } - if (connecting_fds_.count(pfe->fd) != 0U) { + if (connecting_fds_.count(pfe->fd)) { Status s = ProcessConnectStatus(pfe, &should_close); if (!s.ok()) { handle_->DestConnectFailedHandle(conn->ip_port(), s.ToString()); @@ -421,7 +421,7 @@ void* BackendThread::ThreadMain() { connecting_fds_.erase(pfe->fd); } - if ((should_close == 0) && ((pfe->mask & kWritable) != 0) && conn->is_reply()) { + if ((should_close == 0) && (pfe->mask & kWritable) && conn->is_reply()) { WriteStatus write_status = conn->SendReply(); conn->set_last_interaction(now); if (write_status == kWriteAll) { @@ -435,7 +435,7 @@ void* BackendThread::ThreadMain() { } } - if ((should_close == 0) && ((pfe->mask & kReadable) != 0)) { + if (!should_close && (pfe->mask & kReadable)) { ReadStatus read_status = conn->GetRequest(); conn->set_last_interaction(now); if (read_status == kReadAll) { @@ -447,7 +447,7 @@ void* BackendThread::ThreadMain() { } } - if (((pfe->mask & kErrorEvent) != 0) || (should_close != 0)) { + if ((pfe->mask & kErrorEvent) || (should_close)) { { LOG(INFO) << "close connection " << pfe->fd << " reason " << pfe->mask << " " << should_close; net_multiplexer_->NetDelEvent(pfe->fd, 0); @@ -455,7 +455,7 @@ void* BackendThread::ThreadMain() { mu_.lock(); conns_.erase(pfe->fd); mu_.unlock(); - if (connecting_fds_.count(conn->fd()) != 0U) { + if (connecting_fds_.count(conn->fd())) { connecting_fds_.erase(conn->fd()); } } diff --git a/src/net/src/client_thread.cc b/src/net/src/client_thread.cc index 4e92e07097..9bc6222b1f 100644 --- a/src/net/src/client_thread.cc +++ b/src/net/src/client_thread.cc @@ -39,13 
+39,13 @@ ClientThread::ClientThread(ConnFactory* conn_factory, int cron_interval, int kee ClientThread::~ClientThread() = default; int ClientThread::StartThread() { - if (handle_ == nullptr) { + if (!handle_) { handle_ = new ClientHandle(); own_handle_ = true; } own_handle_ = false; int res = handle_->CreateWorkerSpecificData(&private_data_); - if (res != 0) { + if (res) { return res; } return Thread::StartThread(); @@ -54,7 +54,7 @@ int ClientThread::StartThread() { int ClientThread::StopThread() { if (private_data_ != nullptr) { int res = handle_->DeleteWorkerSpecificData(private_data_); - if (res != 0) { + if (res) { return res; } private_data_ = nullptr; @@ -94,7 +94,7 @@ Status ClientThread::Close(const std::string& ip, const int port) { } Status ClientThread::ProcessConnectStatus(NetFiredEvent* pfe, int* should_close) { - if ((pfe->mask & kErrorEvent) != 0) { + if (pfe->mask & kErrorEvent) { *should_close = 1; return Status::Corruption("POLLERR or POLLHUP"); } @@ -105,7 +105,7 @@ Status ClientThread::ProcessConnectStatus(NetFiredEvent* pfe, int* should_close) *should_close = 1; return Status::Corruption("Get Socket opt failed"); } - if (val != 0) { + if (val) { *should_close = 1; return Status::Corruption("Get socket error " + std::to_string(val)); } @@ -142,7 +142,7 @@ Status ClientThread::ScheduleConnect(const std::string& dst_ip, int dst_port) { hints.ai_socktype = SOCK_STREAM; // We do not handle IPv6 - if ((rv = getaddrinfo(dst_ip.c_str(), cport, &hints, &servinfo)) != 0) { + if (rv = getaddrinfo(dst_ip.c_str(), cport, &hints, &servinfo)) { return Status::IOError("connect getaddrinfo error for ", dst_ip); } for (p = servinfo; p != nullptr; p = p->ai_next) { @@ -183,7 +183,7 @@ Status ClientThread::ScheduleConnect(const std::string& dst_ip, int dst_port) { return s; } - if (p == nullptr) { + if (!p) { s = Status::IOError(strerror(errno), "Can't create socket "); return s; } @@ -226,10 +226,10 @@ void ClientThread::DoCronTask() { // will try to send 
remaining by reconnecting close(conn->fd()); handle_->FdTimeoutHandle(conn->fd(), conn->ip_port()); - if (ipport_conns_.count(conn->ip_port()) != 0U) { + if (ipport_conns_.count(conn->ip_port())) { ipport_conns_.erase(conn->ip_port()); } - if (connecting_fds_.count(conn->fd()) != 0U) { + if (connecting_fds_.count(conn->fd())) { connecting_fds_.erase(conn->fd()); } iter = fd_conns_.erase(iter); @@ -303,7 +303,7 @@ void ClientThread::NotifyWrite(const std::string& ip_port) { } void ClientThread::ProcessNotifyEvents(const NetFiredEvent* pfe) { - if ((pfe->mask & kReadable) != 0) { + if (pfe->mask & kReadable) { char bb[2048]; int32_t nread = read(net_multiplexer_->NotifyReceiveFd(), bb, 2048); if (nread == 0) { @@ -340,7 +340,7 @@ void ClientThread::ProcessNotifyEvents(const NetFiredEvent* pfe) { // get msg from to_send_ std::vector& msgs = iter->second; for (auto& msg : msgs) { - if (ipport_conns_[ip_port]->WriteResp(msg) != 0) { + if (ipport_conns_[ip_port]->WriteResp(msg)) { to_send_[ip_port].push_back(msg); NotifyWrite(ip_port); } @@ -398,7 +398,7 @@ void* ClientThread::ThreadMain() { nfds = net_multiplexer_->NetPoll(timeout); for (int i = 0; i < nfds; i++) { pfe = (net_multiplexer_->FiredEvents()) + i; - if (pfe == nullptr) { + if (!pfe) { continue; } @@ -417,7 +417,7 @@ void* ClientThread::ThreadMain() { std::shared_ptr conn = iter->second; - if (connecting_fds_.count(pfe->fd) != 0U) { + if (connecting_fds_.count(pfe->fd)) { Status s = ProcessConnectStatus(pfe, &should_close); if (!s.ok()) { handle_->DestConnectFailedHandle(conn->ip_port(), s.ToString()); @@ -425,7 +425,7 @@ void* ClientThread::ThreadMain() { connecting_fds_.erase(pfe->fd); } - if ((should_close == 0) && ((pfe->mask & kWritable) != 0) && conn->is_reply()) { + if ((should_close == 0) && (pfe->mask & kWritable) && conn->is_reply()) { WriteStatus write_status = conn->SendReply(); conn->set_last_interaction(now); if (write_status == kWriteAll) { @@ -439,7 +439,7 @@ void* ClientThread::ThreadMain() { 
} } - if ((should_close == 0) && ((pfe->mask & kReadable) != 0)) { + if ((should_close == 0) && (pfe->mask & kReadable)) { ReadStatus read_status = conn->GetRequest(); conn->set_last_interaction(now); if (read_status == kReadAll) { @@ -452,16 +452,16 @@ void* ClientThread::ThreadMain() { } } - if (((pfe->mask & kErrorEvent) != 0) || (should_close != 0)) { + if ((pfe->mask & kErrorEvent) || should_close) { { LOG(INFO) << "close connection " << pfe->fd << " reason " << pfe->mask << " " << should_close; net_multiplexer_->NetDelEvent(pfe->fd, 0); CloseFd(conn); fd_conns_.erase(pfe->fd); - if (ipport_conns_.count(conn->ip_port()) != 0U) { + if (ipport_conns_.count(conn->ip_port())) { ipport_conns_.erase(conn->ip_port()); } - if (connecting_fds_.count(conn->fd()) != 0U) { + if (connecting_fds_.count(conn->fd())) { connecting_fds_.erase(conn->fd()); } } diff --git a/src/net/src/dispatch_thread.cc b/src/net/src/dispatch_thread.cc index 1035aec442..2c913e5ce4 100644 --- a/src/net/src/dispatch_thread.cc +++ b/src/net/src/dispatch_thread.cc @@ -20,9 +20,8 @@ DispatchThread::DispatchThread(int port, int work_num, ConnFactory* conn_factory last_thread_(0), work_num_(work_num), queue_limit_(queue_limit) { - worker_thread_ = new WorkerThread*[work_num_]; for (int i = 0; i < work_num_; i++) { - worker_thread_[i] = new WorkerThread(conn_factory, this, queue_limit, cron_interval); + worker_thread_.emplace_back(std::make_unique(conn_factory, this, queue_limit, cron_interval)); } } @@ -32,9 +31,9 @@ DispatchThread::DispatchThread(const std::string& ip, int port, int work_num, Co last_thread_(0), work_num_(work_num), queue_limit_(queue_limit) { - worker_thread_ = new WorkerThread*[work_num_]; + for (int i = 0; i < work_num_; i++) { - worker_thread_[i] = new WorkerThread(conn_factory, this, queue_limit, cron_interval); + worker_thread_.emplace_back(std::make_unique(conn_factory, this, queue_limit, cron_interval)); } } @@ -44,23 +43,21 @@ DispatchThread::DispatchThread(const std::set& 
ips, int port, int w last_thread_(0), work_num_(work_num), queue_limit_(queue_limit) { - worker_thread_ = new WorkerThread*[work_num_]; + for (int i = 0; i < work_num_; i++) { - worker_thread_[i] = new WorkerThread(conn_factory, this, queue_limit, cron_interval); + worker_thread_.emplace_back(std::make_unique(conn_factory, this, queue_limit, cron_interval)); + } } DispatchThread::~DispatchThread() { - for (int i = 0; i < work_num_; i++) { - delete worker_thread_[i]; - } - delete[] worker_thread_; + } int DispatchThread::StartThread() { for (int i = 0; i < work_num_; i++) { int ret = handle_->CreateWorkerSpecificData(&(worker_thread_[i]->private_data_)); - if (ret != 0) { + if (ret) { return ret; } @@ -68,7 +65,7 @@ int DispatchThread::StartThread() { worker_thread_[i]->set_thread_name("WorkerThread"); } ret = worker_thread_[i]->StartThread(); - if (ret != 0) { + if (ret) { return ret; } } @@ -81,12 +78,12 @@ int DispatchThread::StopThread() { } for (int i = 0; i < work_num_; i++) { int ret = worker_thread_[i]->StopThread(); - if (ret != 0) { + if (ret) { return ret; } if (worker_thread_[i]->private_data_ != nullptr) { ret = handle_->DeleteWorkerSpecificData(worker_thread_[i]->private_data_); - if (ret != 0) { + if (ret) { return ret; } worker_thread_[i]->private_data_ = nullptr; @@ -129,7 +126,7 @@ std::shared_ptr DispatchThread::MoveConnOut(int fd) { } void DispatchThread::MoveConnIn(std::shared_ptr conn, const NotifyType& type) { - WorkerThread* worker_thread = worker_thread_[last_thread_]; + std::unique_ptr& worker_thread = worker_thread_[last_thread_]; bool success = worker_thread->MoveConnIn(conn, type, true); if (success) { last_thread_ = (last_thread_ + 1) % work_num_; @@ -155,7 +152,7 @@ void DispatchThread::HandleNewConn(const int connfd, const std::string& ip_port) int next_thread = last_thread_; bool find = false; for (int cnt = 0; cnt < work_num_; cnt++) { - WorkerThread* worker_thread = worker_thread_[next_thread]; + std::unique_ptr& worker_thread = 
worker_thread_[next_thread]; find = worker_thread->MoveConnIn(ti, false); if (find) { last_thread_ = (next_thread + 1) % work_num_; diff --git a/src/net/src/dispatch_thread.h b/src/net/src/dispatch_thread.h index 26a816cd07..fb52016666 100644 --- a/src/net/src/dispatch_thread.h +++ b/src/net/src/dispatch_thread.h @@ -54,9 +54,6 @@ class DispatchThread : public ServerThread { void HandleNewConn(int connfd, const std::string& ip_port) override; void SetQueueLimit(int queue_limit) override; - // No copying allowed - DispatchThread(const DispatchThread&) = delete; - void operator=(const DispatchThread&) = delete; private: /* @@ -68,7 +65,7 @@ class DispatchThread : public ServerThread { /* * This is the work threads */ - WorkerThread** worker_thread_; + std::vector> worker_thread_; int queue_limit_; std::map localdata_; diff --git a/src/net/src/holy_thread.cc b/src/net/src/holy_thread.cc index 1971347536..2d63eb4a5c 100644 --- a/src/net/src/holy_thread.cc +++ b/src/net/src/holy_thread.cc @@ -72,16 +72,16 @@ std::shared_ptr HolyThread::get_conn(int fd) { int HolyThread::StartThread() { int ret = handle_->CreateWorkerSpecificData(&private_data_); - if (ret != 0) { + if (ret) { return ret; } return ServerThread::StartThread(); } int HolyThread::StopThread() { - if (private_data_ != nullptr) { + if (private_data_) { int ret = handle_->DeleteWorkerSpecificData(private_data_); - if (ret != 0) { + if (ret) { return ret; } private_data_ = nullptr; @@ -101,7 +101,7 @@ void HolyThread::HandleNewConn(const int connfd, const std::string& ip_port) { } void HolyThread::HandleConnEvent(NetFiredEvent* pfe) { - if (pfe == nullptr) { + if (!pfe) { return; } std::shared_ptr in_conn = nullptr; @@ -292,7 +292,7 @@ void HolyThread::ProcessNotifyEvents(const net::NetFiredEvent* pfe) { } else if (ti.notify_type() == net::kNotiClose) { LOG(INFO) << "receive noti close"; std::shared_ptr conn = get_conn(fd); - if (conn == nullptr) { + if (!conn) { continue; } CloseFd(conn); diff --git 
a/src/net/src/http_conn.cc b/src/net/src/http_conn.cc index b841d58b9c..075fef1f37 100644 --- a/src/net/src/http_conn.cc +++ b/src/net/src/http_conn.cc @@ -123,7 +123,7 @@ bool HTTPRequest::ParseHeadLine(const char* data, int line_start, int line_end) bool HTTPRequest::ParseGetUrl() { path_ = url_; // Format path - if ((headers_.count("host") != 0U) && path_.find(headers_["host"]) != std::string::npos && + if ((headers_.count("host")) && path_.find(headers_["host"]) != std::string::npos && path_.size() > (7 + headers_["host"].size())) { // http://www.xxx.xxx/path_/to path_.assign(path_.substr(7 + headers_["host"].size())); @@ -169,7 +169,7 @@ bool HTTPRequest::ParseParameters(std::string& data, size_t line_start) { int HTTPRequest::ParseHeader() { rbuf_[rbuf_pos_] = '\0'; // Avoid strstr() parsing expire char char* sep_pos = strstr(rbuf_, "\r\n\r\n"); - if (sep_pos == nullptr) { + if (!sep_pos) { // Haven't find header return 0; } @@ -200,13 +200,13 @@ int HTTPRequest::ParseHeader() { return -1; } - remain_recv_len_ = headers_.count("content-length") != 0U ? std::stoul(headers_.at("content-length")) : 0; + remain_recv_len_ = headers_.count("content-length") ? 
std::stoul(headers_.at("content-length")) : 0; - if (headers_.count("content-type") != 0U) { + if (headers_.count("content-type")) { content_type_.assign(headers_.at("content-type")); } - if ((headers_.count("expect") != 0U) && + if ((headers_.count("expect")) && (headers_.at("expect") == "100-Continue" || headers_.at("expect") == "100-continue")) { reply_100continue_ = true; } @@ -268,7 +268,7 @@ HTTPConn::HTTPConn(const int fd, const std::string& ip_port, Thread* thread, std #ifdef __ENABLE_SSL // security_(thread->security()), #endif - handles_(std::move(std::move(handles))) { + handles_(std::move(handles)) { handles_->worker_specific_data_ = worker_specific_data; // this pointer is safe here request_ = new HTTPRequest(this); @@ -297,14 +297,14 @@ std::string HTTPRequest::url() const { return url_; } std::string HTTPRequest::path() const { return path_; } std::string HTTPRequest::query_value(const std::string& field) const { - if (query_params_.count(field) != 0U) { + if (query_params_.count(field)) { return query_params_.at(field); } return ""; } std::string HTTPRequest::postform_value(const std::string& field) const { - if (postform_params_.count(field) != 0U) { + if (postform_params_.count(field)) { return postform_params_.at(field); } return ""; @@ -498,7 +498,7 @@ void HTTPResponse::SetHeaders(const std::string& key, const size_t value) { head void HTTPResponse::SetContentLength(uint64_t size) { remain_send_len_ = size; - if ((headers_.count("Content-Length") != 0U) || (headers_.count("content-length") != 0U)) { + if ((headers_.count("Content-Length")) || (headers_.count("content-length"))) { return; } SetHeaders("Content-Length", size); diff --git a/src/net/src/net_cli.cc b/src/net/src/net_cli.cc index 10d14e4024..41c1e7aaf0 100644 --- a/src/net/src/net_cli.cc +++ b/src/net/src/net_cli.cc @@ -36,19 +36,16 @@ struct NetCli::Rep { Rep(std::string ip, int port) : peer_ip(std::move(ip)),peer_port(port) {} }; -NetCli::NetCli(const std::string& ip, const int 
port) : rep_(new Rep(ip, port)) {} +NetCli::NetCli(const std::string& ip, const int port) : rep_(std::make_unique(ip, port)) {} -NetCli::~NetCli() { - Close(); - delete rep_; -} +NetCli::~NetCli() { Close(); } bool NetCli::Available() const { return rep_->available; } Status NetCli::Connect(const std::string& bind_ip) { return Connect(rep_->peer_ip, rep_->peer_port, bind_ip); } Status NetCli::Connect(const std::string& ip, const int port, const std::string& bind_ip) { - Rep* r = rep_; + std::unique_ptr& r = rep_; Status s; int rv; char cport[6]; @@ -143,7 +140,7 @@ Status NetCli::Connect(const std::string& ip, const int port, const std::string& rep_->available = true; return s; } - if (p == nullptr) { + if (!p) { s = Status::IOError(strerror(errno), "Can't create socket "); return s; } @@ -239,7 +236,7 @@ Status NetCli::SendRaw(void* buf, size_t count) { } Status NetCli::RecvRaw(void* buf, size_t* count) { - Rep* r = rep_; + std::unique_ptr& r = rep_; char* rbuf = reinterpret_cast(buf); size_t nleft = *count; size_t pos = 0; @@ -278,7 +275,7 @@ void NetCli::Close() { void NetCli::set_connect_timeout(int connect_timeout) { rep_->connect_timeout = connect_timeout; } int NetCli::set_send_timeout(int send_timeout) { - Rep* r = rep_; + std::unique_ptr& r = rep_; int ret = 0; if (send_timeout > 0) { r->send_timeout = send_timeout; @@ -289,7 +286,7 @@ int NetCli::set_send_timeout(int send_timeout) { } int NetCli::set_recv_timeout(int recv_timeout) { - Rep* r = rep_; + std::unique_ptr& r = rep_; int ret = 0; if (recv_timeout > 0) { r->recv_timeout = recv_timeout; @@ -300,7 +297,7 @@ int NetCli::set_recv_timeout(int recv_timeout) { } int NetCli::set_tcp_nodelay() { - Rep* r = rep_; + std::unique_ptr& r = rep_; int val = 1; int ret = 0; ret = setsockopt(r->sockfd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)); diff --git a/src/net/src/net_epoll.cc b/src/net/src/net_epoll.cc index 59f4f4f152..2215a62764 100644 --- a/src/net/src/net_epoll.cc +++ b/src/net/src/net_epoll.cc @@ 
-40,10 +40,10 @@ int NetEpoll::NetAddEvent(int fd, int mask) { ee.data.fd = fd; ee.events = 0; - if ((mask & kReadable) != 0) { + if (mask & kReadable) { ee.events |= EPOLLIN; } - if ((mask & kWritable) != 0) { + if (mask & kWritable) { ee.events |= EPOLLOUT; } @@ -56,10 +56,10 @@ int NetEpoll::NetModEvent(int fd, int old_mask, int mask) { ee.events = (old_mask | mask); ee.events = 0; - if (((old_mask | mask) & kReadable) != 0) { + if ((old_mask | mask) & kReadable) { ee.events |= EPOLLIN; } - if (((old_mask | mask) & kWritable) != 0) { + if ((old_mask | mask) & kWritable) { ee.events |= EPOLLOUT; } return epoll_ctl(multiplexer_, EPOLL_CTL_MOD, fd, &ee); @@ -75,7 +75,7 @@ int NetEpoll::NetDelEvent(int fd, [[maybe_unused]] int mask) { } int NetEpoll::NetPoll(int timeout) { - int num_events = epoll_wait(multiplexer_, (events_).data(), NET_MAX_CLIENTS, timeout); + int num_events = epoll_wait(multiplexer_, &events_[0], NET_MAX_CLIENTS, timeout); if (num_events <= 0) { return 0; } @@ -85,15 +85,15 @@ int NetEpoll::NetPoll(int timeout) { ev.fd = events_[i].data.fd; ev.mask = 0; - if ((events_[i].events & EPOLLIN) != 0) { + if (events_[i].events & EPOLLIN) { ev.mask |= kReadable; } - if ((events_[i].events & EPOLLOUT) != 0) { + if (events_[i].events & EPOLLOUT) { ev.mask |= kWritable; } - if ((events_[i].events & (EPOLLERR | EPOLLHUP)) != 0) { + if (events_[i].events & (EPOLLERR | EPOLLHUP)) { ev.mask |= kErrorEvent; } } diff --git a/src/net/src/net_interfaces.cc b/src/net/src/net_interfaces.cc index f241274b82..1fef632761 100644 --- a/src/net/src/net_interfaces.cc +++ b/src/net/src/net_interfaces.cc @@ -114,7 +114,7 @@ std::string GetIpByInterface(const std::string& network_interface) { std::string host; for (ifa = ifAddrStruct; ifa != nullptr; ifa = ifa->ifa_next) { - if (ifa->ifa_addr == nullptr) { + if (!(ifa->ifa_addr)) { continue; } @@ -141,7 +141,7 @@ std::string GetIpByInterface(const std::string& network_interface) { freeifaddrs(ifAddrStruct); } - if (ifa == 
nullptr) { + if (!ifa) { LOG(ERROR) << "error network interface: " << network_interface; } diff --git a/src/net/src/net_item.h b/src/net/src/net_item.h index 2c88f6ac39..a6863e376b 100644 --- a/src/net/src/net_item.h +++ b/src/net/src/net_item.h @@ -17,7 +17,7 @@ class NetItem { public: NetItem() = default; NetItem(const int fd, std::string ip_port, const NotifyType& type = kNotiConnect) - : fd_(fd), ip_port_(std::move(std::move(ip_port))), notify_type_(type) {} + : fd_(fd), ip_port_(std::move(ip_port)), notify_type_(type) {} int fd() const { return fd_; } std::string ip_port() const { return ip_port_; } diff --git a/src/net/src/net_kqueue.cc b/src/net/src/net_kqueue.cc index 1bd77a3800..16c831ff37 100644 --- a/src/net/src/net_kqueue.cc +++ b/src/net/src/net_kqueue.cc @@ -36,12 +36,12 @@ int NetKqueue::NetAddEvent(int fd, int mask) { int cnt = 0; struct kevent change[2]; - if ((mask & kReadable) != 0) { + if (mask & kReadable) { EV_SET(change + cnt, fd, EVFILT_READ, EV_ADD, 0, 0, nullptr); ++cnt; } - if ((mask & kWritable) != 0) { + if (mask & kWritable) { EV_SET(change + cnt, fd, EVFILT_WRITE, EV_ADD, 0, 0, nullptr); ++cnt; } @@ -51,8 +51,9 @@ int NetKqueue::NetAddEvent(int fd, int mask) { int NetKqueue::NetModEvent(int fd, int /*old_mask*/, int mask) { int ret = NetDelEvent(fd, kReadable | kWritable); - if (mask == 0) { return ret; -} + if (mask == 0) { + return ret; + } return NetAddEvent(fd, mask); } @@ -61,18 +62,19 @@ int NetKqueue::NetDelEvent(int fd, int mask) { int cnt = 0; struct kevent change[2]; - if ((mask & kReadable) != 0) { + if (mask & kReadable) { EV_SET(change + cnt, fd, EVFILT_READ, EV_DELETE, 0, 0, nullptr); ++cnt; } - if ((mask & kWritable) != 0) { + if (mask & kWritable) { EV_SET(change + cnt, fd, EVFILT_WRITE, EV_DELETE, 0, 0, nullptr); ++cnt; } - if (cnt == 0) { return -1; -} + if (cnt == 0) { + return -1; + } return kevent(multiplexer_, change, cnt, nullptr, 0, nullptr); } @@ -87,8 +89,9 @@ int NetKqueue::NetPoll(int timeout) { } int 
num_events = ::kevent(multiplexer_, nullptr, 0, &events_[0], NET_MAX_CLIENTS, p_timeout); - if (num_events <= 0) { return 0; -} + if (num_events <= 0) { + return 0; + } for (int i = 0; i < num_events; i++) { NetFiredEvent& ev = fired_events_[i]; @@ -103,7 +106,7 @@ int NetKqueue::NetPoll(int timeout) { ev.mask |= kWritable; } - if ((events_[i].flags & EV_ERROR) != 0) { + if (events_[i].flags & EV_ERROR) { ev.mask |= kErrorEvent; } } diff --git a/src/net/src/net_pubsub.cc b/src/net/src/net_pubsub.cc index f16bbcb57f..b7513454c3 100644 --- a/src/net/src/net_pubsub.cc +++ b/src/net/src/net_pubsub.cc @@ -11,7 +11,6 @@ #include "net/include/net_conn.h" #include "net/include/net_pubsub.h" -#include "net/src/net_item.h" namespace net { @@ -52,7 +51,7 @@ PubSubThread::PubSubThread() { set_thread_name("PubSubThread"); net_multiplexer_.reset(CreateNetMultiplexer()); net_multiplexer_->Initialize(); - if (pipe(msg_pfd_) != 0) { + if (pipe(msg_pfd_)) { exit(-1); } fcntl(msg_pfd_[0], F_SETFD, fcntl(msg_pfd_[0], F_GETFD) | FD_CLOEXEC); @@ -299,7 +298,7 @@ void PubSubThread::PubSubChannels(const std::string& pattern, std::vectorpush_back(channel.first); } @@ -344,7 +343,7 @@ void* PubSubThread::ThreadMain() { for (int i = 0; i < nfds; i++) { pfe = (net_multiplexer_->FiredEvents()) + i; if (pfe->fd == net_multiplexer_->NotifyReceiveFd()) { // New connection comming - if ((pfe->mask & kReadable) != 0) { + if (pfe->mask & kReadable) { read(net_multiplexer_->NotifyReceiveFd(), triger, 1); { NetItem ti = net_multiplexer_->NotifyQueuePop(); @@ -364,7 +363,7 @@ void* PubSubThread::ThreadMain() { } } if (pfe->fd == msg_pfd_[0]) { // Publish message - if ((pfe->mask & kReadable) != 0) { + if (pfe->mask & kReadable) { read(msg_pfd_[0], triger, 1); std::string channel; std::string msg; @@ -404,7 +403,7 @@ void* PubSubThread::ThreadMain() { // Send message to a channel pattern's clients pattern_mutex_.lock(); for (auto & it : pubsub_pattern_) { - if (pstd::stringmatchlen(it.first.c_str(), 
it.first.size(), channel.c_str(), channel.size(), 0) != 0) { + if (pstd::stringmatchlen(it.first.c_str(), it.first.size(), channel.c_str(), channel.size(), 0)) { for (size_t i = 0; i < it.second.size(); i++) { if (!IsReady(it.second[i]->fd())) { continue; @@ -451,7 +450,7 @@ void* PubSubThread::ThreadMain() { } // Send reply - if (((pfe->mask & kWritable) != 0) && in_conn->is_ready_to_reply()) { + if ((pfe->mask & kWritable) && in_conn->is_ready_to_reply()) { WriteStatus write_status = in_conn->SendReply(); if (write_status == kWriteAll) { in_conn->set_is_reply(false); @@ -466,7 +465,7 @@ void* PubSubThread::ThreadMain() { } // Client request again - if (!should_close && ((pfe->mask & kReadable) != 0)) { + if (!should_close && (pfe->mask & kReadable)) { ReadStatus getRes = in_conn->GetRequest(); // Do not response to client when we leave the pub/sub status here if (getRes != kReadAll && getRes != kReadHalf) { @@ -486,7 +485,7 @@ void* PubSubThread::ThreadMain() { } } // Error - if (((pfe->mask & kErrorEvent) != 0) || should_close) { + if ((pfe->mask & kErrorEvent) || should_close) { MoveConnOut(in_conn); CloseFd(in_conn); in_conn = nullptr; diff --git a/src/net/src/net_thread.cc b/src/net/src/net_thread.cc index ac5f502502..c6c669c5ee 100644 --- a/src/net/src/net_thread.cc +++ b/src/net/src/net_thread.cc @@ -15,7 +15,7 @@ Thread::Thread() : should_stop_(false) {} Thread::~Thread() = default; void* Thread::RunThread(void* arg) { - auto* thread = reinterpret_cast(arg); + auto thread = reinterpret_cast(arg); if (!(thread->thread_name().empty())) { SetThreadName(pthread_self(), thread->thread_name()); } diff --git a/src/net/src/pb_cli.cc b/src/net/src/pb_cli.cc index 17963e0ee9..f6e4ea0e07 100644 --- a/src/net/src/pb_cli.cc +++ b/src/net/src/pb_cli.cc @@ -12,8 +12,11 @@ #include "net/include/net_define.h" #include "pstd/include/pstd_status.h" #include "pstd/include/xdebug.h" +#include "pstd/include/noncopyable.h" + using pstd::Status; + namespace net { // Default PBCli 
is block IO; @@ -28,8 +31,6 @@ class PbCli : public NetCli { // Read, parse and store the reply Status Recv(void* msg_res) override; - PbCli(const PbCli&) = delete; - void operator=(const PbCli&) = delete; private: // BuildWbuf need to access rbuf_, wbuf_; char* rbuf_; @@ -48,7 +49,7 @@ PbCli::~PbCli() { } Status PbCli::Send(void* msg) { - auto* req = reinterpret_cast(msg); + auto req = reinterpret_cast(msg); int wbuf_len = req->ByteSizeLong(); req->SerializeToArray(wbuf_ + kCommandHeaderLength, wbuf_len); @@ -60,7 +61,7 @@ Status PbCli::Send(void* msg) { } Status PbCli::Recv(void* msg_res) { - auto* res = reinterpret_cast(msg_res); + auto res = reinterpret_cast(msg_res); // Read Header size_t read_len = kCommandHeaderLength; diff --git a/src/net/src/pb_conn.cc b/src/net/src/pb_conn.cc index 4c96bd94b0..e0ff5f7ca0 100644 --- a/src/net/src/pb_conn.cc +++ b/src/net/src/pb_conn.cc @@ -64,7 +64,7 @@ ReadStatus PbConn::GetRequest() { uint32_t new_size = header_len_ + COMMAND_HEADER_LENGTH; if (new_size < kProtoMaxMessage) { rbuf_ = reinterpret_cast(realloc(rbuf_, sizeof(char) * new_size)); - if (rbuf_ == nullptr) { + if (!rbuf_) { return kFullError; } rbuf_len_ = new_size; diff --git a/src/net/src/redis_cli.cc b/src/net/src/redis_cli.cc index 588ba6b337..009335a29c 100644 --- a/src/net/src/redis_cli.cc +++ b/src/net/src/redis_cli.cc @@ -15,6 +15,7 @@ #include #include +#include "pstd/include/noncopyable.h" #include "net/include/net_cli.h" #include "net/include/net_define.h" @@ -33,10 +34,6 @@ class RedisCli : public NetCli { // Read, parse and store the reply Status Recv(void* trival = nullptr) override; - // No copyable - RedisCli(const RedisCli&) = delete; - void operator=(const RedisCli&) = delete; - private: RedisCmdArgsType argv_; // The parsed result @@ -87,7 +84,7 @@ Status RedisCli::Send(void* msg) { Status s; // TODO(anan) use socket_->SendRaw instead - auto* storage = reinterpret_cast(msg); + auto storage = reinterpret_cast(msg); const char* wbuf = 
storage->data(); size_t nleft = storage->size(); @@ -234,7 +231,7 @@ int RedisCli::ProcessLineItem() { char* p; int len; - if ((p = ReadLine(&len)) == nullptr) { + if (!(p = ReadLine(&len))) { return REDIS_HALF; } @@ -348,7 +345,7 @@ int RedisCli::GetReplyFromReader() { } char* p; - if ((p = ReadBytes(1)) == nullptr) { + if (!(p = ReadBytes(1))) { return REDIS_HALF; } diff --git a/src/net/src/redis_conn.cc b/src/net/src/redis_conn.cc index 18e66218b1..9cd5b3be2c 100644 --- a/src/net/src/redis_conn.cc +++ b/src/net/src/redis_conn.cc @@ -80,7 +80,7 @@ ReadStatus RedisConn::GetRequest() { return kFullError; } rbuf_ = static_cast(realloc(rbuf_, new_size)); // NOLINT - if (rbuf_ == nullptr) { + if (!rbuf_) { return kFullError; } rbuf_len_ = new_size; @@ -194,7 +194,7 @@ void RedisConn::NotifyEpoll(bool success) { } int RedisConn::ParserDealMessageCb(RedisParser* parser, const RedisCmdArgsType& argv) { - auto* conn = reinterpret_cast(parser->data); + auto conn = reinterpret_cast(parser->data); if (conn->GetHandleType() == HandleType::kSynchronous) { return conn->DealMessage(argv, &(conn->response_)); } else { @@ -203,7 +203,7 @@ int RedisConn::ParserDealMessageCb(RedisParser* parser, const RedisCmdArgsType& } int RedisConn::ParserCompleteCb(RedisParser* parser, const std::vector& argvs) { - auto* conn = reinterpret_cast(parser->data); + auto conn = reinterpret_cast(parser->data); bool async = conn->GetHandleType() == HandleType::kAsynchronous; conn->ProcessRedisCmds(argvs, async, &(conn->response_)); return 0; diff --git a/src/net/src/redis_parser.cc b/src/net/src/redis_parser.cc index cf9568d725..10d07fee53 100644 --- a/src/net/src/redis_parser.cc +++ b/src/net/src/redis_parser.cc @@ -303,7 +303,7 @@ void RedisParser::PrintCurrentStatus() { // } LOG(INFO) << "cur_pos : " << cur_pos_; LOG(INFO) << "input_buf_ is clean ? 
" << (input_buf_ == nullptr); - if (input_buf_ != nullptr) { + if (input_buf_) { LOG(INFO) << " input_buf " << input_buf_; } LOG(INFO) << "half_argv_ : " << half_argv_; diff --git a/src/net/src/server_socket.h b/src/net/src/server_socket.h index 3ab912c74f..9baef97081 100644 --- a/src/net/src/server_socket.h +++ b/src/net/src/server_socket.h @@ -12,9 +12,11 @@ #include #include +#include "pstd/include/noncopyable.h" + namespace net { -class ServerSocket { +class ServerSocket : public pstd::noncopyable { public: explicit ServerSocket(int port, bool is_block = false); @@ -49,13 +51,6 @@ class ServerSocket { void set_sockfd(int sockfd) { sockfd_ = sockfd; } - /* - * No allowed copy and copy assign operator - */ - - ServerSocket(const ServerSocket&) = delete; - void operator=(const ServerSocket&) = delete; - private: int SetNonBlock(); /* diff --git a/src/net/src/server_thread.cc b/src/net/src/server_thread.cc index dc5c2509ea..0a24662ba8 100644 --- a/src/net/src/server_thread.cc +++ b/src/net/src/server_thread.cc @@ -53,7 +53,7 @@ class DefaultServerHandle : public ServerHandle { }; static const ServerHandle* SanitizeHandle(const ServerHandle* raw_handle) { - if (raw_handle == nullptr) { + if (!raw_handle) { return new DefaultServerHandle(); } return raw_handle; @@ -106,9 +106,6 @@ ServerThread::~ServerThread() { EVP_cleanup(); } #endif - for (auto & server_socket : server_sockets_) { - delete server_socket; - } if (own_handle_) { delete handle_; } @@ -122,22 +119,24 @@ int ServerThread::SetTcpNoDelay(int connfd) { int ServerThread::StartThread() { int ret = 0; ret = InitHandle(); - if (ret != kSuccess) { return ret; -} + if (ret != kSuccess) { + return ret; + } return Thread::StartThread(); } int ServerThread::InitHandle() { int ret = 0; - ServerSocket* socket_p; + std::shared_ptr socket_p; if (ips_.find("0.0.0.0") != ips_.end()) { ips_.clear(); ips_.insert("0.0.0.0"); } - for (const auto & ip : ips_) { - socket_p = new ServerSocket(port_); - 
server_sockets_.push_back(socket_p); - ret = socket_p->Listen(ip); + + for (std::set::iterator iter = ips_.begin(); iter != ips_.end(); ++iter) { + socket_p = std::make_shared(port_); + server_sockets_.emplace_back(socket_p); + ret = socket_p->Listen(*iter); if (ret != kSuccess) { return ret; } @@ -257,9 +256,6 @@ void* ServerThread::ThreadMain() { } } - for (auto & server_socket : server_sockets_) { - delete server_socket; - } server_sockets_.clear(); server_fds_.clear(); @@ -314,7 +310,7 @@ int ServerThread::EnableSecurity(const std::string& cert_file, const std::string } if (SSL_CTX_use_PrivateKey_file(ssl_ctx_, key_file.c_str(), SSL_FILETYPE_PEM) != 1) { - LOG(WARNING) << "SSL_CTX_use_PrivateKey_file(" << key_file << ")"; + LOG(WARNING) << "SSL_CTX_use_PrivateKey_file(" << key_file << ")"; return -1; } diff --git a/src/net/src/simple_http_conn.cc b/src/net/src/simple_http_conn.cc index f405614209..c2e8b670e4 100644 --- a/src/net/src/simple_http_conn.cc +++ b/src/net/src/simple_http_conn.cc @@ -349,7 +349,7 @@ ReadStatus SimpleHTTPConn::GetRequest() { // So that strstr will not parse the expire char rbuf_[rbuf_pos_] = '\0'; char* sep_pos = strstr(rbuf_, "\r\n\r\n"); - if (sep_pos == nullptr) { + if (!sep_pos) { break; } header_len_ = sep_pos - rbuf_ + 4; @@ -364,8 +364,9 @@ ReadStatus SimpleHTTPConn::GetRequest() { response_->SetStatusCode(100); set_is_reply(true); conn_status_ = kPacket; - if (remain_packet_len_ > 0) { return kReadHalf; -} + if (remain_packet_len_ > 0) { + return kReadHalf; + } } conn_status_ = kPacket; } diff --git a/src/net/src/thread_pool.cc b/src/net/src/thread_pool.cc index 5366668f2d..4ea4b82125 100644 --- a/src/net/src/thread_pool.cc +++ b/src/net/src/thread_pool.cc @@ -13,7 +13,7 @@ namespace net { void* ThreadPool::Worker::WorkerMain(void* arg) { - auto* tp = static_cast(arg); + auto tp = static_cast(arg); tp->runInThread(); return nullptr; } diff --git a/src/net/src/worker_thread.cc b/src/net/src/worker_thread.cc index 
43a3507af0..dff3295aff 100644 --- a/src/net/src/worker_thread.cc +++ b/src/net/src/worker_thread.cc @@ -105,7 +105,7 @@ void* WorkerThread::ThreadMain() { for (int i = 0; i < nfds; i++) { pfe = (net_multiplexer_->FiredEvents()) + i; - if (pfe == nullptr) { + if (!pfe) { continue; } if (pfe->fd == net_multiplexer_->NotifyReceiveFd()) { diff --git a/src/pika.cc b/src/pika.cc index bd3d9424c6..152b96b3a0 100644 --- a/src/pika.cc +++ b/src/pika.cc @@ -18,11 +18,12 @@ #include "pstd/include/env.h" -PikaConf* g_pika_conf; +std::unique_ptr g_pika_conf; +// todo : change to unique_ptr will coredump PikaServer* g_pika_server; -PikaReplicaManager* g_pika_rm; +std::unique_ptr g_pika_rm; -PikaCmdTableManager* g_pika_cmd_table_manager; +std::unique_ptr g_pika_cmd_table_manager; static void version() { char version[32]; @@ -37,7 +38,7 @@ static void version() { static void PikaConfInit(const std::string& path) { printf("path : %s\n", path.c_str()); - g_pika_conf = new PikaConf(path); + g_pika_conf = std::make_unique(path); if (g_pika_conf->Load() != 0) { LOG(FATAL) << "pika load conf error"; } @@ -63,8 +64,9 @@ static void PikaGlogInit() { } static void daemonize() { - if (fork() != 0) { exit(0); /* parent exits */ -} + if (fork() != 0) { + exit(0); /* parent exits */ + } setsid(); /* create a new session */ } @@ -184,9 +186,9 @@ int main(int argc, char* argv[]) { PikaSignalSetup(); LOG(INFO) << "Server at: " << path; - g_pika_cmd_table_manager = new PikaCmdTableManager(); + g_pika_cmd_table_manager = std::make_unique(); g_pika_server = new PikaServer(); - g_pika_rm = new PikaReplicaManager(); + g_pika_rm = std::make_unique(); if (g_pika_conf->daemonize()) { close_std(); @@ -204,10 +206,7 @@ int main(int argc, char* argv[]) { g_pika_rm->Stop(); delete g_pika_server; - delete g_pika_rm; - delete g_pika_cmd_table_manager; ::google::ShutdownGoogleLogging(); - delete g_pika_conf; return 0; } diff --git a/src/pika_admin.cc b/src/pika_admin.cc index 510477c911..0694443a61 100644 --- 
a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -22,7 +22,8 @@ using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_conf; +extern std::unique_ptr g_pika_rm; static std::string ConstructPinginPubSubResp(const PikaCmdArgsType& argv) { if (argv.size() > 2) { @@ -921,7 +922,6 @@ void InfoCmd::InfoShardingReplication(std::string& info) { } void InfoCmd::InfoReplication(std::string& info) { - int host_role = g_pika_server->role(); std::stringstream tmp_stream; std::stringstream out_of_sync; @@ -1352,13 +1352,7 @@ void ConfigCmd::ConfigGet(std::string& ret) { EncodeInt32(&config_body, g_pika_conf->databases()); } - if (pstd::stringmatch(pattern.data(), "default-slot-num", 1) != 0) { - elements += 2; - EncodeString(&config_body, "default-slot-num"); - EncodeInt32(&config_body, g_pika_conf->default_slot_num()); - } - - if (pstd::stringmatch(pattern.data(), "daemonize", 1) != 0) { + if (pstd::stringmatch(pattern.data(), "daemonize", 1)) { elements += 2; EncodeString(&config_body, "daemonize"); EncodeString(&config_body, g_pika_conf->daemonize() ? 
"yes" : "no"); diff --git a/src/pika_auxiliary_thread.cc b/src/pika_auxiliary_thread.cc index f29dbe2688..3256f4bd74 100644 --- a/src/pika_auxiliary_thread.cc +++ b/src/pika_auxiliary_thread.cc @@ -10,7 +10,7 @@ #include "include/pika_server.h" extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; using namespace std::chrono_literals; diff --git a/src/pika_binlog.cc b/src/pika_binlog.cc index be1472c4ee..eaaf5c56e5 100644 --- a/src/pika_binlog.cc +++ b/src/pika_binlog.cc @@ -25,7 +25,7 @@ std::string NewFileName(const std::string& name, const uint32_t current) { /* * Version */ -Version::Version(pstd::RWFile* save) : save_(save) { +Version::Version(const std::shared_ptr& save) : save_(save) { assert(save_ != nullptr); } @@ -61,7 +61,6 @@ Status Version::Init() { */ Binlog::Binlog(std::string binlog_path, const int file_size) : opened_(false), - binlog_path_(std::move(binlog_path)), file_size_(file_size), binlog_io_error_(false) { @@ -81,24 +80,26 @@ Binlog::Binlog(std::string binlog_path, const int file_size) LOG(INFO) << "Binlog: Manifest file not exist, we create a new one."; profile = NewFileName(filename_, pro_num_); - s = pstd::NewWritableFile(profile, &queue_); + s = pstd::NewWritableFile(profile, queue_); if (!s.ok()) { LOG(FATAL) << "Binlog: new " << filename_ << " " << s.ToString(); } - - s = pstd::NewRWFile(manifest, &versionfile_); + std::unique_ptr tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); if (!s.ok()) { LOG(FATAL) << "Binlog: new versionfile error " << s.ToString(); } - version_ = new Version(versionfile_); + version_ = std::make_unique(versionfile_); version_->StableSave(); } else { LOG(INFO) << "Binlog: Find the exist file."; - - s = pstd::NewRWFile(manifest, &versionfile_); + std::unique_ptr tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); if (s.ok()) { - version_ = new Version(versionfile_); + 
version_ = std::make_unique(versionfile_); version_->Init(); pro_num_ = version_->pro_num_; @@ -110,7 +111,7 @@ Binlog::Binlog(std::string binlog_path, const int file_size) profile = NewFileName(filename_, pro_num_); DLOG(INFO) << "Binlog: open profile " << profile; - s = pstd::AppendWritableFile(profile, &queue_, version_->pro_offset_); + s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_); if (!s.ok()) { LOG(FATAL) << "Binlog: Open file " << profile << " error " << s.ToString(); } @@ -125,10 +126,6 @@ Binlog::Binlog(std::string binlog_path, const int file_size) Binlog::~Binlog() { std::lock_guard l(mutex_); Close(); - delete version_; - delete versionfile_; - - delete queue_; } void Binlog::Close() { @@ -185,15 +182,15 @@ Status Binlog::Put(const char* item, int len) { /* Check to roll log file */ uint64_t filesize = queue_->Filesize(); if (filesize > file_size_) { - pstd::WritableFile* queue = nullptr; + std::unique_ptr queue; std::string profile = NewFileName(filename_, pro_num_ + 1); - s = pstd::NewWritableFile(profile, &queue); + s = pstd::NewWritableFile(profile, queue); if (!s.ok()) { LOG(ERROR) << "Binlog: new " << filename_ << " " << s.ToString(); return s; } - delete queue_; - queue_ = queue; + queue_.reset(); + queue_ = std::move(queue); pro_num_++; { @@ -352,7 +349,7 @@ Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset, uint32_t pro_offset = 0; } - delete queue_; + queue_.reset(); std::string init_profile = NewFileName(filename_, 0); if (pstd::FileExists(init_profile)) { @@ -364,8 +361,8 @@ Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset, uint32_t pstd::DeleteFile(profile); } - pstd::NewWritableFile(profile, &queue_); - Binlog::AppendPadding(queue_, &pro_offset); + pstd::NewWritableFile(profile, queue_); + Binlog::AppendPadding(queue_.get(), &pro_offset); pro_num_ = pro_num; @@ -383,7 +380,7 @@ Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset, uint32_t } Status 
Binlog::Truncate(uint32_t pro_num, uint64_t pro_offset, uint64_t index) { - delete queue_; + queue_.reset(); std::string profile = NewFileName(filename_, pro_num); const int fd = open(profile.c_str(), O_RDWR | O_CLOEXEC, 0644); if (fd < 0) { @@ -403,7 +400,7 @@ Status Binlog::Truncate(uint32_t pro_num, uint64_t pro_offset, uint64_t index) { version_->StableSave(); } - Status s = pstd::AppendWritableFile(profile, &queue_, version_->pro_offset_); + Status s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_); if (!s.ok()) { return s; } diff --git a/src/pika_binlog_reader.cc b/src/pika_binlog_reader.cc index 33b32b9349..b825d8864d 100644 --- a/src/pika_binlog_reader.cc +++ b/src/pika_binlog_reader.cc @@ -12,21 +12,15 @@ using pstd::Status; PikaBinlogReader::PikaBinlogReader(uint32_t cur_filenum, uint64_t cur_offset) : cur_filenum_(cur_filenum), cur_offset_(cur_offset), - logger_(nullptr), - - backing_store_(new char[kBlockSize]) { + backing_store_(std::make_unique(kBlockSize)), + buffer_() { last_record_offset_ = cur_offset % kBlockSize; } -PikaBinlogReader::PikaBinlogReader() : logger_(nullptr), backing_store_(new char[kBlockSize]) { +PikaBinlogReader::PikaBinlogReader() : backing_store_(std::make_unique(kBlockSize)), buffer_() { last_record_offset_ = 0 % kBlockSize; } -PikaBinlogReader::~PikaBinlogReader() { - delete[] backing_store_; - delete queue_; -} - void PikaBinlogReader::GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset) { std::shared_lock l(rwlock_); *cur_filenum = cur_filenum_; @@ -47,13 +41,15 @@ int PikaBinlogReader::Seek(const std::shared_ptr& logger, uint32_t filen LOG(WARNING) << confile << " not exits"; return -1; } - pstd::SequentialFile* readfile; - if (!pstd::NewSequentialFile(confile, &readfile).ok()) { + std::unique_ptr readfile; + if (!pstd::NewSequentialFile(confile, readfile).ok()) { LOG(WARNING) << "New swquential " << confile << " failed"; return -1; } - delete queue_; - queue_ = readfile; + if (queue_) { + 
queue_.reset(); + } + queue_ = std::move(readfile); logger_ = logger; std::lock_guard l(rwlock_); @@ -92,7 +88,7 @@ bool PikaBinlogReader::GetNext(uint64_t* size) { while (true) { buffer_.clear(); - s = queue_->Read(kHeaderSize, &buffer_, backing_store_); + s = queue_->Read(kHeaderSize, &buffer_, backing_store_.get()); if (!s.ok()) { is_error = true; return is_error; @@ -110,21 +106,21 @@ bool PikaBinlogReader::GetNext(uint64_t* size) { } if (type == kFullType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; break; } else if (type == kFirstType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; } else if (type == kMiddleType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; } else if (type == kLastType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; break; } else if (type == kBadRecord) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; break; } else { @@ -145,7 +141,7 @@ unsigned int PikaBinlogReader::ReadPhysicalRecord(pstd::Slice* result, uint32_t* last_record_offset_ = 0; } buffer_.clear(); - s = queue_->Read(kHeaderSize, &buffer_, backing_store_); + s = queue_->Read(kHeaderSize, &buffer_, backing_store_.get()); if (s.IsEndFile()) { return kEof; } else if (!s.ok()) { @@ -159,8 +155,9 @@ unsigned int PikaBinlogReader::ReadPhysicalRecord(pstd::Slice* result, uint32_t* const unsigned int type = header[7]; const uint32_t length = a | (b << 8) | (c << 16); - if (length > (kBlockSize - kHeaderSize)) { return kBadRecord; -} + if (length > (kBlockSize - kHeaderSize)) { + return 
kBadRecord; + } if (type == kZeroType || length == 0) { buffer_.clear(); @@ -168,7 +165,7 @@ unsigned int PikaBinlogReader::ReadPhysicalRecord(pstd::Slice* result, uint32_t* } buffer_.clear(); - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); *result = pstd::Slice(buffer_.data(), buffer_.size()); last_record_offset_ += kHeaderSize + length; if (s.ok()) { @@ -227,7 +224,7 @@ Status PikaBinlogReader::Consume(std::string* scratch, uint32_t* filenum, uint64 // Append to scratch; // the status will be OK, IOError or Corruption, EndFile; Status PikaBinlogReader::Get(std::string* scratch, uint32_t* filenum, uint64_t* offset) { - if (logger_ == nullptr || queue_ == nullptr) { + if (!logger_ || !queue_) { return Status::Corruption("Not seek"); } scratch->clear(); @@ -247,10 +244,10 @@ Status PikaBinlogReader::Get(std::string* scratch, uint32_t* filenum, uint64_t* // Roll to next file need retry; if (pstd::FileExists(confile)) { DLOG(INFO) << "BinlogSender roll to new binlog" << confile; - delete queue_; + queue_.reset(); queue_ = nullptr; - pstd::NewSequentialFile(confile, &(queue_)); + pstd::NewSequentialFile(confile, queue_); { std::lock_guard l(rwlock_); cur_filenum_++; diff --git a/src/pika_client_conn.cc b/src/pika_client_conn.cc index 4464e14e3a..fc7e4a090e 100644 --- a/src/pika_client_conn.cc +++ b/src/pika_client_conn.cc @@ -17,17 +17,16 @@ #include "include/pika_rm.h" #include "include/pika_server.h" - +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; PikaClientConn::PikaClientConn(int fd, const std::string& ip_port, net::Thread* thread, net::NetMultiplexer* mpx, const net::HandleType& handle_type, int max_conn_rbuf_size) : RedisConn(fd, ip_port, thread, mpx, handle_type, max_conn_rbuf_size), 
server_thread_(reinterpret_cast(thread)), - current_table_(g_pika_conf->default_table()) - { + current_table_(g_pika_conf->default_table()) { auth_stat_.Init(); } @@ -172,7 +171,7 @@ void PikaClientConn::ProcessMonitor(const PikaCmdArgsType& argv) { void PikaClientConn::ProcessRedisCmds(const std::vector& argvs, bool async, std::string* response) { if (async) { - auto* arg = new BgTaskArg(); + auto arg = new BgTaskArg(); arg->redis_cmds = argvs; arg->conn_ptr = std::dynamic_pointer_cast(shared_from_this()); g_pika_server->ScheduleClientPool(&DoBackgroundTask, arg); @@ -182,34 +181,31 @@ void PikaClientConn::ProcessRedisCmds(const std::vector& } void PikaClientConn::DoBackgroundTask(void* arg) { - auto* bg_arg = reinterpret_cast(arg); + std::unique_ptr bg_arg(static_cast(arg)); std::shared_ptr conn_ptr = bg_arg->conn_ptr; if (bg_arg->redis_cmds.empty()) { - delete bg_arg; conn_ptr->NotifyEpoll(false); return; } for (const auto& argv : bg_arg->redis_cmds) { if (argv.empty()) { - delete bg_arg; conn_ptr->NotifyEpoll(false); return; } } conn_ptr->BatchExecRedisCmd(bg_arg->redis_cmds); - delete bg_arg; } void PikaClientConn::DoExecTask(void* arg) { - auto* bg_arg = reinterpret_cast(arg); + std::unique_ptr bg_arg(static_cast(arg)); std::shared_ptr cmd_ptr = bg_arg->cmd_ptr; std::shared_ptr conn_ptr = bg_arg->conn_ptr; std::shared_ptr resp_ptr = bg_arg->resp_ptr; LogOffset offset = bg_arg->offset; std::string table_name = bg_arg->table_name; uint32_t partition_id = bg_arg->partition_id; - delete bg_arg; + bg_arg.reset(); uint64_t start_us = 0; if (g_pika_conf->slowlog_slower_than() >= 0) { @@ -223,13 +219,13 @@ void PikaClientConn::DoExecTask(void* arg) { std::shared_ptr partition = g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); - if (partition == nullptr) { + if (!partition) { LOG(WARNING) << "Sync Master Partition not exist " << table_name << partition_id; return; } partition->ConsensusUpdateAppliedIndex(offset); - if (conn_ptr == 
nullptr || resp_ptr == nullptr) { + if (!conn_ptr || !resp_ptr) { return; } diff --git a/src/pika_client_processor.cc b/src/pika_client_processor.cc index 2c450f77ce..12e608207e 100644 --- a/src/pika_client_processor.cc +++ b/src/pika_client_processor.cc @@ -8,19 +8,14 @@ #include PikaClientProcessor::PikaClientProcessor(size_t worker_num, size_t max_queue_size, const std::string& name_prefix) { - pool_ = new net::ThreadPool(worker_num, max_queue_size, name_prefix + "Pool"); + pool_ = std::make_unique(worker_num, max_queue_size, name_prefix + "Pool"); for (size_t i = 0; i < worker_num; ++i) { - auto* bg_thread = new net::BGThread(max_queue_size); - bg_threads_.push_back(bg_thread); - bg_thread->set_thread_name(name_prefix + "BgThread"); + bg_threads_.push_back(std::make_unique(max_queue_size)); + bg_threads_.back()->set_thread_name(name_prefix + "BgThread"); } } PikaClientProcessor::~PikaClientProcessor() { - delete pool_; - for (auto & bg_thread : bg_threads_) { - delete bg_thread; - } LOG(INFO) << "PikaClientProcessor exit!!!"; } diff --git a/src/pika_cmd_table_manager.cc b/src/pika_cmd_table_manager.cc index 61d972fd57..2fd457c898 100644 --- a/src/pika_cmd_table_manager.cc +++ b/src/pika_cmd_table_manager.cc @@ -11,20 +11,12 @@ #include "include/pika_conf.h" #include "pstd/include/pstd_mutex.h" -extern PikaConf* g_pika_conf; +extern std::unique_ptr g_pika_conf; PikaCmdTableManager::PikaCmdTableManager() { - cmds_ = new CmdTable(); + cmds_ = std::make_unique(); cmds_->reserve(300); - InitCmdTable(cmds_); -} - -PikaCmdTableManager::~PikaCmdTableManager() { - for (const auto& item : thread_distribution_map_) { - delete item.second; - } - DestoryCmdTable(cmds_); - delete cmds_; + InitCmdTable(cmds_.get()); } std::shared_ptr PikaCmdTableManager::GetCmd(const std::string& opt) { @@ -47,21 +39,18 @@ bool PikaCmdTableManager::CheckCurrentThreadDistributionMapExist(const std::thre void PikaCmdTableManager::InsertCurrentThreadDistributionMap() { auto tid = 
std::this_thread::get_id(); - PikaDataDistribution* distribution = nullptr; - distribution = new HashModulo(); + std::unique_ptr distribution = std::make_unique(); distribution->Init(); std::lock_guard l(map_protector_); - thread_distribution_map_.emplace(tid, distribution); + thread_distribution_map_.emplace(tid, std::move(distribution)); } uint32_t PikaCmdTableManager::DistributeKey(const std::string& key, uint32_t partition_num) { auto tid = std::this_thread::get_id(); - PikaDataDistribution* data_dist = nullptr; if (!CheckCurrentThreadDistributionMapExist(tid)) { InsertCurrentThreadDistributionMap(); } std::shared_lock l(map_protector_); - data_dist = thread_distribution_map_[tid]; - return data_dist->Distribute(key, partition_num); + return thread_distribution_map_[tid]->Distribute(key, partition_num); } diff --git a/src/pika_command.cc b/src/pika_command.cc index 993e846c29..cae2805c46 100644 --- a/src/pika_command.cc +++ b/src/pika_command.cc @@ -24,483 +24,455 @@ using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; -void InitCmdTable(std::unordered_map* cmd_table) { +void InitCmdTable(CmdTable* cmd_table) { // Admin ////Slaveof - Cmd* slaveofptr = new SlaveofCmd(kCmdNameSlaveof, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlaveof, slaveofptr)); - Cmd* dbslaveofptr = new DbSlaveofCmd(kCmdNameDbSlaveof, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDbSlaveof, dbslaveofptr)); - Cmd* authptr = new AuthCmd(kCmdNameAuth, 2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameAuth, authptr)); - Cmd* bgsaveptr = new BgsaveCmd(kCmdNameBgsave, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend); - cmd_table->insert(std::pair(kCmdNameBgsave, bgsaveptr)); - Cmd* compactptr = new CompactCmd(kCmdNameCompact, -1, 
kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameCompact, compactptr)); - Cmd* purgelogsto = new PurgelogstoCmd(kCmdNamePurgelogsto, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePurgelogsto, purgelogsto)); - Cmd* pingptr = new PingCmd(kCmdNamePing, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePing, pingptr)); - Cmd* helloptr = new HelloCmd(kCmdNameHello, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameHello, helloptr)); - Cmd* selectptr = new SelectCmd(kCmdNameSelect, 2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSelect, selectptr)); - Cmd* flushallptr = new FlushallCmd(kCmdNameFlushall, 1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameFlushall, flushallptr)); - Cmd* flushdbptr = new FlushdbCmd(kCmdNameFlushdb, -1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameFlushdb, flushdbptr)); - Cmd* clientptr = new ClientCmd(kCmdNameClient, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameClient, clientptr)); - Cmd* shutdownptr = new ShutdownCmd(kCmdNameShutdown, 1, kCmdFlagsRead | kCmdFlagsLocal | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameShutdown, shutdownptr)); - Cmd* infoptr = new InfoCmd(kCmdNameInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameInfo, infoptr)); - Cmd* configptr = new ConfigCmd(kCmdNameConfig, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameConfig, configptr)); - Cmd* monitorptr = new MonitorCmd(kCmdNameMonitor, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameMonitor, monitorptr)); - Cmd* dbsizeptr = new DbsizeCmd(kCmdNameDbsize, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDbsize, dbsizeptr)); - Cmd* timeptr = new TimeCmd(kCmdNameTime, 1, kCmdFlagsRead | kCmdFlagsAdmin); - 
cmd_table->insert(std::pair(kCmdNameTime, timeptr)); - Cmd* delbackupptr = new DelbackupCmd(kCmdNameDelbackup, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDelbackup, delbackupptr)); - Cmd* echoptr = new EchoCmd(kCmdNameEcho, 2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameEcho, echoptr)); - Cmd* scandbptr = new ScandbCmd(kCmdNameScandb, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameScandb, scandbptr)); - Cmd* slowlogptr = new SlowlogCmd(kCmdNameSlowlog, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlowlog, slowlogptr)); - Cmd* paddingptr = new PaddingCmd(kCmdNamePadding, 2, kCmdFlagsWrite | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePadding, paddingptr)); - Cmd* pkpatternmatchdelptr = new PKPatternMatchDelCmd(kCmdNamePKPatternMatchDel, 3, kCmdFlagsWrite | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePKPatternMatchDel, pkpatternmatchdelptr)); - Cmd* dummyptr = new DummyCmd(kCmdDummy, 0, kCmdFlagsWrite | kCmdFlagsSinglePartition); - cmd_table->insert(std::pair(kCmdDummy, dummyptr)); - Cmd* quitptr = new QuitCmd(kCmdNameQuit, 1, kCmdFlagsRead); - cmd_table->insert(std::pair(kCmdNameQuit, quitptr)); + std::unique_ptr slaveofptr = std::make_unique(kCmdNameSlaveof, -3, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameSlaveof, std::move(slaveofptr))); + std::unique_ptr dbslaveofptr = std::make_unique(kCmdNameDbSlaveof, -2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameDbSlaveof, std::move(dbslaveofptr))); + std::unique_ptr authptr = std::make_unique(kCmdNameAuth, 2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameAuth, std::move(authptr))); + std::unique_ptr bgsaveptr = std::make_unique(kCmdNameBgsave, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend); + cmd_table->insert(std::pair>(kCmdNameBgsave, std::move(bgsaveptr))); + std::unique_ptr compactptr = 
std::make_unique(kCmdNameCompact, -1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameCompact, std::move(compactptr))); + std::unique_ptr purgelogsto = std::make_unique(kCmdNamePurgelogsto, -2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePurgelogsto, std::move(purgelogsto))); + std::unique_ptr pingptr = std::make_unique(kCmdNamePing, 1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePing, std::move(pingptr))); + std::unique_ptr helloptr = std::make_unique(kCmdNameHello, -1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameHello, std::move(helloptr))); + std::unique_ptr selectptr = std::make_unique(kCmdNameSelect, 2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameSelect, std::move(selectptr))); + std::unique_ptr flushallptr = std::make_unique(kCmdNameFlushall, 1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameFlushall, std::move(flushallptr))); + std::unique_ptr flushdbptr = std::make_unique(kCmdNameFlushdb, -1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameFlushdb, std::move(flushdbptr))); + std::unique_ptr clientptr = std::make_unique(kCmdNameClient, -2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameClient, std::move(clientptr))); + std::unique_ptr shutdownptr = std::make_unique(kCmdNameShutdown, 1, kCmdFlagsRead | kCmdFlagsLocal | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameShutdown, std::move(shutdownptr))); + std::unique_ptr infoptr = std::make_unique(kCmdNameInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameInfo, std::move(infoptr))); + std::unique_ptr configptr = std::make_unique(kCmdNameConfig, -2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameConfig, std::move(configptr))); + std::unique_ptr monitorptr = std::make_unique(kCmdNameMonitor, -1, 
kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameMonitor, std::move(monitorptr))); + std::unique_ptr dbsizeptr = std::make_unique(kCmdNameDbsize, 1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameDbsize, std::move(dbsizeptr))); + std::unique_ptr timeptr = std::make_unique(kCmdNameTime, 1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameTime, std::move(timeptr))); + std::unique_ptr delbackupptr = std::make_unique(kCmdNameDelbackup, 1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameDelbackup, std::move(delbackupptr))); + std::unique_ptr echoptr = std::make_unique(kCmdNameEcho, 2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameEcho, std::move(echoptr))); + std::unique_ptr scandbptr = std::make_unique(kCmdNameScandb, -1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameScandb, std::move(scandbptr))); + std::unique_ptr slowlogptr = std::make_unique(kCmdNameSlowlog, -2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameSlowlog, std::move(slowlogptr))); + std::unique_ptr paddingptr = std::make_unique(kCmdNamePadding, 2, kCmdFlagsWrite | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePadding, std::move(paddingptr))); + std::unique_ptr pkpatternmatchdelptr = std::make_unique(kCmdNamePKPatternMatchDel, 3, kCmdFlagsWrite | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePKPatternMatchDel, std::move(pkpatternmatchdelptr))); + std::unique_ptr dummyptr = std::make_unique(kCmdDummy, 0, kCmdFlagsWrite | kCmdFlagsSinglePartition); + cmd_table->insert(std::pair>(kCmdDummy, std::move(dummyptr))); + std::unique_ptr quitptr = std::make_unique(kCmdNameQuit, 1, kCmdFlagsRead); + cmd_table->insert(std::pair>(kCmdNameQuit, std::move(quitptr))); // Kv ////SetCmd - Cmd* setptr = new SetCmd(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSet, 
setptr)); + std::unique_ptr setptr = std::make_unique(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameSet, std::move(setptr))); ////GetCmd - Cmd* getptr = new GetCmd(kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGet, getptr)); + std::unique_ptr getptr = std::make_unique(kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameGet, std::move(getptr))); ////DelCmd - Cmd* delptr = new DelCmd(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDel, delptr)); - Cmd* Unlinkptr = new DelCmd(kCmdNameUnlink, -2, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameUnlink, Unlinkptr)); + std::unique_ptr delptr = std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameDel, std::move(delptr))); + std::unique_ptr Unlinkptr = std::make_unique(kCmdNameUnlink, -2, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameUnlink, std::move(Unlinkptr))); ////IncrCmd - Cmd* incrptr = new IncrCmd(kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncr, incrptr)); + std::unique_ptr incrptr = std::make_unique(kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameIncr, std::move(incrptr))); ////IncrbyCmd - Cmd* incrbyptr = new IncrbyCmd(kCmdNameIncrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncrby, incrbyptr)); + std::unique_ptr incrbyptr = std::make_unique(kCmdNameIncrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameIncrby, std::move(incrbyptr))); 
////IncrbyfloatCmd - Cmd* incrbyfloatptr = - new IncrbyfloatCmd(kCmdNameIncrbyfloat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncrbyfloat, incrbyfloatptr)); + std::unique_ptr incrbyfloatptr = std::make_unique(kCmdNameIncrbyfloat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameIncrbyfloat, std::move(incrbyfloatptr))); ////DecrCmd - Cmd* decrptr = new DecrCmd(kCmdNameDecr, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDecr, decrptr)); + std::unique_ptr decrptr = std::make_unique(kCmdNameDecr, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameDecr, std::move(decrptr))); ////DecrbyCmd - Cmd* decrbyptr = new DecrbyCmd(kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDecrby, decrbyptr)); + std::unique_ptr decrbyptr = std::make_unique(kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameDecrby, std::move(decrbyptr))); ////GetsetCmd - Cmd* getsetptr = new GetsetCmd(kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGetset, getsetptr)); + std::unique_ptr getsetptr = std::make_unique(kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameGetset, std::move(getsetptr))); ////AppendCmd - Cmd* appendptr = new AppendCmd(kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameAppend, appendptr)); + std::unique_ptr appendptr = std::make_unique(kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameAppend, std::move(appendptr))); ////MgetCmd - Cmd* mgetptr = new MgetCmd(kCmdNameMget, -2, kCmdFlagsRead | 
kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMget, mgetptr)); + std::unique_ptr mgetptr = std::make_unique(kCmdNameMget, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameMget, std::move(mgetptr))); ////KeysCmd - Cmd* keysptr = new KeysCmd(kCmdNameKeys, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameKeys, keysptr)); + std::unique_ptr keysptr = std::make_unique(kCmdNameKeys, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameKeys, std::move(keysptr))); ////SetnxCmd - Cmd* setnxptr = new SetnxCmd(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetnx, setnxptr)); + std::unique_ptr setnxptr = std::make_unique(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameSetnx, std::move(setnxptr))); ////SetexCmd - Cmd* setexptr = new SetexCmd(kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetex, setexptr)); + std::unique_ptr setexptr = std::make_unique(kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameSetex, std::move(setexptr))); ////PsetexCmd - Cmd* psetexptr = new PsetexCmd(kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePsetex, psetexptr)); + std::unique_ptr psetexptr = std::make_unique(kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePsetex, std::move(psetexptr))); ////DelvxCmd - Cmd* delvxptr = new DelvxCmd(kCmdNameDelvx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDelvx, delvxptr)); + std::unique_ptr delvxptr = std::make_unique(kCmdNameDelvx, 3, 
kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameDelvx, std::move(delvxptr))); ////MSetCmd - Cmd* msetptr = new MsetCmd(kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMset, msetptr)); + std::unique_ptr msetptr = std::make_unique(kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameMset, std::move(msetptr))); ////MSetnxCmd - Cmd* msetnxptr = new MsetnxCmd(kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMsetnx, msetnxptr)); + std::unique_ptr msetnxptr = std::make_unique(kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameMsetnx, std::move(msetnxptr))); ////GetrangeCmd - Cmd* getrangeptr = new GetrangeCmd(kCmdNameGetrange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGetrange, getrangeptr)); + std::unique_ptr getrangeptr = std::make_unique(kCmdNameGetrange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameGetrange, std::move(getrangeptr))); ////SetrangeCmd - Cmd* setrangeptr = new SetrangeCmd(kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetrange, setrangeptr)); + std::unique_ptr setrangeptr = std::make_unique(kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameSetrange, std::move(setrangeptr))); ////StrlenCmd - Cmd* strlenptr = new StrlenCmd(kCmdNameStrlen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameStrlen, strlenptr)); + std::unique_ptr strlenptr = std::make_unique(kCmdNameStrlen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + 
cmd_table->insert(std::pair>(kCmdNameStrlen, std::move(strlenptr))); ////ExistsCmd - Cmd* existsptr = new ExistsCmd(kCmdNameExists, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExists, existsptr)); + std::unique_ptr existsptr = std::make_unique(kCmdNameExists, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameExists, std::move(existsptr))); ////ExpireCmd - Cmd* expireptr = new ExpireCmd(kCmdNameExpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExpire, expireptr)); + std::unique_ptr expireptr = std::make_unique(kCmdNameExpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameExpire, std::move(expireptr))); ////PexpireCmd - Cmd* pexpireptr = new PexpireCmd(kCmdNamePexpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePexpire, pexpireptr)); + std::unique_ptr pexpireptr = std::make_unique(kCmdNamePexpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePexpire, std::move(pexpireptr))); ////ExpireatCmd - Cmd* expireatptr = new ExpireatCmd(kCmdNameExpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExpireat, expireatptr)); + std::unique_ptr expireatptr = std::make_unique(kCmdNameExpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameExpireat, std::move(expireatptr))); ////PexpireatCmd - Cmd* pexpireatptr = new PexpireatCmd(kCmdNamePexpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePexpireat, pexpireatptr)); + std::unique_ptr pexpireatptr = std::make_unique(kCmdNamePexpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePexpireat, 
std::move(pexpireatptr))); ////TtlCmd - Cmd* ttlptr = new TtlCmd(kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameTtl, ttlptr)); + std::unique_ptr ttlptr = std::make_unique(kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameTtl, std::move(ttlptr))); ////PttlCmd - Cmd* pttlptr = new PttlCmd(kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePttl, pttlptr)); + std::unique_ptr pttlptr = std::make_unique(kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePttl, std::move(pttlptr))); ////PersistCmd - Cmd* persistptr = new PersistCmd(kCmdNamePersist, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePersist, persistptr)); + std::unique_ptr persistptr = std::make_unique(kCmdNamePersist, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePersist, std::move(persistptr))); ////TypeCmd - Cmd* typeptr = new TypeCmd(kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameType, typeptr)); + std::unique_ptr typeptr = std::make_unique(kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameType, std::move(typeptr))); ////ScanCmd - Cmd* scanptr = new ScanCmd(kCmdNameScan, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameScan, scanptr)); + std::unique_ptr scanptr = std::make_unique(kCmdNameScan, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameScan, std::move(scanptr))); ////ScanxCmd - Cmd* scanxptr = new ScanxCmd(kCmdNameScanx, -3, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameScanx, 
scanxptr)); + std::unique_ptr scanxptr = std::make_unique(kCmdNameScanx, -3, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNameScanx, std::move(scanxptr))); ////PKSetexAtCmd - Cmd* pksetexatptr = new PKSetexAtCmd(kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKSetexAt, pksetexatptr)); + std::unique_ptr pksetexatptr = std::make_unique(kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePKSetexAt, std::move(pksetexatptr))); ////PKScanRange - Cmd* pkscanrangeptr = - new PKScanRangeCmd(kCmdNamePKScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKScanRange, pkscanrangeptr)); + std::unique_ptr pkscanrangeptr = std::make_unique(kCmdNamePKScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePKScanRange, std::move(pkscanrangeptr))); ////PKRScanRange - Cmd* pkrscanrangeptr = - new PKRScanRangeCmd(kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKRScanRange, pkrscanrangeptr)); + std::unique_ptr pkrscanrangeptr = std::make_unique(kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); + cmd_table->insert(std::pair>(kCmdNamePKRScanRange, std::move(pkrscanrangeptr))); // Hash ////HDelCmd - Cmd* hdelptr = new HDelCmd(kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHDel, hdelptr)); + std::unique_ptr hdelptr = std::make_unique(kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHDel, std::move(hdelptr))); ////HSetCmd - Cmd* hsetptr = new HSetCmd(kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - 
cmd_table->insert(std::pair(kCmdNameHSet, hsetptr)); + std::unique_ptr hsetptr = std::make_unique(kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHSet, std::move(hsetptr))); ////HGetCmd - Cmd* hgetptr = new HGetCmd(kCmdNameHGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHGet, hgetptr)); + std::unique_ptr hgetptr = std::make_unique(kCmdNameHGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHGet, std::move(hgetptr))); ////HGetallCmd - Cmd* hgetallptr = new HGetallCmd(kCmdNameHGetall, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHGetall, hgetallptr)); + std::unique_ptr hgetallptr = std::make_unique(kCmdNameHGetall, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHGetall, std::move(hgetallptr))); ////HExistsCmd - Cmd* hexistsptr = new HExistsCmd(kCmdNameHExists, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHExists, hexistsptr)); + std::unique_ptr hexistsptr = std::make_unique(kCmdNameHExists, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHExists, std::move(hexistsptr))); ////HIncrbyCmd - Cmd* hincrbyptr = new HIncrbyCmd(kCmdNameHIncrby, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHIncrby, hincrbyptr)); + std::unique_ptr hincrbyptr = std::make_unique(kCmdNameHIncrby, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHIncrby, std::move(hincrbyptr))); ////HIncrbyfloatCmd - Cmd* hincrbyfloatptr = - new HIncrbyfloatCmd(kCmdNameHIncrbyfloat, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHIncrbyfloat, hincrbyfloatptr)); + 
std::unique_ptr hincrbyfloatptr = std::make_unique(kCmdNameHIncrbyfloat, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHIncrbyfloat, std::move(hincrbyfloatptr))); ////HKeysCmd - Cmd* hkeysptr = new HKeysCmd(kCmdNameHKeys, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHKeys, hkeysptr)); + std::unique_ptr hkeysptr = std::make_unique(kCmdNameHKeys, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHKeys, std::move(hkeysptr))); ////HLenCmd - Cmd* hlenptr = new HLenCmd(kCmdNameHLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHLen, hlenptr)); + std::unique_ptr hlenptr = std::make_unique(kCmdNameHLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHLen, std::move(hlenptr))); ////HMgetCmd - Cmd* hmgetptr = new HMgetCmd(kCmdNameHMget, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHMget, hmgetptr)); + std::unique_ptr hmgetptr = std::make_unique(kCmdNameHMget, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHMget, std::move(hmgetptr))); ////HMsetCmd - Cmd* hmsetptr = new HMsetCmd(kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHMset, hmsetptr)); + std::unique_ptr hmsetptr = std::make_unique(kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHMset, std::move(hmsetptr))); ////HSetnxCmd - Cmd* hsetnxptr = new HSetnxCmd(kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHSetnx, hsetnxptr)); + std::unique_ptr hsetnxptr = std::make_unique(kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | 
kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHSetnx, std::move(hsetnxptr))); ////HStrlenCmd - Cmd* hstrlenptr = new HStrlenCmd(kCmdNameHStrlen, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHStrlen, hstrlenptr)); + std::unique_ptr hstrlenptr = std::make_unique(kCmdNameHStrlen, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHStrlen, std::move(hstrlenptr))); ////HValsCmd - Cmd* hvalsptr = new HValsCmd(kCmdNameHVals, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHVals, hvalsptr)); + std::unique_ptr hvalsptr = std::make_unique(kCmdNameHVals, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHVals, std::move(hvalsptr))); ////HScanCmd - Cmd* hscanptr = new HScanCmd(kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHScan, hscanptr)); + std::unique_ptr hscanptr = std::make_unique(kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHScan, std::move(hscanptr))); ////HScanxCmd - Cmd* hscanxptr = new HScanxCmd(kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHScanx, hscanxptr)); + std::unique_ptr hscanxptr = std::make_unique(kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNameHScanx, std::move(hscanxptr))); ////PKHScanRange - Cmd* pkhscanrangeptr = - new PKHScanRangeCmd(kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNamePKHScanRange, pkhscanrangeptr)); + std::unique_ptr pkhscanrangeptr = std::make_unique(kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + 
cmd_table->insert(std::pair>(kCmdNamePKHScanRange, std::move(pkhscanrangeptr))); ////PKHRScanRange - Cmd* pkhrscanrangeptr = - new PKHRScanRangeCmd(kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNamePKHRScanRange, pkhrscanrangeptr)); + std::unique_ptr pkhrscanrangeptr = std::make_unique(kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); + cmd_table->insert(std::pair>(kCmdNamePKHRScanRange, std::move(pkhrscanrangeptr))); // List - Cmd* lindexptr = new LIndexCmd(kCmdNameLIndex, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLIndex, lindexptr)); - Cmd* linsertptr = new LInsertCmd(kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLInsert, linsertptr)); - Cmd* llenptr = new LLenCmd(kCmdNameLLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLLen, llenptr)); - Cmd* lpopptr = new LPopCmd(kCmdNameLPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPop, lpopptr)); - Cmd* lpushptr = new LPushCmd(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPush, lpushptr)); - Cmd* lpushxptr = new LPushxCmd(kCmdNameLPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPushx, lpushxptr)); - Cmd* lrangeptr = new LRangeCmd(kCmdNameLRange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLRange, lrangeptr)); - Cmd* lremptr = new LRemCmd(kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLRem, lremptr)); - Cmd* lsetptr = new LSetCmd(kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - 
cmd_table->insert(std::pair(kCmdNameLSet, lsetptr)); - Cmd* ltrimptr = new LTrimCmd(kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLTrim, ltrimptr)); - Cmd* rpopptr = new RPopCmd(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPop, rpopptr)); - Cmd* rpoplpushptr = new RPopLPushCmd(kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPopLPush, rpoplpushptr)); - Cmd* rpushptr = new RPushCmd(kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPush, rpushptr)); - Cmd* rpushxptr = new RPushxCmd(kCmdNameRPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPushx, rpushxptr)); + std::unique_ptr lindexptr = std::make_unique(kCmdNameLIndex, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLIndex, std::move(lindexptr))); + std::unique_ptr linsertptr = std::make_unique(kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLInsert, std::move(linsertptr))); + std::unique_ptr llenptr = std::make_unique(kCmdNameLLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLLen, std::move(llenptr))); + std::unique_ptr lpopptr = std::make_unique(kCmdNameLPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLPop, std::move(lpopptr))); + std::unique_ptr lpushptr = std::make_unique(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLPush, std::move(lpushptr))); + std::unique_ptr lpushxptr = std::make_unique(kCmdNameLPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + 
cmd_table->insert(std::pair>(kCmdNameLPushx, std::move(lpushxptr))); + std::unique_ptr lrangeptr = std::make_unique(kCmdNameLRange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLRange, std::move(lrangeptr))); + std::unique_ptr lremptr = std::make_unique(kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLRem, std::move(lremptr))); + std::unique_ptr lsetptr = std::make_unique(kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLSet, std::move(lsetptr))); + std::unique_ptr ltrimptr = std::make_unique(kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameLTrim, std::move(ltrimptr))); + std::unique_ptr rpopptr = std::make_unique(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameRPop, std::move(rpopptr))); + std::unique_ptr rpoplpushptr = std::make_unique(kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameRPopLPush, std::move(rpoplpushptr))); + std::unique_ptr rpushptr = std::make_unique(kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameRPush, std::move(rpushptr))); + std::unique_ptr rpushxptr = std::make_unique(kCmdNameRPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); + cmd_table->insert(std::pair>(kCmdNameRPushx, std::move(rpushxptr))); // Zset ////ZAddCmd - Cmd* zaddptr = new ZAddCmd(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZAdd, zaddptr)); + std::unique_ptr zaddptr = std::make_unique(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZAdd, std::move(zaddptr))); 
////ZCardCmd - Cmd* zcardptr = new ZCardCmd(kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZCard, zcardptr)); + std::unique_ptr zcardptr = std::make_unique(kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZCard, std::move(zcardptr))); ////ZScanCmd - Cmd* zscanptr = new ZScanCmd(kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZScan, zscanptr)); + std::unique_ptr zscanptr = std::make_unique(kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZScan, std::move(zscanptr))); ////ZIncrbyCmd - Cmd* zincrbyptr = new ZIncrbyCmd(kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZIncrby, zincrbyptr)); + std::unique_ptr zincrbyptr = std::make_unique(kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZIncrby, std::move(zincrbyptr))); ////ZRangeCmd - Cmd* zrangeptr = new ZRangeCmd(kCmdNameZRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRange, zrangeptr)); + std::unique_ptr zrangeptr = std::make_unique(kCmdNameZRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRange, std::move(zrangeptr))); ////ZRevrangeCmd - Cmd* zrevrangeptr = new ZRevrangeCmd(kCmdNameZRevrange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrange, zrevrangeptr)); + std::unique_ptr zrevrangeptr = std::make_unique(kCmdNameZRevrange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRevrange, std::move(zrevrangeptr))); ////ZRangebyscoreCmd - Cmd* zrangebyscoreptr = - new 
ZRangebyscoreCmd(kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRangebyscore, zrangebyscoreptr)); + std::unique_ptr zrangebyscoreptr = std::make_unique(kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRangebyscore, std::move(zrangebyscoreptr))); ////ZRevrangebyscoreCmd - Cmd* zrevrangebyscoreptr = - new ZRevrangebyscoreCmd(kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrangebyscore, zrevrangebyscoreptr)); + std::unique_ptr zrevrangebyscoreptr = std::make_unique(kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRevrangebyscore, std::move(zrevrangebyscoreptr))); ////ZCountCmd - Cmd* zcountptr = new ZCountCmd(kCmdNameZCount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZCount, zcountptr)); + std::unique_ptr zcountptr = std::make_unique(kCmdNameZCount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZCount, std::move(zcountptr))); ////ZRemCmd - Cmd* zremptr = new ZRemCmd(kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRem, zremptr)); + std::unique_ptr zremptr = std::make_unique(kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRem, std::move(zremptr))); ////ZUnionstoreCmd - Cmd* zunionstoreptr = - new ZUnionstoreCmd(kCmdNameZUnionstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZUnionstore, zunionstoreptr)); + std::unique_ptr zunionstoreptr = std::make_unique(kCmdNameZUnionstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); + 
cmd_table->insert(std::pair>(kCmdNameZUnionstore, std::move(zunionstoreptr))); ////ZInterstoreCmd - Cmd* zinterstoreptr = - new ZInterstoreCmd(kCmdNameZInterstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZInterstore, zinterstoreptr)); + std::unique_ptr zinterstoreptr = std::make_unique(kCmdNameZInterstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZInterstore, std::move(zinterstoreptr))); ////ZRankCmd - Cmd* zrankptr = new ZRankCmd(kCmdNameZRank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRank, zrankptr)); + std::unique_ptr zrankptr = std::make_unique(kCmdNameZRank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRank, std::move(zrankptr))); ////ZRevrankCmd - Cmd* zrevrankptr = new ZRevrankCmd(kCmdNameZRevrank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrank, zrevrankptr)); + std::unique_ptr zrevrankptr = std::make_unique(kCmdNameZRevrank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRevrank, std::move(zrevrankptr))); ////ZScoreCmd - Cmd* zscoreptr = new ZScoreCmd(kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZScore, zscoreptr)); + std::unique_ptr zscoreptr = std::make_unique(kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZScore, std::move(zscoreptr))); ////ZRangebylexCmd - Cmd* zrangebylexptr = - new ZRangebylexCmd(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRangebylex, zrangebylexptr)); + std::unique_ptr zrangebylexptr = std::make_unique(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | 
kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRangebylex, std::move(zrangebylexptr))); ////ZRevrangebylexCmd - Cmd* zrevrangebylexptr = - new ZRevrangebylexCmd(kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrangebylex, zrevrangebylexptr)); + std::unique_ptr zrevrangebylexptr = std::make_unique(kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRevrangebylex, std::move(zrevrangebylexptr))); ////ZLexcountCmd - Cmd* zlexcountptr = new ZLexcountCmd(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZLexcount, zlexcountptr)); + std::unique_ptr zlexcountptr = std::make_unique(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZLexcount, std::move(zlexcountptr))); ////ZRemrangebyrankCmd - Cmd* zremrangebyrankptr = - new ZRemrangebyrankCmd(kCmdNameZRemrangebyrank, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebyrank, zremrangebyrankptr)); + std::unique_ptr zremrangebyrankptr = std::make_unique(kCmdNameZRemrangebyrank, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRemrangebyrank, std::move(zremrangebyrankptr))); ////ZRemrangebyscoreCmd - Cmd* zremrangebyscoreptr = - new ZRemrangebyscoreCmd(kCmdNameZRemrangebyscore, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebyscore, zremrangebyscoreptr)); + std::unique_ptr zremrangebyscoreptr = std::make_unique(kCmdNameZRemrangebyscore, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRemrangebyscore, std::move(zremrangebyscoreptr))); ////ZRemrangebylexCmd - Cmd* zremrangebylexptr = - new 
ZRemrangebylexCmd(kCmdNameZRemrangebylex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebylex, zremrangebylexptr)); + std::unique_ptr zremrangebylexptr = std::make_unique(kCmdNameZRemrangebylex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZRemrangebylex, std::move(zremrangebylexptr))); ////ZPopmax - Cmd* zpopmaxptr = new ZPopmaxCmd(kCmdNameZPopmax, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZPopmax, zpopmaxptr)); + std::unique_ptr zpopmaxptr = std::make_unique(kCmdNameZPopmax, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZPopmax, std::move(zpopmaxptr))); ////ZPopmin - Cmd* zpopminptr = new ZPopminCmd(kCmdNameZPopmin, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZPopmin, zpopminptr)); + std::unique_ptr zpopminptr = std::make_unique(kCmdNameZPopmin, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); + cmd_table->insert(std::pair>(kCmdNameZPopmin, std::move(zpopminptr))); // Set ////SAddCmd - Cmd* saddptr = new SAddCmd(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSAdd, saddptr)); + std::unique_ptr saddptr = std::make_unique(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSAdd, std::move(saddptr))); ////SPopCmd - Cmd* spopptr = new SPopCmd(kCmdNameSPop, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSPop, spopptr)); + std::unique_ptr spopptr = std::make_unique(kCmdNameSPop, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSPop, std::move(spopptr))); ////SCardCmd - Cmd* scardptr = new SCardCmd(kCmdNameSCard, 2, kCmdFlagsRead | 
kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSCard, scardptr)); + std::unique_ptr scardptr = std::make_unique(kCmdNameSCard, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSCard, std::move(scardptr))); ////SMembersCmd - Cmd* smembersptr = new SMembersCmd(kCmdNameSMembers, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSMembers, smembersptr)); + std::unique_ptr smembersptr = std::make_unique(kCmdNameSMembers, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSMembers, std::move(smembersptr))); ////SScanCmd - Cmd* sscanptr = new SScanCmd(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSScan, sscanptr)); + std::unique_ptr sscanptr = std::make_unique(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSScan, std::move(sscanptr))); ////SRemCmd - Cmd* sremptr = new SRemCmd(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSRem, sremptr)); + std::unique_ptr sremptr = std::make_unique(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSRem, std::move(sremptr))); ////SUnionCmd - Cmd* sunionptr = new SUnionCmd(kCmdNameSUnion, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSUnion, sunionptr)); + std::unique_ptr sunionptr = std::make_unique(kCmdNameSUnion, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSUnion, std::move(sunionptr))); ////SUnionstoreCmd - Cmd* sunionstoreptr = - new SUnionstoreCmd(kCmdNameSUnionstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSUnionstore, 
sunionstoreptr)); + std::unique_ptr sunionstoreptr = std::make_unique(kCmdNameSUnionstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSUnionstore, std::move(sunionstoreptr))); ////SInterCmd - Cmd* sinterptr = new SInterCmd(kCmdNameSInter, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSInter, sinterptr)); + std::unique_ptr sinterptr = std::make_unique(kCmdNameSInter, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSInter, std::move(sinterptr))); ////SInterstoreCmd - Cmd* sinterstoreptr = - new SInterstoreCmd(kCmdNameSInterstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSInterstore, sinterstoreptr)); + std::unique_ptr sinterstoreptr = std::make_unique(kCmdNameSInterstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSInterstore, std::move(sinterstoreptr))); ////SIsmemberCmd - Cmd* sismemberptr = new SIsmemberCmd(kCmdNameSIsmember, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSIsmember, sismemberptr)); + std::unique_ptr sismemberptr = std::make_unique(kCmdNameSIsmember, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSIsmember, std::move(sismemberptr))); ////SDiffCmd - Cmd* sdiffptr = new SDiffCmd(kCmdNameSDiff, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSDiff, sdiffptr)); + std::unique_ptr sdiffptr = std::make_unique(kCmdNameSDiff, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSDiff, std::move(sdiffptr))); ////SDiffstoreCmd - Cmd* sdiffstoreptr = - new SDiffstoreCmd(kCmdNameSDiffstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - 
cmd_table->insert(std::pair(kCmdNameSDiffstore, sdiffstoreptr)); + std::unique_ptr sdiffstoreptr = std::make_unique(kCmdNameSDiffstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSDiffstore, std::move(sdiffstoreptr))); ////SMoveCmd - Cmd* smoveptr = new SMoveCmd(kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSMove, smoveptr)); + std::unique_ptr smoveptr = std::make_unique(kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSMove, std::move(smoveptr))); ////SRandmemberCmd - Cmd* srandmemberptr = - new SRandmemberCmd(kCmdNameSRandmember, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSRandmember, srandmemberptr)); + std::unique_ptr srandmemberptr = std::make_unique(kCmdNameSRandmember, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); + cmd_table->insert(std::pair>(kCmdNameSRandmember, std::move(srandmemberptr))); // BitMap ////bitsetCmd - Cmd* bitsetptr = new BitSetCmd(kCmdNameBitSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitSet, bitsetptr)); + std::unique_ptr bitsetptr = std::make_unique(kCmdNameBitSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsBit); + cmd_table->insert(std::pair>(kCmdNameBitSet, std::move(bitsetptr))); ////bitgetCmd - Cmd* bitgetptr = new BitGetCmd(kCmdNameBitGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitGet, bitgetptr)); + std::unique_ptr bitgetptr = std::make_unique(kCmdNameBitGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); + cmd_table->insert(std::pair>(kCmdNameBitGet, std::move(bitgetptr))); ////bitcountCmd - Cmd* bitcountptr = new BitCountCmd(kCmdNameBitCount, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - 
cmd_table->insert(std::pair(kCmdNameBitCount, bitcountptr)); + std::unique_ptr bitcountptr = std::make_unique(kCmdNameBitCount, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); + cmd_table->insert(std::pair>(kCmdNameBitCount, std::move(bitcountptr))); ////bitposCmd - Cmd* bitposptr = new BitPosCmd(kCmdNameBitPos, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitPos, bitposptr)); + std::unique_ptr bitposptr = std::make_unique(kCmdNameBitPos, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); + cmd_table->insert(std::pair>(kCmdNameBitPos, std::move(bitposptr))); ////bitopCmd - Cmd* bitopptr = new BitOpCmd(kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitOp, bitopptr)); + std::unique_ptr bitopptr = std::make_unique(kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsBit); + cmd_table->insert(std::pair>(kCmdNameBitOp, std::move(bitopptr))); // HyperLogLog ////pfaddCmd - Cmd* pfaddptr = new PfAddCmd(kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfAdd, pfaddptr)); + std::unique_ptr pfaddptr = std::make_unique(kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHyperLogLog); + cmd_table->insert(std::pair>(kCmdNamePfAdd, std::move(pfaddptr))); ////pfcountCmd - Cmd* pfcountptr = new PfCountCmd(kCmdNamePfCount, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfCount, pfcountptr)); + std::unique_ptr pfcountptr = std::make_unique(kCmdNamePfCount, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); + cmd_table->insert(std::pair>(kCmdNamePfCount, std::move(pfcountptr))); ////pfmergeCmd - Cmd* pfmergeptr = - new PfMergeCmd(kCmdNamePfMerge, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); - 
cmd_table->insert(std::pair(kCmdNamePfMerge, pfmergeptr)); + std::unique_ptr pfmergeptr = std::make_unique(kCmdNamePfMerge, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); + cmd_table->insert(std::pair>(kCmdNamePfMerge, std::move(pfmergeptr))); // GEO ////GepAdd - Cmd* geoaddptr = new GeoAddCmd(kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoAdd, geoaddptr)); + std::unique_ptr geoaddptr = std::make_unique(kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsGeo); + cmd_table->insert(std::pair>(kCmdNameGeoAdd, std::move(geoaddptr))); ////GeoPos - Cmd* geoposptr = new GeoPosCmd(kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoPos, geoposptr)); + std::unique_ptr geoposptr = std::make_unique(kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); + cmd_table->insert(std::pair>(kCmdNameGeoPos, std::move(geoposptr))); ////GeoDist - Cmd* geodistptr = new GeoDistCmd(kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoDist, geodistptr)); + std::unique_ptr geodistptr = std::make_unique(kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); + cmd_table->insert(std::pair>(kCmdNameGeoDist, std::move(geodistptr))); ////GeoHash - Cmd* geohashptr = new GeoHashCmd(kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoHash, geohashptr)); + std::unique_ptr geohashptr = std::make_unique(kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); + cmd_table->insert(std::pair>(kCmdNameGeoHash, std::move(geohashptr))); ////GeoRadius - Cmd* georadiusptr = new GeoRadiusCmd(kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoRadius, 
georadiusptr)); + std::unique_ptr georadiusptr = std::make_unique(kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); + cmd_table->insert(std::pair>(kCmdNameGeoRadius, std::move(georadiusptr))); ////GeoRadiusByMember - Cmd* georadiusbymemberptr = - new GeoRadiusByMemberCmd(kCmdNameGeoRadiusByMember, -5, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoRadiusByMember, georadiusbymemberptr)); + std::unique_ptr georadiusbymemberptr = std::make_unique(kCmdNameGeoRadiusByMember, -5, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); + cmd_table->insert(std::pair>(kCmdNameGeoRadiusByMember, std::move(georadiusbymemberptr))); // PubSub ////Publish - Cmd* publishptr = new PublishCmd(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePublish, publishptr)); + std::unique_ptr publishptr = std::make_unique(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub); + cmd_table->insert(std::pair>(kCmdNamePublish, std::move(publishptr))); ////Subscribe - Cmd* subscribeptr = new SubscribeCmd(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNameSubscribe, subscribeptr)); + std::unique_ptr subscribeptr = std::make_unique(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); + cmd_table->insert(std::pair>(kCmdNameSubscribe, std::move(subscribeptr))); ////UnSubscribe - Cmd* unsubscribeptr = new UnSubscribeCmd(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNameUnSubscribe, unsubscribeptr)); + std::unique_ptr unsubscribeptr = std::make_unique(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); + cmd_table->insert(std::pair>(kCmdNameUnSubscribe, std::move(unsubscribeptr))); ////PSubscribe - Cmd* psubscribeptr = new PSubscribeCmd(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePSubscribe, psubscribeptr)); + std::unique_ptr 
psubscribeptr = std::make_unique(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); + cmd_table->insert(std::pair>(kCmdNamePSubscribe, std::move(psubscribeptr))); ////PUnSubscribe - Cmd* punsubscribeptr = new PUnSubscribeCmd(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePUnSubscribe, punsubscribeptr)); + std::unique_ptr punsubscribeptr = std::make_unique(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); + cmd_table->insert(std::pair>(kCmdNamePUnSubscribe, std::move(punsubscribeptr))); ////PubSub - Cmd* pubsubptr = new PubSubCmd(kCmdNamePubSub, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePubSub, pubsubptr)); + std::unique_ptr pubsubptr = std::make_unique(kCmdNamePubSub, -2, kCmdFlagsRead | kCmdFlagsPubSub); + cmd_table->insert(std::pair>(kCmdNamePubSub, std::move(pubsubptr))); } Cmd* GetCmdFromTable(const std::string& opt, const CmdTable& cmd_table) { auto it = cmd_table.find(opt); if (it != cmd_table.end()) { - return it->second; + return it->second.get(); } return nullptr; } -void DestoryCmdTable(CmdTable* cmd_table) { - auto it = cmd_table->begin(); - for (; it != cmd_table->end(); ++it) { - delete it->second; - } -} - void Cmd::Initial(const PikaCmdArgsType& argv, const std::string& table_name) { argv_ = argv; table_name_ = table_name; @@ -522,12 +494,8 @@ void Cmd::Execute() { ProcessFlushAllCmd(); } else if (name_ == kCmdNameInfo || name_ == kCmdNameConfig) { ProcessDoNotSpecifyPartitionCmd(); - } else if (is_single_partition()) { - ProcessSinglePartitionCmd(); - } else if (is_multi_partition()) { - ProcessMultiPartitionCmd(); } else { - ProcessDoNotSpecifyPartitionCmd(); + ProcessSinglePartitionCmd(); } } @@ -582,7 +550,6 @@ void Cmd::ProcessFlushAllCmd() { void Cmd::ProcessSinglePartitionCmd() { std::shared_ptr partition; - //a table has only one partition partition = g_pika_server->GetPartitionByDbName(table_name_); if (!partition) { @@ -638,6 +605,7 @@ void 
Cmd::DoCommand(const std::shared_ptr& partition, const HintKeys& if (!is_suspend()) { partition->DbRWLockReader(); } + Do(partition); if (!is_suspend()) { @@ -738,7 +706,7 @@ bool Cmd::is_single_partition() const { return ((flag_ & kCmdFlagsMaskPartition) bool Cmd::is_multi_partition() const { return ((flag_ & kCmdFlagsMaskPartition) == kCmdFlagsMultiPartition); } bool Cmd::HashtagIsConsistent(const std::string& lhs, const std::string& rhs) const { - return GetHashkey(lhs) == GetHashkey(rhs); + return true; } std::string Cmd::name() const { return name_; } diff --git a/src/pika_conf.cc b/src/pika_conf.cc index 2e4bce81c0..a81272509e 100644 --- a/src/pika_conf.cc +++ b/src/pika_conf.cc @@ -16,9 +16,8 @@ using pstd::Status; -PikaConf::PikaConf(const std::string& path) : pstd::BaseConf(path), conf_path_(path) { local_meta_ = new PikaMeta(); } - -PikaConf::~PikaConf() { delete local_meta_; } +PikaConf::PikaConf(const std::string& path) + : pstd::BaseConf(path), conf_path_(path), local_meta_(std::make_unique()) {} Status PikaConf::InternalGetTargetTable(const std::string& table_name, uint32_t* const target) { int32_t table_index = -1; @@ -136,7 +135,7 @@ Status PikaConf::DelTableSanityCheck(const std::string& table_name) { int PikaConf::Load() { int ret = LoadConf(); - if (ret != 0) { + if (ret) { return ret; } @@ -242,14 +241,19 @@ int PikaConf::Load() { sync_thread_num_ = 24; } - GetConfInt("databases", &databases_); - if (databases_ < 1 || databases_ > 8) { - LOG(FATAL) << "config databases error, limit [1 ~ 8], the actual is: " << databases_; - } - for (int idx = 0; idx < databases_; ++idx) { - table_structs_.push_back({"db" + std::to_string(idx), 1, {0}}); - } + std::string instance_mode; + GetConfStr("instance-mode", &instance_mode); + classic_mode_.store(instance_mode.empty() || !strcasecmp(instance_mode.data(), "classic")); + if (classic_mode_.load()) { + GetConfInt("databases", &databases_); + if (databases_ < 1 || databases_ > 8) { + LOG(FATAL) << "config 
databases error, limit [1 ~ 8], the actual is: " << databases_; + } + for (int idx = 0; idx < databases_; ++idx) { + table_structs_.push_back({"db" + std::to_string(idx), 1, {0}}); + } + } default_table_ = table_structs_[0].table_name; int tmp_replication_num = 0; @@ -268,7 +272,7 @@ int PikaConf::Load() { << " [0..." << replication_num_.load() << "]"; } consensus_level_.store(tmp_consensus_level); - if ((consensus_level_.load() != 0 || replication_num_.load() != 0)) { + if (classic_mode_.load() && (consensus_level_.load() != 0 || replication_num_.load() != 0)) { LOG(FATAL) << "consensus-level & replication-num only configurable under sharding mode," << " set it to be 0 if you are using classic mode"; } @@ -349,13 +353,13 @@ int PikaConf::Load() { // rate-limiter-refill-period-us GetConfInt64("rate-limiter-refill-period-us", &rate_limiter_refill_period_us_); - if (rate_limiter_refill_period_us_ <= 0 ) { + if (rate_limiter_refill_period_us_ <= 0) { rate_limiter_refill_period_us_ = 100 * 1000; } // rate-limiter-fairness GetConfInt64("rate-limiter-fairness", &rate_limiter_fairness_); - if (rate_limiter_fairness_ <= 0 ) { + if (rate_limiter_fairness_ <= 0) { rate_limiter_fairness_ = 10; } diff --git a/src/pika_consensus.cc b/src/pika_consensus.cc index 22393b0528..f8c832f970 100644 --- a/src/pika_consensus.cc +++ b/src/pika_consensus.cc @@ -16,16 +16,14 @@ using pstd::Status; extern PikaServer* g_pika_server; - -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_conf; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; /* Context */ Context::Context(std::string path) : path_(std::move(path)) {} -Context::~Context() { delete save_; } - Status Context::StableSave() { char* p = save_->GetData(); memcpy(p, &(applied_index_.b_offset.filenum), sizeof(uint32_t)); @@ -40,13 +38,15 @@ Status Context::StableSave() { Status Context::Init() { if (!pstd::FileExists(path_)) { - 
Status s = pstd::NewRWFile(path_, &save_); + Status s = pstd::NewRWFile(path_, save_); if (!s.ok()) { LOG(FATAL) << "Context new file failed " << s.ToString(); } StableSave(); } else { - Status s = pstd::NewRWFile(path_, &save_); + std::unique_ptr tmp_file; + Status s = pstd::NewRWFile(path_, tmp_file); + save_.reset(tmp_file.release()); if (!s.ok()) { LOG(FATAL) << "Context new file failed " << s.ToString(); } @@ -271,7 +271,8 @@ int MemLog::InternalFindLogByBinlogOffset(const LogOffset& offset) { ConsensusCoordinator::ConsensusCoordinator(const std::string& table_name, uint32_t partition_id) : table_name_(table_name), partition_id_(partition_id) { std::string table_log_path = g_pika_conf->log_path() + "log_" + table_name + "/"; - std::string log_path = table_log_path; + std::string log_path = + g_pika_conf->classic_mode() ? table_log_path : table_log_path + std::to_string(partition_id) + "/"; context_ = std::make_shared(log_path + kContext); stable_logger_ = std::make_shared(table_name, partition_id, log_path); mem_logger_ = std::make_shared(); @@ -334,7 +335,7 @@ void ConsensusCoordinator::Init() { LOG(FATAL) << PartitionInfo(table_name_, partition_id_).ToString() << "Redis parser parse failed"; return; } - auto* arg = static_cast(redis_parser.data); + auto arg = static_cast(redis_parser.data); std::shared_ptr cmd_ptr = arg->cmd_ptr; delete arg; redis_parser.data = nullptr; @@ -600,7 +601,7 @@ bool ConsensusCoordinator::MatchConsensusLevel() { } void ConsensusCoordinator::InternalApply(const MemLog::LogItem& log) { - auto* arg = new PikaClientConn::BgTaskArg(); + auto arg = new PikaClientConn::BgTaskArg(); arg->cmd_ptr = log.cmd_ptr; arg->conn_ptr = log.conn_ptr; arg->resp_ptr = log.resp_ptr; @@ -615,7 +616,7 @@ void ConsensusCoordinator::InternalApplyFollower(const MemLog::LogItem& log) { } int ConsensusCoordinator::InitCmd(net::RedisParser* parser, const net::RedisCmdArgsType& argv) { - auto* table_name = static_cast(parser->data); + auto table_name = 
static_cast(parser->data); std::string opt = argv[0]; std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(pstd::StringToLower(opt)); if (!c_ptr) { diff --git a/src/pika_dispatch_thread.cc b/src/pika_dispatch_thread.cc index 8a312db514..2904d18ba7 100644 --- a/src/pika_dispatch_thread.cc +++ b/src/pika_dispatch_thread.cc @@ -11,7 +11,7 @@ #include "include/pika_server.h" #include "pstd/include/testutil.h" - +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; PikaDispatchThread::PikaDispatchThread(std::set& ips, int port, int work_num, int cron_interval, diff --git a/src/pika_geohash.cc b/src/pika_geohash.cc index b42e48d0d5..1874acf36a 100644 --- a/src/pika_geohash.cc +++ b/src/pika_geohash.cc @@ -119,13 +119,15 @@ void geohashGetCoordRange(GeoHashRange* long_range, GeoHashRange* lat_range) { int geohashEncode(const GeoHashRange* long_range, const GeoHashRange* lat_range, double longitude, double latitude, uint8_t step, GeoHashBits* hash) { /* Check basic arguments sanity. */ - if (hash == nullptr || step > 32 || step == 0 || RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) { return 0; -} + if (!hash || step > 32 || step == 0 || RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) { + return 0; + } /* Return an error when trying to index outside the supported * constraints. 
*/ - if (longitude > 180 || longitude < -180 || latitude > 85.05112878 || latitude < -85.05112878) { return 0; -} + if (longitude > 180 || longitude < -180 || latitude > 85.05112878 || latitude < -85.05112878) { + return 0; + } hash->bits = 0; hash->step = step; @@ -191,8 +193,9 @@ int geohashDecodeType(const GeoHashBits hash, GeoHashArea* area) { int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea* area) { return geohashDecodeType(hash, area); } int geohashDecodeAreaToLongLat(const GeoHashArea* area, double* xy) { - if (xy == nullptr) { return 0; -} + if (!xy) { + return 0; + } xy[0] = (area->longitude.min + area->longitude.max) / 2; xy[1] = (area->latitude.min + area->latitude.max) / 2; return 1; @@ -200,16 +203,18 @@ int geohashDecodeAreaToLongLat(const GeoHashArea* area, double* xy) { int geohashDecodeToLongLatType(const GeoHashBits hash, double* xy) { GeoHashArea area = {{0}}; - if ((xy == nullptr) || (geohashDecodeType(hash, &area) == 0)) { return 0; -} + if (!xy || !(geohashDecodeType(hash, &area))) { + return 0; + } return geohashDecodeAreaToLongLat(&area, xy); } int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double* xy) { return geohashDecodeToLongLatType(hash, xy); } static void geohash_move_x(GeoHashBits* hash, int8_t d) { - if (d == 0) { return; -} + if (d == 0) { + return; + } uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; uint64_t y = hash->bits & 0x5555555555555555ULL; @@ -228,8 +233,9 @@ static void geohash_move_x(GeoHashBits* hash, int8_t d) { } static void geohash_move_y(GeoHashBits* hash, int8_t d) { - if (d == 0) { return; -} + if (d == 0) { + return; + } uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; uint64_t y = hash->bits & 0x5555555555555555ULL; diff --git a/src/pika_geohash_helper.cc b/src/pika_geohash_helper.cc index 16ae0901f8..e7f861659b 100644 --- a/src/pika_geohash_helper.cc +++ b/src/pika_geohash_helper.cc @@ -60,8 +60,9 @@ static inline double rad_deg(double ang) { return ang / D_R; } /* This function is used in 
order to estimate the step (bits precision) * of the 9 search area boxes during radius queries. */ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { - if (range_meters == 0) { return 26; -} + if (range_meters == 0) { + return 26; + } int step = 1; while (range_meters < MERCATOR_MAX) { range_meters *= 2; @@ -74,15 +75,18 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { * at this latitude, but this does the trick for now. */ if (lat > 66 || lat < -66) { step--; - if (lat > 80 || lat < -80) { step--; -} + if (lat > 80 || lat < -80) { + step--; + } } /* Frame to valid range. */ - if (step < 1) { step = 1; -} - if (step > 26) { step = 26; -} + if (step < 1) { + step = 1; + } + if (step > 26) { + step = 26; + } return step; } @@ -105,8 +109,9 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { * optimization is not used for very big radiuses, however the function * should be fixed. */ int geohashBoundingBox(double longitude, double latitude, double radius_meters, double* bounds) { - if (bounds == nullptr) { return 0; -} + if (!bounds) { + return 0; + } bounds[0] = longitude - rad_deg(radius_meters / EARTH_RADIUS_IN_METERS / cos(deg_rad(latitude))); bounds[2] = longitude + rad_deg(radius_meters / EARTH_RADIUS_IN_METERS / cos(deg_rad(latitude))); @@ -211,7 +216,7 @@ GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, do return geohashGetAreasByRadius(longitude, latitude, radius_meters); } -GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits &hash) { +GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits& hash) { uint64_t bits = hash.bits; bits <<= (52 - hash.step * 2); return bits; @@ -236,8 +241,9 @@ double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double* distance) { *distance = geohashGetDistance(x1, y1, x2, y2); - if (*distance > radius) { return 0; 
-} + if (*distance > radius) { + return 0; + } return 1; } diff --git a/src/pika_hash.cc b/src/pika_hash.cc index 2e5f5d2578..c192d0a1c1 100644 --- a/src/pika_hash.cc +++ b/src/pika_hash.cc @@ -9,7 +9,7 @@ #include "include/pika_conf.h" -extern PikaConf* g_pika_conf; +extern std::unique_ptr g_pika_conf; void HDelCmd::DoInitial() { if (!CheckArg(argv_.size())) { diff --git a/src/pika_kv.cc b/src/pika_kv.cc index 81fd70a0f7..dc0d665315 100644 --- a/src/pika_kv.cc +++ b/src/pika_kv.cc @@ -11,7 +11,7 @@ #include "include/pika_conf.h" #include "include/pika_data_distribution.h" -extern PikaConf* g_pika_conf; +extern std::unique_ptr g_pika_conf; /* SET key value [NX] [XX] [EX ] [PX ] */ void SetCmd::DoInitial() { diff --git a/src/pika_meta.cc b/src/pika_meta.cc index 114921eb73..59c280ba62 100644 --- a/src/pika_meta.cc +++ b/src/pika_meta.cc @@ -27,11 +27,10 @@ Status PikaMeta::StableSave(const std::vector& table_structs) { std::string tmp_file = local_meta_file; tmp_file.append("_tmp"); - pstd::RWFile* saver = nullptr; + std::unique_ptr saver; pstd::CreatePath(local_meta_path_); - Status s = pstd::NewRWFile(tmp_file, &saver); + Status s = pstd::NewRWFile(tmp_file, saver); if (!s.ok()) { - delete saver; LOG(WARNING) << "Open local meta file failed"; return Status::Corruption("open local meta file failed"); } @@ -48,7 +47,6 @@ Status PikaMeta::StableSave(const std::vector& table_structs) { std::string meta_str; if (!meta.SerializeToString(&meta_str)) { - delete saver; LOG(WARNING) << "Serialize meta string failed"; return Status::Corruption("serialize meta string failed"); } @@ -60,7 +58,6 @@ Status PikaMeta::StableSave(const std::vector& table_structs) { memcpy(p, &meta_str_size, sizeof(uint32_t)); p += sizeof(uint32_t); strncpy(p, meta_str.data(), meta_str.size()); - delete saver; pstd::DeleteFile(local_meta_file); if (pstd::RenameFile(tmp_file, local_meta_file) != 0) { @@ -78,16 +75,14 @@ Status PikaMeta::ParseMeta(std::vector* const table_structs) { return 
Status::Corruption("meta file not found"); } - pstd::RWFile* reader = nullptr; - Status s = pstd::NewRWFile(local_meta_file, &reader); + std::unique_ptr reader; + Status s = pstd::NewRWFile(local_meta_file, reader); if (!s.ok()) { - delete reader; LOG(WARNING) << "Open local meta file failed"; return Status::Corruption("open local meta file failed"); } - if (reader->GetData() == nullptr) { - delete reader; + if (!reader->GetData()) { LOG(WARNING) << "Meta file init error"; return Status::Corruption("meta file init error"); } @@ -96,18 +91,15 @@ Status PikaMeta::ParseMeta(std::vector* const table_structs) { uint32_t meta_size = 0; memcpy(reinterpret_cast(&version), reader->GetData(), sizeof(uint32_t)); memcpy(reinterpret_cast(&meta_size), reader->GetData() + sizeof(uint32_t), sizeof(uint32_t)); - char* const buf = new char[meta_size]; + auto const buf_ptr = std::make_unique(meta_size); + char* const buf = buf_ptr.get(); memcpy(buf, reader->GetData() + 2 * sizeof(uint32_t), meta_size); InnerMessage::PikaMeta meta; if (!meta.ParseFromArray(buf, meta_size)) { - delete[] buf; - delete reader; LOG(WARNING) << "Parse meta string failed"; return Status::Corruption("parse meta string failed"); } - delete[] buf; - delete reader; table_structs->clear(); for (int idx = 0; idx < meta.table_infos_size(); ++idx) { diff --git a/src/pika_partition.cc b/src/pika_partition.cc index 0adedb7d2c..6ecc9626b4 100644 --- a/src/pika_partition.cc +++ b/src/pika_partition.cc @@ -15,8 +15,10 @@ #include "pstd/include/mutex_impl.h" using pstd::Status; + +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; std::string PartitionPath(const std::string& table_path, uint32_t partition_id) { char buf[100]; @@ -49,12 +51,12 @@ Partition::Partition(const std::string& table_name, uint32_t partition_id, const db_path_ = table_db_path; bgsave_sub_path_ = table_name; dbsync_path_ = 
DbSyncPath(g_pika_conf->db_sync_path(), table_name_, partition_id_); - partition_name_ = table_name ; + partition_name_ = table_name; db_ = std::make_shared(); rocksdb::Status s = db_->Open(g_pika_server->storage_options(), db_path_); - lock_mgr_ = new pstd::lock::LockMgr(1000, 0, std::make_shared()); + lock_mgr_ = std::make_shared(1000, 0, std::make_shared()); opened_ = s.ok(); assert(db_); @@ -64,8 +66,6 @@ Partition::Partition(const std::string& table_name, uint32_t partition_id, const Partition::~Partition() { Close(); - delete bgsave_engine_; - delete lock_mgr_; } void Partition::Leave() { @@ -79,6 +79,7 @@ void Partition::Close() { } std::lock_guard lock(db_rwlock_); db_.reset(); + lock_mgr_.reset(); opened_ = false; } @@ -112,8 +113,9 @@ std::string Partition::GetPartitionName() const { return partition_name_; } std::shared_ptr Partition::db() const { return db_; } void Partition::Compact(const storage::DataType& type) { - if (!opened_) { return; -} + if (!opened_) { + return; + } db_->Compact(type); } @@ -123,7 +125,7 @@ void Partition::DbRWLockReader() { db_rwlock_.lock_shared(); } void Partition::DbRWUnLock() { db_rwlock_.unlock(); } -pstd::lock::LockMgr* Partition::LockMgr() { return lock_mgr_; } +std::shared_ptr Partition::LockMgr() { return lock_mgr_; } void Partition::PrepareRsync() { pstd::DeleteDirIfExist(dbsync_path_); @@ -285,7 +287,7 @@ void Partition::BgSavePartition() { return; } bgsave_info_.bgsaving = true; - auto* bg_task_arg = new BgTaskArg(); + auto bg_task_arg = new BgTaskArg(); bg_task_arg->partition = shared_from_this(); g_pika_server->BGSaveTaskSchedule(&DoBgSave, static_cast(bg_task_arg)); } @@ -296,7 +298,7 @@ BgSaveInfo Partition::bgsave_info() { } void Partition::DoBgSave(void* arg) { - auto* bg_task_arg = static_cast(arg); + std::unique_ptr bg_task_arg(static_cast(arg)); // Do BgSave bool success = bg_task_arg->partition->RunBgsaveEngine(); @@ -322,7 +324,6 @@ void Partition::DoBgSave(void* arg) { } 
bg_task_arg->partition->FinishBgsave(); - delete bg_task_arg; } bool Partition::RunBgsaveEngine() { @@ -374,8 +375,8 @@ bool Partition::InitBgsaveEnv() { // Prepare bgsave env, need bgsave_protector protect bool Partition::InitBgsaveEngine() { - delete bgsave_engine_; - rocksdb::Status s = storage::BackupEngine::Open(db().get(), &bgsave_engine_); + bgsave_engine_.reset(); + rocksdb::Status s = storage::BackupEngine::Open(db().get(), bgsave_engine_); if (!s.ok()) { LOG(WARNING) << partition_name_ << " open backup engine failed " << s.ToString(); return false; @@ -506,3 +507,4 @@ Status Partition::GetKeyNum(std::vector* key_info) { key_scan_info_.duration = time(nullptr) - key_scan_info_.start_time; return Status::OK(); } + diff --git a/src/pika_repl_bgworker.cc b/src/pika_repl_bgworker.cc index ebcdb6dc5a..f0a26a8acc 100644 --- a/src/pika_repl_bgworker.cc +++ b/src/pika_repl_bgworker.cc @@ -11,11 +11,12 @@ #include "include/pika_conf.h" #include "include/pika_rm.h" #include "include/pika_server.h" +#include "pstd/include/pstd_defer.h" - +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; PikaReplBgWorker::PikaReplBgWorker(int queue_size) : bg_thread_(queue_size) { bg_thread_.set_thread_name("ReplBgWorker"); @@ -43,13 +44,18 @@ void PikaReplBgWorker::ParseBinlogOffset(const InnerMessage::BinlogOffset& pb_of } void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { - auto* task_arg = static_cast(arg); + auto task_arg = static_cast(arg); const std::shared_ptr res = task_arg->res; std::shared_ptr conn = task_arg->conn; - auto* index = static_cast*>(task_arg->res_private_data); + auto index = static_cast*>(task_arg->res_private_data); PikaReplBgWorker* worker = task_arg->worker; worker->ip_port_ = conn->ip_port(); + DEFER { + delete index; + delete task_arg; + }; + 
std::string table_name; uint32_t partition_id = 0; LogOffset pb_begin; @@ -98,8 +104,6 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); if (!partition) { LOG(WARNING) << "Partition " << table_name << "_" << partition_id << " Not Found"; - delete index; - delete task_arg; return; } @@ -107,8 +111,6 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { g_pika_rm->GetSyncSlavePartitionByName(PartitionInfo(table_name, partition_id)); if (!slave_partition) { LOG(WARNING) << "Slave Partition " << table_name << "_" << partition_id << " Not Found"; - delete index; - delete task_arg; return; } @@ -121,8 +123,6 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { } else if (meta.term() < partition->ConsensusTerm()) /*outdated pb*/ { LOG(WARNING) << "Drop outdated binlog sync response " << table_name << "_" << partition_id << " recv term: " << meta.term() << " local term: " << partition->ConsensusTerm(); - delete index; - delete task_arg; return; } if (!only_keepalive) { @@ -134,8 +134,6 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { LOG(WARNING) << "last_offset " << last_offset.ToString() << " NOT equal to pb prev_offset " << prev_offset.ToString(); slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; return; } } @@ -147,8 +145,6 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { // BinlogSync state, we drop remain write binlog task if (((g_pika_server->role() & PIKA_ROLE_SLAVE) == 0) || ((slave_partition->State() != ReplState::kConnected) && (slave_partition->State() != ReplState::kWaitDBSync))) { - delete index; - delete task_arg; return; } @@ -160,8 +156,6 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { LOG(WARNING) << "Check Session failed " << binlog_res.partition().table_name() << "_" << binlog_res.partition().partition_id(); 
slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; return; } @@ -172,8 +166,6 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog_res.binlog(), &worker->binlog_item_)) { LOG(WARNING) << "Binlog item decode failed"; slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; return; } const char* redis_parser_start = binlog_res.binlog().data() + BINLOG_ENCODE_LEN; @@ -184,13 +176,9 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { if (ret != net::kRedisParserDone) { LOG(WARNING) << "Redis parser failed"; slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; return; } } - delete index; - delete task_arg; if (res->has_consensus_meta()) { LogOffset leader_commit; @@ -217,7 +205,7 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { int PikaReplBgWorker::HandleWriteBinlog(net::RedisParser* parser, const net::RedisCmdArgsType& argv) { std::string opt = argv[0]; - auto* worker = static_cast(parser->data); + auto worker = static_cast(parser->data); // Monitor related std::string monitor_message; @@ -256,7 +244,7 @@ int PikaReplBgWorker::HandleWriteBinlog(net::RedisParser* parser, const net::Red } void PikaReplBgWorker::HandleBGWorkerWriteDB(void* arg) { - auto* task_arg = static_cast(arg); + auto task_arg = static_cast(arg); const std::shared_ptr c_ptr = task_arg->cmd_ptr; const PikaCmdArgsType& argv = c_ptr->argv(); LogOffset offset = task_arg->offset; @@ -290,12 +278,11 @@ void PikaReplBgWorker::HandleBGWorkerWriteDB(void* arg) { } } - delete task_arg; if (g_pika_conf->consensus_level() != 0) { std::shared_ptr partition = g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); - if (partition == nullptr) { + if (!partition) { LOG(WARNING) << "Sync Master Partition not exist " << table_name << partition_id; return; } diff --git 
a/src/pika_repl_client.cc b/src/pika_repl_client.cc index f94b0bbfe5..03d69d4288 100644 --- a/src/pika_repl_client.cc +++ b/src/pika_repl_client.cc @@ -22,22 +22,18 @@ using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; PikaReplClient::PikaReplClient(int cron_interval, int keepalive_timeout) { - client_thread_ = new PikaReplClientThread(cron_interval, keepalive_timeout); + client_thread_ = std::make_unique(cron_interval, keepalive_timeout); client_thread_->set_thread_name("PikaReplClient"); for (int i = 0; i < 2 * g_pika_conf->sync_thread_num(); ++i) { - bg_workers_.push_back(new PikaReplBgWorker(PIKA_SYNC_BUFFER_SIZE)); + bg_workers_.push_back(std::make_unique(PIKA_SYNC_BUFFER_SIZE)); } } PikaReplClient::~PikaReplClient() { client_thread_->StopThread(); - delete client_thread_; - for (auto & bg_worker : bg_workers_) { - delete bg_worker; - } LOG(INFO) << "PikaReplClient exit!!!"; } @@ -74,8 +70,7 @@ void PikaReplClient::ScheduleWriteBinlogTask(const std::string& table_partition, const std::shared_ptr& res, std::shared_ptr conn, void* res_private_data) { size_t index = GetHashIndex(table_partition, true); - auto* task_arg = - new ReplClientWriteBinlogTaskArg(res, std::move(conn), res_private_data, bg_workers_[index]); + auto task_arg = new ReplClientWriteBinlogTaskArg(res, std::move(conn), res_private_data, bg_workers_[index].get()); bg_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteBinlog, static_cast(task_arg)); } @@ -84,7 +79,7 @@ void PikaReplClient::ScheduleWriteDBTask(const std::shared_ptr& cmd_ptr, co const PikaCmdArgsType& argv = cmd_ptr->argv(); std::string dispatch_key = argv.size() >= 2 ? 
argv[1] : argv[0]; size_t index = GetHashIndex(dispatch_key, false); - auto* task_arg = new ReplClientWriteDBTaskArg(cmd_ptr, offset, table_name, partition_id); + auto task_arg = new ReplClientWriteDBTaskArg(cmd_ptr, offset, table_name, partition_id); bg_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteDB, static_cast(task_arg)); } @@ -101,7 +96,7 @@ Status PikaReplClient::Close(const std::string& ip, const int port) { return cli Status PikaReplClient::SendMetaSync() { std::string local_ip; - net::NetCli* cli = net::NewRedisCli(); + std::unique_ptr cli (net::NewRedisCli()); cli->set_connect_timeout(1500); if ((cli->Connect(g_pika_server->master_ip(), g_pika_server->master_port(), "")).ok()) { struct sockaddr_in laddr; @@ -110,7 +105,6 @@ Status PikaReplClient::SendMetaSync() { std::string tmp_local_ip(inet_ntoa(laddr.sin_addr)); local_ip = tmp_local_ip; cli->Close(); - delete cli; } else { LOG(WARNING) << "Failed to connect master, Master (" << g_pika_server->master_ip() << ":" << g_pika_server->master_port() << "), try reconnect"; @@ -118,7 +112,6 @@ Status PikaReplClient::SendMetaSync() { // when the connection fails sleep(3); g_pika_server->ResetMetaSyncStatus(); - delete cli; return Status::Corruption("Connect master error"); } diff --git a/src/pika_repl_client_conn.cc b/src/pika_repl_client_conn.cc index 0b85efb783..cbb6664df4 100644 --- a/src/pika_repl_client_conn.cc +++ b/src/pika_repl_client_conn.cc @@ -15,8 +15,10 @@ #include "pika_inner_message.pb.h" using pstd::Status; + +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; PikaReplClientConn::PikaReplClientConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data, net::NetMultiplexer* mpx) @@ -49,19 +51,19 @@ int PikaReplClientConn::DealMessage() { } switch (response->type()) { case InnerMessage::kMetaSync: { - auto* task_arg = + auto task_arg = new 
ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleMetaSyncResponse, static_cast(task_arg)); break; } case InnerMessage::kDBSync: { - auto* task_arg = + auto task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleDBSyncResponse, static_cast(task_arg)); break; } case InnerMessage::kTrySync: { - auto* task_arg = + auto task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleTrySyncResponse, static_cast(task_arg)); break; @@ -71,7 +73,7 @@ int PikaReplClientConn::DealMessage() { break; } case InnerMessage::kRemoveSlaveNode: { - auto* task_arg = + auto task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleRemoveSlaveNodeResponse, static_cast(task_arg)); @@ -84,7 +86,7 @@ int PikaReplClientConn::DealMessage() { } void PikaReplClientConn::HandleMetaSyncResponse(void* arg) { - auto* task_arg = static_cast(arg); + auto task_arg = static_cast(arg); std::shared_ptr conn = task_arg->conn; std::shared_ptr response = task_arg->res; @@ -100,7 +102,6 @@ void PikaReplClientConn::HandleMetaSyncResponse(void* arg) { LOG(WARNING) << "Meta Sync Failed: " << reply; g_pika_server->SyncError(); conn->NotifyClose(); - delete task_arg; return; } @@ -119,7 +120,6 @@ void PikaReplClientConn::HandleMetaSyncResponse(void* arg) { << "), failed to establish master-slave relationship"; g_pika_server->SyncError(); conn->NotifyClose(); - delete task_arg; return; } @@ -127,11 +127,10 @@ void PikaReplClientConn::HandleMetaSyncResponse(void* arg) { g_pika_server->PreparePartitionTrySync(); g_pika_server->FinishMetaSync(); LOG(INFO) << "Finish to handle meta sync response"; - delete task_arg; } void 
PikaReplClientConn::HandleDBSyncResponse(void* arg) { - auto* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); std::shared_ptr conn = task_arg->conn; std::shared_ptr response = task_arg->res; @@ -145,7 +144,6 @@ void PikaReplClientConn::HandleDBSyncResponse(void* arg) { g_pika_rm->GetSyncSlavePartitionByName(PartitionInfo(table_name, partition_id)); if (!slave_partition) { LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id << " Not Found"; - delete task_arg; return; } @@ -153,7 +151,6 @@ void PikaReplClientConn::HandleDBSyncResponse(void* arg) { slave_partition->SetReplState(ReplState::kError); std::string reply = response->has_reply() ? response->reply() : ""; LOG(WARNING) << "DBSync Failed: " << reply; - delete task_arg; return; } @@ -162,18 +159,16 @@ void PikaReplClientConn::HandleDBSyncResponse(void* arg) { std::string partition_name = slave_partition->PartitionName(); slave_partition->SetReplState(ReplState::kWaitDBSync); LOG(INFO) << "Partition: " << partition_name << " Need Wait To Sync"; - delete task_arg; } void PikaReplClientConn::HandleTrySyncResponse(void* arg) { - auto* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); std::shared_ptr conn = task_arg->conn; std::shared_ptr response = task_arg->res; if (response->code() != InnerMessage::kOk) { std::string reply = response->has_reply() ? 
response->reply() : ""; LOG(WARNING) << "TrySync Failed: " << reply; - delete task_arg; return; } @@ -185,7 +180,6 @@ void PikaReplClientConn::HandleTrySyncResponse(void* arg) { g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); if (!partition) { LOG(WARNING) << "Partition: " << table_name << ":" << partition_id << " Not Found"; - delete task_arg; return; } @@ -193,7 +187,6 @@ void PikaReplClientConn::HandleTrySyncResponse(void* arg) { g_pika_rm->GetSyncSlavePartitionByName(PartitionInfo(table_name, partition_id)); if (!slave_partition) { LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id << " Not Found"; - delete task_arg; return; } @@ -207,7 +200,6 @@ void PikaReplClientConn::HandleTrySyncResponse(void* arg) { } else if (meta.term() < partition->ConsensusTerm()) /*outdated pb*/ { LOG(WARNING) << "Drop outdated trysync response " << table_name << ":" << partition_id << " recv term: " << meta.term() << " local term: " << partition->ConsensusTerm(); - delete task_arg; return; } @@ -217,7 +209,6 @@ void PikaReplClientConn::HandleTrySyncResponse(void* arg) { slave_partition->SetReplState(ReplState::kError); LOG(WARNING) << "Consensus Check failed " << s.ToString(); } - delete task_arg; return; } @@ -247,7 +238,6 @@ void PikaReplClientConn::HandleTrySyncResponse(void* arg) { slave_partition->SetReplState(ReplState::kError); LOG(WARNING) << "Partition: " << partition_name << " TrySync Error"; } - delete task_arg; } Status PikaReplClientConn::TrySyncConsensusCheck(const InnerMessage::ConsensusMeta& consensus_meta, @@ -304,14 +294,12 @@ void PikaReplClientConn::DispatchBinlogRes(const std::shared_ptr(arg); + std::unique_ptr task_arg(static_cast(arg)); std::shared_ptr conn = task_arg->conn; std::shared_ptr response = task_arg->res; if (response->code() != InnerMessage::kOk) { std::string reply = response->has_reply() ? 
response->reply() : ""; LOG(WARNING) << "Remove slave node Failed: " << reply; - delete task_arg; return; } - delete task_arg; } diff --git a/src/pika_repl_client_thread.cc b/src/pika_repl_client_thread.cc index bd68d8c7e7..dec853f106 100644 --- a/src/pika_repl_client_thread.cc +++ b/src/pika_repl_client_thread.cc @@ -11,7 +11,7 @@ #include "pstd/include/pstd_string.h" extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; PikaReplClientThread::PikaReplClientThread(int cron_interval, int keepalive_timeout) : ClientThread(&conn_factory_, cron_interval, keepalive_timeout, &handle_, nullptr) {} diff --git a/src/pika_repl_server.cc b/src/pika_repl_server.cc index 81dbd8b40a..21e2eb1c36 100644 --- a/src/pika_repl_server.cc +++ b/src/pika_repl_server.cc @@ -12,18 +12,18 @@ #include "include/pika_server.h" using pstd::Status; + +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; PikaReplServer::PikaReplServer(const std::set& ips, int port, int cron_interval) { - server_tp_ = new net::ThreadPool(PIKA_REPL_SERVER_TP_SIZE, 100000); - pika_repl_server_thread_ = new PikaReplServerThread(ips, port, cron_interval); + server_tp_ = std::make_unique(PIKA_REPL_SERVER_TP_SIZE, 100000); + pika_repl_server_thread_ = std::make_unique(ips, port, cron_interval); pika_repl_server_thread_->set_thread_name("PikaReplServer"); } PikaReplServer::~PikaReplServer() { - delete pika_repl_server_thread_; - delete server_tp_; LOG(INFO) << "PikaReplServer exit!!!"; } @@ -141,11 +141,11 @@ pstd::Status PikaReplServer::Write(const std::string& ip, const int port, const } int fd = client_conn_map_[ip_port]; std::shared_ptr conn = std::dynamic_pointer_cast(pika_repl_server_thread_->get_conn(fd)); - if (conn == nullptr) { + if (!conn) { return Status::NotFound("The" + ip_port + " conn cannot be found"); } - if (conn->WriteResp(msg) != 0) { + if 
(conn->WriteResp(msg)) { conn->NotifyClose(); return Status::Corruption("The" + ip_port + " conn, Write Resp Failed"); } diff --git a/src/pika_repl_server_conn.cc b/src/pika_repl_server_conn.cc index aff9c2ed73..a1eeb4ab6c 100644 --- a/src/pika_repl_server_conn.cc +++ b/src/pika_repl_server_conn.cc @@ -12,7 +12,7 @@ using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; PikaReplServerConn::PikaReplServerConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data, net::NetMultiplexer* mpx) @@ -21,7 +21,7 @@ PikaReplServerConn::PikaReplServerConn(int fd, const std::string& ip_port, net:: PikaReplServerConn::~PikaReplServerConn() = default; void PikaReplServerConn::HandleMetaSyncRequest(void* arg) { - auto* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; std::shared_ptr conn = task_arg->conn; @@ -59,15 +59,13 @@ void PikaReplServerConn::HandleMetaSyncRequest(void* arg) { if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { LOG(WARNING) << "Process MetaSync request serialization failed"; conn->NotifyClose(); - delete task_arg; return; } conn->NotifyWrite(); - delete task_arg; } void PikaReplServerConn::HandleTrySyncRequest(void* arg) { - auto* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; std::shared_ptr conn = task_arg->conn; @@ -134,11 +132,9 @@ void PikaReplServerConn::HandleTrySyncRequest(void* arg) { if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { LOG(WARNING) << "Handle Try Sync Failed"; conn->NotifyClose(); - delete task_arg; return; } conn->NotifyWrite(); - delete task_arg; } bool PikaReplServerConn::TrySyncUpdateSlaveNode(const std::shared_ptr& partition, @@ -278,7 +274,7 @@ void PikaReplServerConn::BuildConsensusMeta(const bool& reject, const std::vecto 
} void PikaReplServerConn::HandleDBSyncRequest(void* arg) { - auto* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; std::shared_ptr conn = task_arg->conn; @@ -350,21 +346,18 @@ void PikaReplServerConn::HandleDBSyncRequest(void* arg) { if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { LOG(WARNING) << "Handle DBSync Failed"; conn->NotifyClose(); - delete task_arg; return; } conn->NotifyWrite(); - delete task_arg; } void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { - auto* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; std::shared_ptr conn = task_arg->conn; if (!req->has_binlog_sync()) { LOG(WARNING) << "Pb parse error"; // conn->NotifyClose(); - delete task_arg; return; } const InnerMessage::InnerRequest::BinlogSync& binlog_req = req->binlog_sync(); @@ -387,7 +380,6 @@ void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); if (!master_partition) { LOG(WARNING) << "Sync Master Partition: " << table_name << ":" << partition_id << ", NotFound"; - delete task_arg; return; } @@ -400,7 +392,6 @@ void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { } else if (meta.term() < master_partition->ConsensusTerm()) /*outdated pb*/ { LOG(WARNING) << "Drop outdated binlog sync req " << table_name << ":" << partition_id << " recv term: " << meta.term() << " local term: " << master_partition->ConsensusTerm(); - delete task_arg; return; } } @@ -409,7 +400,6 @@ void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { LOG(WARNING) << "Check Session failed " << node.ip() << ":" << node.port() << ", " << table_name << "_" << partition_id; // conn->NotifyClose(); - delete task_arg; return; } @@ -421,7 +411,6 @@ void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { LOG(WARNING) << 
"SetMasterLastRecvTime failed " << node.ip() << ":" << node.port() << ", " << table_name << "_" << partition_id << " " << s.ToString(); conn->NotifyClose(); - delete task_arg; return; } @@ -429,7 +418,6 @@ void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { if (range_start.b_offset != range_end.b_offset) { LOG(WARNING) << "first binlogsync request pb argument invalid"; conn->NotifyClose(); - delete task_arg; return; } @@ -437,39 +425,33 @@ void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { if (!s.ok()) { LOG(WARNING) << "Activate Binlog Sync failed " << slave_node.ToString() << " " << s.ToString(); conn->NotifyClose(); - delete task_arg; return; } - delete task_arg; return; } // not the first_send the range_ack cant be 0 // set this case as ping if (range_start.b_offset == BinlogOffset() && range_end.b_offset == BinlogOffset()) { - delete task_arg; return; } s = g_pika_rm->UpdateSyncBinlogStatus(slave_node, range_start, range_end); if (!s.ok()) { LOG(WARNING) << "Update binlog ack failed " << table_name << " " << partition_id << " " << s.ToString(); conn->NotifyClose(); - delete task_arg; return; } - delete task_arg; g_pika_server->SignalAuxiliary(); } void PikaReplServerConn::HandleRemoveSlaveNodeRequest(void* arg) { - auto* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; std::shared_ptr conn = task_arg->conn; if (req->remove_slave_node_size() == 0) { LOG(WARNING) << "Pb parse error"; conn->NotifyClose(); - delete task_arg; return; } const InnerMessage::InnerRequest::RemoveSlaveNode& remove_slave_node_req = req->remove_slave_node(0); @@ -500,11 +482,9 @@ void PikaReplServerConn::HandleRemoveSlaveNodeRequest(void* arg) { if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { LOG(WARNING) << "Remove Slave Node Failed"; conn->NotifyClose(); - delete task_arg; return; } conn->NotifyWrite(); - delete task_arg; } int PikaReplServerConn::DealMessage() { 
@@ -516,31 +496,31 @@ int PikaReplServerConn::DealMessage() { } switch (req->type()) { case InnerMessage::kMetaSync: { - auto* task_arg = + auto task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleMetaSyncRequest, task_arg); break; } case InnerMessage::kTrySync: { - auto* task_arg = + auto task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleTrySyncRequest, task_arg); break; } case InnerMessage::kDBSync: { - auto* task_arg = + auto task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleDBSyncRequest, task_arg); break; } case InnerMessage::kBinlogSync: { - auto* task_arg = + auto task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleBinlogSyncRequest, task_arg); break; } case InnerMessage::kRemoveSlaveNode: { - auto* task_arg = + auto task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleRemoveSlaveNodeRequest, task_arg); break; diff --git a/src/pika_repl_server_thread.cc b/src/pika_repl_server_thread.cc index cc2d7bdb7a..590ba02f7f 100644 --- a/src/pika_repl_server_thread.cc +++ b/src/pika_repl_server_thread.cc @@ -9,13 +9,12 @@ #include "include/pika_server.h" extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; PikaReplServerThread::PikaReplServerThread(const std::set& ips, int port, int cron_interval) : HolyThread(ips, port, &conn_factory_, cron_interval, &handle_, true), conn_factory_(this), - port_(port) - { + port_(port) { set_keepalive_timeout(180); } diff --git a/src/pika_rm.cc b/src/pika_rm.cc index d2d45b1516..0876da7371 100644 --- 
a/src/pika_rm.cc +++ b/src/pika_rm.cc @@ -21,7 +21,9 @@ #include "include/pika_command.h" using pstd::Status; -extern PikaReplicaManager* g_pika_rm; + +extern std::unique_ptr g_pika_conf; +extern std::unique_ptr g_pika_rm; extern PikaServer* g_pika_server; /* SyncPartition */ @@ -138,7 +140,7 @@ Status SyncMasterPartition::ActivateSlaveDbSync(const std::string& ip, int port) Status SyncMasterPartition::ReadBinlogFileToWq(const std::shared_ptr& slave_ptr) { int cnt = slave_ptr->sync_win.Remaining(); std::shared_ptr reader = slave_ptr->binlog_reader; - if (reader == nullptr) { + if (!reader) { return Status::OK(); } std::vector tasks; @@ -194,7 +196,7 @@ Status SyncMasterPartition::ConsensusUpdateSlave(const std::string& ip, int port Status SyncMasterPartition::ConsensusUpdateAppliedIndex(const LogOffset& offset) { std::shared_ptr context = coordinator_.context(); - if (context == nullptr) { + if (!context) { LOG(WARNING) << "Coordinator context empty."; return Status::NotFound("context"); } @@ -638,16 +640,11 @@ PikaReplicaManager::PikaReplicaManager() { std::set ips; ips.insert("0.0.0.0"); int port = g_pika_conf->port() + kPortShiftReplServer; - pika_repl_client_ = new PikaReplClient(3000, 60); - pika_repl_server_ = new PikaReplServer(ips, port, 3000); + pika_repl_client_ = std::make_unique(3000, 60); + pika_repl_server_ = std::make_unique(ips, port, 3000); InitPartition(); } -PikaReplicaManager::~PikaReplicaManager() { - delete pika_repl_client_; - delete pika_repl_server_; -} - void PikaReplicaManager::Start() { int ret = 0; ret = pika_repl_client_->Start(); @@ -802,14 +799,14 @@ void PikaReplicaManager::ReplServerUpdateClientConnMap(const std::string& ip_por pika_repl_server_->UpdateClientConnMap(ip_port, fd); } -Status PikaReplicaManager::UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& range_start, - const LogOffset& range_end) { +Status PikaReplicaManager::UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& offset_start, + const 
LogOffset& offset_end) { std::shared_lock l(partitions_rw_); if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { return Status::NotFound(slave.ToString() + " not found"); } std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->ConsensusUpdateSlave(slave.Ip(), slave.Port(), range_start, range_end); + Status s = partition->ConsensusUpdateSlave(slave.Ip(), slave.Port(), offset_start, offset_end); if (!s.ok()) { return s; } @@ -945,7 +942,7 @@ Status PikaReplicaManager::GetPartitionInfo(const std::string& table, uint32_t p Status PikaReplicaManager::SelectLocalIp(const std::string& remote_ip, const int remote_port, std::string* const local_ip) { - net::NetCli* cli = net::NewRedisCli(); + std::unique_ptr cli(net::NewRedisCli()); cli->set_connect_timeout(1500); if ((cli->Connect(remote_ip, remote_port, "")).ok()) { struct sockaddr_in laddr; @@ -954,10 +951,8 @@ Status PikaReplicaManager::SelectLocalIp(const std::string& remote_ip, const int std::string tmp_ip(inet_ntoa(laddr.sin_addr)); *local_ip = tmp_ip; cli->Close(); - delete cli; } else { LOG(WARNING) << "Failed to connect remote node(" << remote_ip << ":" << remote_port << ")"; - delete cli; return Status::Corruption("connect remote node error"); } return Status::OK(); diff --git a/src/pika_rsync_service.cc b/src/pika_rsync_service.cc index c76f7466ea..780d782d12 100644 --- a/src/pika_rsync_service.cc +++ b/src/pika_rsync_service.cc @@ -15,9 +15,9 @@ #include "include/pika_conf.h" #include "include/pika_define.h" -extern PikaConf* g_pika_conf; +extern std::unique_ptr g_pika_conf; -PikaRsyncService::PikaRsyncService(std::string raw_path, const int port) : raw_path_(std::move(raw_path)), port_(port) { +PikaRsyncService::PikaRsyncService(const std::string& raw_path, const int port) : raw_path_(raw_path), port_(port) { if (raw_path_.back() != '/') { raw_path_ += "/"; } @@ -43,12 +43,12 @@ int PikaRsyncService::StartRsync() { 
auth = g_pika_conf->masterauth(); } ret = pstd::StartRsync(raw_path_, kDBSyncModule, "0.0.0.0", port_, auth); - if (ret != 0) { + if (ret) { LOG(WARNING) << "Failed to start rsync, path:" << raw_path_ << " error : " << ret; return -1; } ret = CreateSecretFile(); - if (ret != 0) { + if (ret) { LOG(WARNING) << "Failed to create secret file"; return -1; } diff --git a/src/pika_server.cc b/src/pika_server.cc index 5813f39b0f..1eb19bfa2e 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -27,22 +27,20 @@ using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; void DoPurgeDir(void* arg) { - std::string path = *(static_cast(arg)); - LOG(INFO) << "Delete dir: " << path << " start"; - pstd::DeleteDir(path); - LOG(INFO) << "Delete dir: " << path << " done"; - delete static_cast(arg); + std::unique_ptr path(static_cast(arg)); + LOG(INFO) << "Delete dir: " << *path << " start"; + pstd::DeleteDir(*path); + LOG(INFO) << "Delete dir: " << *path << " done"; } void DoDBSync(void* arg) { - auto* dbsa = reinterpret_cast(arg); + std::unique_ptr dbsa(static_cast(arg)); PikaServer* const ps = dbsa->p; ps->DbSyncSendFile(dbsa->ip, dbsa->port, dbsa->table_name, dbsa->partition_id); - delete dbsa; } PikaServer::PikaServer() @@ -72,20 +70,19 @@ PikaServer::PikaServer() int worker_queue_limit = g_pika_conf->maxclients() / worker_num_ + 100; LOG(INFO) << "Worker queue limit is " << worker_queue_limit; pika_dispatch_thread_ = - new PikaDispatchThread(ips, port_, worker_num_, 3000, worker_queue_limit, g_pika_conf->max_conn_rbuf_size()); - pika_monitor_thread_ = new PikaMonitorThread(); - pika_rsync_service_ = new PikaRsyncService(g_pika_conf->db_sync_path(), g_pika_conf->port() + kPortShiftRSync); - pika_pubsub_thread_ = new net::PubSubThread(); - pika_auxiliary_thread_ = new PikaAuxiliaryThread(); + 
std::make_unique(ips, port_, worker_num_, 3000, worker_queue_limit, g_pika_conf->max_conn_rbuf_size()); + pika_monitor_thread_ = std::make_unique(); + pika_rsync_service_ = std::make_unique(g_pika_conf->db_sync_path(), g_pika_conf->port() + kPortShiftRSync); + pika_pubsub_thread_ = std::make_unique(); + pika_auxiliary_thread_ = std::make_unique(); - pika_client_processor_ = new PikaClientProcessor(g_pika_conf->thread_pool_size(), 100000); + pika_client_processor_ = std::make_unique(g_pika_conf->thread_pool_size(), 100000); } PikaServer::~PikaServer() { // DispatchThread will use queue of worker thread, // so we need to delete dispatch before worker. pika_client_processor_->Stop(); - delete pika_dispatch_thread_; { std::lock_guard l(slave_mutex_); @@ -95,13 +92,6 @@ PikaServer::~PikaServer() { LOG(INFO) << "Delete slave success"; } } - - delete pika_pubsub_thread_; - delete pika_auxiliary_thread_; - delete pika_rsync_service_; - delete pika_client_processor_; - delete pika_monitor_thread_; - bgsave_thread_.StopThread(); key_scan_thread_.StopThread(); @@ -245,7 +235,7 @@ bool PikaServer::readonly(const std::string& table_name, const std::string& key) bool PikaServer::ConsensusCheck(const std::string& table_name, const std::string& key) { if (g_pika_conf->consensus_level() != 0) { std::shared_ptr
table = GetTable(table_name); - if (table == nullptr) { + if (!table) { return false; } uint32_t index = g_pika_cmd_table_manager->DistributeKey(key, table->PartitionNum()); @@ -556,7 +546,7 @@ Status PikaServer::DoSameThingEveryPartition(const TaskType& type) { case TaskType::kResetReplState: { slave_partition = g_pika_rm->GetSyncSlavePartitionByName( PartitionInfo(table_item.second->GetTableName(), partition_item.second->GetPartitionId())); - if (slave_partition == nullptr) { + if (!slave_partition) { LOG(WARNING) << "Slave Partition: " << table_item.second->GetTableName() << ":" << partition_item.second->GetPartitionId() << " Not Found"; } @@ -799,7 +789,7 @@ bool PikaServer::AllPartitionConnectSuccess() { for (const auto& partition_item : table_item.second->partitions_) { slave_partition = g_pika_rm->GetSyncSlavePartitionByName( PartitionInfo(table_item.second->GetTableName(), partition_item.second->GetPartitionId())); - if (slave_partition == nullptr) { + if (!slave_partition) { LOG(WARNING) << "Slave Partition: " << table_item.second->GetTableName() << ":" << partition_item.second->GetPartitionId() << ", NotFound"; return false; @@ -853,7 +843,7 @@ void PikaServer::ScheduleClientBgThreads(net::TaskFunc func, void* arg, const st } size_t PikaServer::ClientProcessorThreadPoolCurQueueSize() { - if (pika_client_processor_ == nullptr) { + if (!pika_client_processor_) { return 0; } return pika_client_processor_->ThreadPoolCurQueueSize(); @@ -870,7 +860,7 @@ void PikaServer::PurgelogsTaskSchedule(net::TaskFunc func, void* arg) { } void PikaServer::PurgeDir(const std::string& path) { - auto* dir_path = new std::string(path); + auto dir_path = new std::string(path); PurgeDirTaskSchedule(&DoPurgeDir, static_cast(dir_path)); } @@ -891,7 +881,7 @@ void PikaServer::DBSync(const std::string& ip, int port, const std::string& tabl // Reuse the bgsave_thread_ // Since we expect BgSave and DBSync execute serially bgsave_thread_.StartThread(); - auto* arg = new DBSyncArg(this, 
ip, port, table_name, partition_id); + auto arg = new DBSyncArg(this, ip, port, table_name, partition_id); bgsave_thread_.Schedule(&DoDBSync, reinterpret_cast(arg)); } @@ -941,7 +931,7 @@ void PikaServer::DbSyncSendFile(const std::string& ip, int port, const std::stri int ret = 0; LOG(INFO) << "Partition: " << partition->GetPartitionName() << " Start Send files in " << bg_path << " to " << ip; ret = pstd::GetChildren(bg_path, descendant); - if (ret != 0) { + if (ret) { std::string ip_port = pstd::IpPortString(ip, port); std::lock_guard ldb(db_sync_protector_); db_sync_slaves_.erase(ip_port); @@ -989,7 +979,7 @@ void PikaServer::DbSyncSendFile(const std::string& ip, int port, const std::stri pstd::RsyncSendClearTarget(bg_path + "/sets", remote_path + "/sets", secret_file_path, remote); pstd::RsyncSendClearTarget(bg_path + "/zsets", remote_path + "/zsets", secret_file_path, remote); - net::NetCli* cli = net::NewRedisCli(); + std::unique_ptr cli(net::NewRedisCli()); std::string lip(host_); if (cli->Connect(ip, port, "").ok()) { struct sockaddr_in laddr; @@ -997,11 +987,9 @@ void PikaServer::DbSyncSendFile(const std::string& ip, int port, const std::stri getsockname(cli->fd(), reinterpret_cast(&laddr), &llen); lip = inet_ntoa(laddr.sin_addr); cli->Close(); - delete cli; } else { LOG(WARNING) << "Rsync try connect slave rsync service error" << ", slave rsync service(" << ip << ":" << port << ")"; - delete cli; } // Send info file at last @@ -1020,7 +1008,7 @@ void PikaServer::DbSyncSendFile(const std::string& ip, int port, const std::stri } ret = pstd::RsyncSendFile(fn, remote_path + "/" + kBgsaveInfoFile, secret_file_path, remote); pstd::DeleteFile(fn); - if (ret != 0) { + if (ret) { LOG(WARNING) << "Partition: " << partition->GetPartitionName() << " Send Modified Info File Failed"; } } else if (0 != (ret = pstd::RsyncSendFile(bg_path + "/" + kBgsaveInfoFile, remote_path + "/" + kBgsaveInfoFile, diff --git a/src/pika_set.cc b/src/pika_set.cc index 
74018e92a3..53401d8f7d 100644 --- a/src/pika_set.cc +++ b/src/pika_set.cc @@ -167,7 +167,7 @@ void SScanCmd::Do(std::shared_ptr partition) { } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - } +} void SRemCmd::DoInitial() { if (!CheckArg(argv_.size())) { diff --git a/src/pika_slave_node.cc b/src/pika_slave_node.cc index 7a09076e64..3e517b5bb2 100644 --- a/src/pika_slave_node.cc +++ b/src/pika_slave_node.cc @@ -8,7 +8,8 @@ #include "include/pika_conf.h" using pstd::Status; -extern PikaConf* g_pika_conf; + +extern std::unique_ptr g_pika_conf; /* SyncWindow */ diff --git a/src/pika_stable_log.cc b/src/pika_stable_log.cc index 47e911e3c6..17c871b9c4 100644 --- a/src/pika_stable_log.cc +++ b/src/pika_stable_log.cc @@ -18,8 +18,9 @@ using pstd::Status; +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; StableLog::StableLog(std::string table_name, uint32_t partition_id, std::string log_path) : purging_(false), table_name_(std::move(table_name)), partition_id_(partition_id), log_path_(std::move(log_path)) { @@ -64,7 +65,7 @@ bool StableLog::PurgeStableLogs(uint32_t to, bool manual) { LOG(WARNING) << "purge process already exist"; return false; } - auto* arg = new PurgeStableLogArg(); + auto arg = new PurgeStableLogArg(); arg->to = to; arg->manual = manual; arg->logger = shared_from_this(); @@ -75,10 +76,9 @@ bool StableLog::PurgeStableLogs(uint32_t to, bool manual) { void StableLog::ClearPurge() { purging_ = false; } void StableLog::DoPurgeStableLogs(void* arg) { - auto* purge_arg = static_cast(arg); + std::unique_ptr purge_arg(static_cast(arg)); purge_arg->logger->PurgeFiles(purge_arg->to, purge_arg->manual); purge_arg->logger->ClearPurge(); - delete static_cast(arg); } bool StableLog::PurgeFiles(uint32_t to, bool manual) { @@ -145,7 +145,7 @@ bool StableLog::PurgeFiles(uint32_t to, bool manual) { bool StableLog::GetBinlogFiles(std::map* binlogs) { std::vector children; 
int ret = pstd::GetChildren(log_path_, children); - if (ret != 0) { + if (ret) { LOG(WARNING) << log_path_ << " Get all files in log path failed! error:" << ret; return false; } diff --git a/src/pika_table.cc b/src/pika_table.cc index 63658cf6ca..83cd26c50b 100644 --- a/src/pika_table.cc +++ b/src/pika_table.cc @@ -13,8 +13,8 @@ using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; std::string TablePath(const std::string& path, const std::string& table_name) { char buf[100]; @@ -132,7 +132,7 @@ void Table::KeyScan() { key_scan_info_.key_scaning_ = true; key_scan_info_.duration = -2; // duration -2 mean the task in waiting status, // has not been scheduled for exec - auto* bg_task_arg = new BgTaskArg(); + auto bg_task_arg = new BgTaskArg(); bg_task_arg->table = shared_from_this(); g_pika_server->KeyScanTaskSchedule(&DoKeyScan, reinterpret_cast(bg_task_arg)); } @@ -213,9 +213,8 @@ void Table::Compact(const storage::DataType& type) { } void Table::DoKeyScan(void* arg) { - auto* bg_task_arg = reinterpret_cast(arg); + std::unique_ptr bg_task_arg(static_cast(arg)); bg_task_arg->table->RunKeyScan(); - delete bg_task_arg; } void Table::InitKeyScan() { diff --git a/src/pstd/include/base_conf.h b/src/pstd/include/base_conf.h index c248815f70..779ab48bdf 100644 --- a/src/pstd/include/base_conf.h +++ b/src/pstd/include/base_conf.h @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -17,8 +18,6 @@ namespace pstd { -class BaseConf; - class BaseConf { public: struct Rep { @@ -73,14 +72,8 @@ class BaseConf { void PushConfItem(const Rep::ConfItem& item); - /* - * No copy && no assign operator - */ - BaseConf(const BaseConf&) = delete; - void operator=(const BaseConf&) = delete; private: - Rep* rep_ = nullptr; - + std::unique_ptr rep_; }; } // namespace pstd diff --git 
a/src/pstd/include/env.h b/src/pstd/include/env.h index ab08f14484..da87801aeb 100644 --- a/src/pstd/include/env.h +++ b/src/pstd/include/env.h @@ -4,8 +4,10 @@ #include #include #include +#include #include "pstd/include/pstd_status.h" +#include "pstd/include/noncopyable.h" namespace pstd { @@ -46,17 +48,13 @@ Status DeleteFile(const std::string& fname); int RenameFile(const std::string& oldname, const std::string& newname); -class FileLock { +class FileLock : public pstd::noncopyable { public: FileLock() = default; virtual ~FileLock()= default;; int fd_ = -1; std::string name_; - - // No copying allowed - FileLock(const FileLock&) = delete; - void operator=(const FileLock&) = delete; }; Status LockFile(const std::string& f, FileLock** l); @@ -68,22 +66,22 @@ bool GetDescendant(const std::string& dir, std::vector& result); uint64_t NowMicros(); void SleepForMicroseconds(int micros); -Status NewSequentialFile(const std::string& fname, SequentialFile** result); +Status NewSequentialFile(const std::string& fname, std::unique_ptr& result); -Status NewWritableFile(const std::string& fname, WritableFile** result); +Status NewWritableFile(const std::string& fname, std::unique_ptr& result); -Status NewRWFile(const std::string& fname, RWFile** result); +Status NewRWFile(const std::string& fname, std::unique_ptr& result); Status AppendSequentialFile(const std::string& fname, SequentialFile** result); -Status AppendWritableFile(const std::string& fname, WritableFile** result, uint64_t write_len = 0); +Status AppendWritableFile(const std::string& fname, std::unique_ptr& result, uint64_t write_len = 0); -Status NewRandomRWFile(const std::string& fname, RandomRWFile** result); +Status NewRandomRWFile(const std::string& fname, std::unique_ptr& result); // A file abstraction for sequential writing. The implementation // must provide buffering since callers may append small fragments // at a time to the file. 
-class WritableFile { +class WritableFile : public pstd::noncopyable { public: WritableFile() = default; virtual ~WritableFile(); @@ -94,10 +92,6 @@ class WritableFile { virtual Status Sync() = 0; virtual Status Trim(uint64_t offset) = 0; virtual uint64_t Filesize() = 0; - - // No copying allowed - WritableFile(const WritableFile&) = delete; - void operator=(const WritableFile&) = delete; }; // A abstract for the sequential readable file @@ -112,20 +106,15 @@ class SequentialFile { virtual char* ReadLine(char* buf, int n) = 0; }; -class RWFile { +class RWFile : public pstd::noncopyable { public: RWFile() = default; virtual ~RWFile(); virtual char* GetData() = 0; - - // No copying allowed - RWFile(const RWFile&) = delete; - void operator=(const RWFile&) = delete; - private: }; // A file abstraction for random reading and writing. -class RandomRWFile { +class RandomRWFile : public pstd::noncopyable { public: RandomRWFile() = default; virtual ~RandomRWFile() = default; @@ -165,12 +154,6 @@ class RandomRWFile { (void)len; return Status::OK(); } - - // No copying allowed - RandomRWFile(const RandomRWFile&) = delete; - void operator=(const RandomRWFile&) = delete; - - private: }; } // namespace pstd diff --git a/src/pstd/include/lock_mgr.h b/src/pstd/include/lock_mgr.h index 06bb442a17..8cbc9a588d 100644 --- a/src/pstd/include/lock_mgr.h +++ b/src/pstd/include/lock_mgr.h @@ -10,6 +10,7 @@ #include #include "pstd/include/mutex.h" +#include "pstd/include/noncopyable.h" namespace pstd { @@ -17,7 +18,7 @@ namespace lock { struct LockMap; struct LockMapStripe; -class LockMgr { +class LockMgr : public pstd::noncopyable { public: LockMgr(size_t default_num_stripes, int64_t max_num_locks, const std::shared_ptr& factory); @@ -30,10 +31,6 @@ class LockMgr { // Unlock a key locked by TryLock(). 
void UnLock(const std::string& key); - // No copying allowed - LockMgr(const LockMgr&) = delete; - void operator=(const LockMgr&) = delete; - private: // Default number of lock map stripes const size_t default_num_stripes_[[maybe_unused]]; @@ -47,11 +44,11 @@ class LockMgr { // Map to locked key info std::shared_ptr lock_map_; - Status Acquire(LockMapStripe* stripe, const std::string& key); + Status Acquire(std::shared_ptr stripe, const std::string& key); - Status AcquireLocked(LockMapStripe* stripe, const std::string& key); + Status AcquireLocked(std::shared_ptr stripe, const std::string& key); - void UnLockKey(const std::string& key, LockMapStripe* stripe); + void UnLockKey(const std::string& key, std::shared_ptr stripe); }; diff --git a/src/pstd/include/noncopyable.h b/src/pstd/include/noncopyable.h new file mode 100644 index 0000000000..4e24b0d52d --- /dev/null +++ b/src/pstd/include/noncopyable.h @@ -0,0 +1,23 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#ifndef PIKA_NONCOPYABLE_H_ +#define PIKA_NONCOPYABLE_H_ + +namespace pstd { + +class noncopyable { + protected: + noncopyable() = default; + ~noncopyable() = default; + + private: + noncopyable(const noncopyable&) = delete; + void operator=(const noncopyable&) = delete; +}; + +} + +#endif diff --git a/src/pstd/include/pstd_coding.h b/src/pstd/include/pstd_coding.h index 9a2b1a52c3..f601b5e337 100644 --- a/src/pstd/include/pstd_coding.h +++ b/src/pstd/include/pstd_coding.h @@ -89,7 +89,7 @@ inline uint64_t DecodeFixed64(const char* ptr) { } inline void GetFixed16(std::string* dst, uint16_t* value) { - if (dst == nullptr || value == nullptr) { + if (!dst || !value) { return; } *value = DecodeFixed16(dst->data()); @@ -97,7 +97,7 @@ inline void GetFixed16(std::string* dst, uint16_t* value) { } inline void GetFixed32(std::string* dst, uint32_t* value) { - if (dst == nullptr || value == nullptr) { + if (!dst || !value) { return; } *value = DecodeFixed32(dst->data()); @@ -105,7 +105,7 @@ inline void GetFixed32(std::string* dst, uint32_t* value) { } inline void GetFixed64(std::string* dst, uint64_t* value) { - if (dst == nullptr || value == nullptr) { + if (!dst || !value) { return; } *value = DecodeFixed64(dst->data()); @@ -113,7 +113,7 @@ inline void GetFixed64(std::string* dst, uint64_t* value) { } inline void GetFixed16(Slice* dst, uint16_t* value) { - if (dst == nullptr || value == nullptr) { + if (!dst || !value) { return; } *value = DecodeFixed16(dst->data()); @@ -121,7 +121,7 @@ inline void GetFixed16(Slice* dst, uint16_t* value) { } inline void GetFixed32(Slice* dst, uint32_t* value) { - if (dst == nullptr || value == nullptr) { + if (!dst || !value) { return; } *value = DecodeFixed32(dst->data()); @@ -129,7 +129,7 @@ inline void GetFixed32(Slice* dst, uint32_t* value) { } inline void GetFixed64(Slice* dst, uint64_t* value) { - if (dst == nullptr || value == nullptr) { + if (!dst || !value) { return; } *value = DecodeFixed64(dst->data()); diff --git 
a/src/pstd/include/pstd_mutex.h b/src/pstd/include/pstd_mutex.h index 98cdbac9de..b1ea9c8203 100644 --- a/src/pstd/include/pstd_mutex.h +++ b/src/pstd/include/pstd_mutex.h @@ -10,6 +10,7 @@ #include #include #include +#include "noncopyable.h" namespace pstd { @@ -24,7 +25,7 @@ void InitOnce(OnceType& once, F&& f, Args&&... args) { return std::call_once(once, std::forward(f), std::forward(args)...); } -class RefMutex { +class RefMutex : public pstd::noncopyable { public: RefMutex() = default; ~RefMutex() = default; @@ -38,17 +39,12 @@ class RefMutex { void Unref(); bool IsLastRef() { return refs_ == 1; } - // No copying - RefMutex(const RefMutex&) = delete; - void operator=(const RefMutex&) = delete; - private: std::mutex mu_; int refs_ = 0; - }; -class RecordMutex { +class RecordMutex : public pstd::noncopyable { public: RecordMutex()= default;; ~RecordMutex(); @@ -62,19 +58,12 @@ class RecordMutex { Mutex mutex_; std::unordered_map records_; - - // No copying - RecordMutex(const RecordMutex&); - void operator=(const RecordMutex&); }; -class RecordLock { +class RecordLock : public pstd::noncopyable { public: - RecordLock(RecordMutex* mu, std::string key) : mu_(mu), key_(std::move(std::move(std::move(key)))) { mu_->Lock(key_); } + RecordLock(RecordMutex* mu, std::string key) : mu_(mu), key_(std::move(key)) { mu_->Lock(key_); } ~RecordLock() { mu_->Unlock(key_); } - // No copying allowed - RecordLock(const RecordLock&) = delete; - void operator=(const RecordLock&) = delete; private: RecordMutex* const mu_; diff --git a/src/pstd/include/pstd_slice.h b/src/pstd/include/pstd_slice.h index 921e981432..9b0402ceea 100644 --- a/src/pstd/include/pstd_slice.h +++ b/src/pstd/include/pstd_slice.h @@ -99,14 +99,13 @@ inline int Slice::compare(const Slice& b) const { const size_t min_len = (size_ < b.size_) ? 
size_ : b.size_; int r = memcmp(data_, b.data_, min_len); if (r == 0) { - if (size_ < b.size_) { { { + if (size_ < b.size_) { r = -1; - } } } else if (size_ > b.size_) { { { + } else if (size_ > b.size_) { r = +1; -} -} -} + } } + return r; } diff --git a/src/pstd/include/pstd_status.h b/src/pstd/include/pstd_status.h index b3848877be..d3336cd3b5 100644 --- a/src/pstd/include/pstd_status.h +++ b/src/pstd/include/pstd_status.h @@ -40,7 +40,7 @@ class Status { static Status Busy(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kBusy, msg, msg2); } // Returns true if the status indicates success. - bool ok() const { return (state_ == nullptr); } + bool ok() const { return !state_; } // Returns true if the status indicates a NotFound error. bool IsNotFound() const { return code() == kNotFound; } @@ -102,19 +102,19 @@ class Status { kBusy = 11 }; - Code code() const { return (state_ == nullptr) ? kOk : static_cast(state_[4]); } + Code code() const { return !state_ ? kOk : static_cast(state_[4]); } Status(Code code, const Slice& msg, const Slice& msg2); static const char* CopyState(const char* s); }; -inline Status::Status(const Status& s) { state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); } +inline Status::Status(const Status& s) { state_ = !s.state_ ? nullptr : CopyState(s.state_); } inline void Status::operator=(const Status& s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. if (state_ != s.state_) { delete[] state_; - state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); + state_ = !s.state_ ? 
nullptr : CopyState(s.state_); } } diff --git a/src/pstd/include/scope_record_lock.h b/src/pstd/include/scope_record_lock.h index 3c531ea95f..882964c178 100644 --- a/src/pstd/include/scope_record_lock.h +++ b/src/pstd/include/scope_record_lock.h @@ -10,6 +10,7 @@ #include #include +#include "pstd/include/noncopyable.h" #include "pstd/include/lock_mgr.h" #include "rocksdb/slice.h" @@ -17,44 +18,37 @@ namespace pstd::lock { using Slice = rocksdb::Slice; -class ScopeRecordLock { +class ScopeRecordLock final : public pstd::noncopyable { public: - ScopeRecordLock(LockMgr* lock_mgr, const Slice& key) : lock_mgr_(lock_mgr), key_(key) { + ScopeRecordLock(std::shared_ptr lock_mgr, const Slice& key) : lock_mgr_(lock_mgr), key_(key) { lock_mgr_->TryLock(key_.ToString()); } ~ScopeRecordLock() { lock_mgr_->UnLock(key_.ToString()); } - ScopeRecordLock(const ScopeRecordLock&) = delete; - void operator=(const ScopeRecordLock&) = delete; - private: - LockMgr* const lock_mgr_; + std::shared_ptr const lock_mgr_; Slice key_; }; -class MultiScopeRecordLock { +class MultiScopeRecordLock final : public pstd::noncopyable { public: - MultiScopeRecordLock(LockMgr* lock_mgr, std::vector keys); + MultiScopeRecordLock(std::shared_ptr lock_mgr, const std::vector& keys); ~MultiScopeRecordLock(); - MultiScopeRecordLock(const MultiScopeRecordLock&) = delete; - void operator=(const MultiScopeRecordLock&) = delete; private: - LockMgr* const lock_mgr_; + std::shared_ptr const lock_mgr_; std::vector keys_; }; -class MultiRecordLock { +class MultiRecordLock : public noncopyable { public: - explicit MultiRecordLock(LockMgr* lock_mgr) : lock_mgr_(lock_mgr) {} + explicit MultiRecordLock(std::shared_ptr lock_mgr) : lock_mgr_(lock_mgr) {} ~MultiRecordLock() = default; + void Lock(const std::vector& keys); void Unlock(const std::vector& keys); - - MultiRecordLock(const MultiRecordLock&) = delete; - void operator=(const MultiRecordLock&) = delete; private: - LockMgr* const lock_mgr_; + std::shared_ptr const
lock_mgr_; }; } // namespace pstd::lock diff --git a/src/pstd/src/base_conf.cc b/src/pstd/src/base_conf.cc index 215b68ddae..eec0099160 100644 --- a/src/pstd/src/base_conf.cc +++ b/src/pstd/src/base_conf.cc @@ -19,17 +19,16 @@ namespace pstd { static const int kConfItemLen = 1024 * 1024; -BaseConf::BaseConf(const std::string& path) : rep_(new Rep(path)) {} +BaseConf::BaseConf(const std::string& path) : rep_(std::make_unique(path)) {} -BaseConf::~BaseConf() { delete rep_; } +BaseConf::~BaseConf() {} int BaseConf::LoadConf() { if (!FileExists(rep_->path)) { return -1; } - SequentialFile* sequential_file; - NewSequentialFile(rep_->path, &sequential_file); - + std::unique_ptr sequential_file; + NewSequentialFile(rep_->path, sequential_file); // read conf items char line[kConfItemLen]; @@ -80,19 +79,16 @@ int BaseConf::LoadConf() { } // sequential_file->Close(); - delete sequential_file; return 0; } int BaseConf::ReloadConf() { - Rep* rep = rep_; - rep_ = new Rep(rep->path); + auto rep = std::move(rep_); + rep_ = std::make_unique(rep->path); if (LoadConf() == -1) { - delete rep_; - rep_ = rep; + rep_ = std::move(rep); return -1; } - delete rep; return 0; } @@ -328,11 +324,11 @@ void BaseConf::DumpConf() const { } bool BaseConf::WriteBack() { - WritableFile* write_file; + std::unique_ptr write_file; std::string tmp_path = rep_->path + ".tmp"; - Status ret = NewWritableFile(tmp_path, &write_file); + Status ret = NewWritableFile(tmp_path, write_file); LOG(INFO) << "ret " << ret.ToString(); - if (write_file == nullptr) { + if (!write_file) { return false; } std::string tmp; @@ -346,14 +342,13 @@ bool BaseConf::WriteBack() { } DeleteFile(rep_->path); RenameFile(tmp_path, rep_->path); - delete write_file; return true; } void BaseConf::WriteSampleConf() const { - WritableFile* write_file; + std::unique_ptr write_file; std::string sample_path = rep_->path + ".sample"; - Status ret = NewWritableFile(sample_path, &write_file); + Status ret = NewWritableFile(sample_path, 
write_file); std::string tmp; for (auto & i : rep_->item) { if (i.type == Rep::kConf) { @@ -363,7 +358,6 @@ void BaseConf::WriteSampleConf() const { write_file->Append(i.value); } } - delete write_file; } void BaseConf::PushConfItem(const Rep::ConfItem& item) { rep_->item.push_back(item); } diff --git a/src/pstd/src/env.cc b/src/pstd/src/env.cc index 60829d610b..8ba54dabda 100644 --- a/src/pstd/src/env.cc +++ b/src/pstd/src/env.cc @@ -151,7 +151,7 @@ Status LockFile(const std::string& fname, FileLock** lock) { result = IOError("lock " + fname, errno); close(fd); } else { - auto* my_lock = new FileLock; + auto my_lock = new FileLock; my_lock->fd_ = fd; my_lock->name_ = fname; *lock = my_lock; @@ -173,7 +173,7 @@ int GetChildren(const std::string& dir, std::vector& result) { int res = 0; result.clear(); DIR* d = opendir(dir.c_str()); - if (d == nullptr) { + if (!d) { return errno; } struct dirent* entry; @@ -189,7 +189,7 @@ int GetChildren(const std::string& dir, std::vector& result) { bool GetDescendant(const std::string& dir, std::vector& result) { DIR* d = opendir(dir.c_str()); - if (d == nullptr) { + if (!d) { return false; } struct dirent* entry; @@ -292,7 +292,7 @@ uint64_t Du(const std::string& filename) { std::string newfile; dir = opendir(filename.c_str()); - if (dir == nullptr) { + if (!dir) { return sum; } while ((entry = readdir(dir)) != nullptr) { @@ -478,7 +478,7 @@ class PosixMmapFile : public WritableFile { assert(base_ <= dst_); assert(dst_ <= limit_); size_t avail = limit_ - dst_; - if (avail == 0) { + if (!avail) { if (!UnmapCurrentRegion() || !MapNewRegion()) { return IOError(filename_, errno); } @@ -718,61 +718,56 @@ class PosixRandomRWFile : public RandomRWFile { // } }; -Status NewSequentialFile(const std::string& fname, SequentialFile** result) { +Status NewSequentialFile(const std::string& fname, std::unique_ptr& result) { FILE* f = fopen(fname.c_str(), "r"); - if (f == nullptr) { - *result = nullptr; + if (!f) { return IOError(fname, errno); 
} else { - *result = new PosixSequentialFile(fname, f); + result = std::make_unique(fname, f); return Status::OK(); } } -Status NewWritableFile(const std::string& fname, WritableFile** result) { +Status NewWritableFile(const std::string& fname, std::unique_ptr& result) { Status s; const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_TRUNC | O_CLOEXEC, 0644); if (fd < 0) { - *result = nullptr; s = IOError(fname, errno); } else { - *result = new PosixMmapFile(fname, fd, kPageSize); + result = std::make_unique(fname, fd, kPageSize); } return s; } -Status NewRWFile(const std::string& fname, RWFile** result) { +Status NewRWFile(const std::string& fname, std::unique_ptr& result) { Status s; const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_CLOEXEC, 0644); if (fd < 0) { - *result = nullptr; s = IOError(fname, errno); } else { - *result = new MmapRWFile(fname, fd, kPageSize); + result = std::make_unique(fname, fd, kPageSize); } return s; } -Status AppendWritableFile(const std::string& fname, WritableFile** result, uint64_t write_len) { +Status AppendWritableFile(const std::string& fname, std::unique_ptr& result, uint64_t write_len) { Status s; const int fd = open(fname.c_str(), O_RDWR | O_CLOEXEC, 0644); if (fd < 0) { - *result = nullptr; s = IOError(fname, errno); } else { - *result = new PosixMmapFile(fname, fd, kPageSize, write_len); + result = std::make_unique(fname, fd, kPageSize, write_len); } return s; } -Status NewRandomRWFile(const std::string& fname, RandomRWFile** result) { +Status NewRandomRWFile(const std::string& fname, std::unique_ptr& result) { Status s; const int fd = open(fname.c_str(), O_CREAT | O_RDWR, 0644); if (fd < 0) { - *result = nullptr; s = IOError(fname, errno); } else { - *result = new PosixRandomRWFile(fname, fd); + result = std::make_unique(fname, fd); } return s; } diff --git a/src/pstd/src/lock_mgr.cc b/src/pstd/src/lock_mgr.cc index 3e12efc812..8a2631f327 100644 --- a/src/pstd/src/lock_mgr.cc +++ b/src/pstd/src/lock_mgr.cc @@ 
-42,16 +42,12 @@ struct LockMap { explicit LockMap(size_t num_stripes, const std::shared_ptr& factory) : num_stripes_(num_stripes) { lock_map_stripes_.reserve(num_stripes); for (size_t i = 0; i < num_stripes; i++) { - auto* stripe = new LockMapStripe(factory); + auto stripe = std::make_shared(factory); lock_map_stripes_.push_back(stripe); } } - ~LockMap() { - for (auto stripe : lock_map_stripes_) { - delete stripe; - } - } + ~LockMap() {} // Number of sepearate LockMapStripes to create, each with their own Mutex const size_t num_stripes_; @@ -60,7 +56,7 @@ struct LockMap { // (Only maintained if LockMgr::max_num_locks_ is positive.) std::atomic lock_cnt{0}; - std::vector lock_map_stripes_; + std::vector> lock_map_stripes_; size_t GetStripe(const std::string& key) const; }; @@ -85,14 +81,14 @@ Status LockMgr::TryLock(const std::string& key) { #else size_t stripe_num = lock_map_->GetStripe(key); assert(lock_map_->lock_map_stripes_.size() > stripe_num); - LockMapStripe* stripe = lock_map_->lock_map_stripes_.at(stripe_num); + auto stripe = lock_map_->lock_map_stripes_.at(stripe_num); return Acquire(stripe, key); #endif } // Helper function for TryLock(). -Status LockMgr::Acquire(LockMapStripe* stripe, const std::string& key) { +Status LockMgr::Acquire(std::shared_ptr stripe, const std::string& key) { Status result; // we wait indefinitely to acquire the lock @@ -123,7 +119,7 @@ Status LockMgr::Acquire(LockMapStripe* stripe, const std::string& key) { // Try to lock this key after we have acquired the mutex. // REQUIRED: Stripe mutex must be held. 
-Status LockMgr::AcquireLocked(LockMapStripe* stripe, const std::string& key) { +Status LockMgr::AcquireLocked(std::shared_ptr stripe, const std::string& key) { Status result; // Check if this key is already locked if (stripe->keys.find(key) != stripe->keys.end()) { @@ -147,7 +143,7 @@ Status LockMgr::AcquireLocked(LockMapStripe* stripe, const std::string& key) { return result; } -void LockMgr::UnLockKey(const std::string& key, LockMapStripe* stripe) { +void LockMgr::UnLockKey(const std::string& key, std::shared_ptr stripe) { #ifdef LOCKLESS #else auto stripe_iter = stripe->keys.find(key); @@ -169,7 +165,7 @@ void LockMgr::UnLock(const std::string& key) { // Lock the mutex for the stripe that this key hashes to size_t stripe_num = lock_map_->GetStripe(key); assert(lock_map_->lock_map_stripes_.size() > stripe_num); - LockMapStripe* stripe = lock_map_->lock_map_stripes_.at(stripe_num); + auto stripe = lock_map_->lock_map_stripes_.at(stripe_num); stripe->stripe_mutex->Lock(); UnLockKey(key, stripe); diff --git a/src/pstd/src/posix.cc b/src/pstd/src/posix.cc index 4fcfd7ef84..300c46252f 100644 --- a/src/pstd/src/posix.cc +++ b/src/pstd/src/posix.cc @@ -234,7 +234,7 @@ void Munmap(void* start, size_t length) { void* Malloc(size_t size) { void* p; - if ((p = malloc(size)) == nullptr) { + if (!(p = malloc(size))) { LOG(ERROR) << "Malloc error: " << strerror(errno); } return p; @@ -243,7 +243,7 @@ void* Malloc(size_t size) { void* Realloc(void* ptr, size_t size) { void* p; - if ((p = realloc(ptr, size)) == nullptr) { + if (!(p = realloc(ptr, size))) { LOG(ERROR) << "Realloc error: " << strerror(errno); } return p; @@ -252,7 +252,7 @@ void* Realloc(void* ptr, size_t size) { void* Calloc(size_t nmemb, size_t size) { void* p; - if ((p = calloc(nmemb, size)) == nullptr) { + if (!(p = calloc(nmemb, size))) { LOG(ERROR) << "Calloc error: " << strerror(errno); } return p; @@ -272,7 +272,7 @@ void Fclose(FILE* fp) { FILE* Fdopen(int fd, const char* type) { FILE* fp; - if ((fp = 
fdopen(fd, type)) == nullptr) { + if (!(fp = fdopen(fd, type))) { LOG(ERROR) << "Fdopen error: " << strerror(errno); } @@ -282,7 +282,7 @@ FILE* Fdopen(int fd, const char* type) { char* Fgets(char* ptr, int n, FILE* stream) { char* rptr; - if (((rptr = fgets(ptr, n, stream)) == nullptr) && (ferror(stream) != 0)) { + if (!(rptr = fgets(ptr, n, stream)) && ferror(stream)) { LOG(ERROR) << "Fgets error"; } @@ -292,7 +292,7 @@ char* Fgets(char* ptr, int n, FILE* stream) { FILE* Fopen(const char* filename, const char* mode) { FILE* fp; - if ((fp = fopen(filename, mode)) == nullptr) { + if (!(fp = fopen(filename, mode))) { LOG(ERROR) << "Fopen error: " << strerror(errno); } @@ -374,7 +374,7 @@ void Connect(int sockfd, struct sockaddr* serv_addr, int addrlen) { struct hostent* Gethostbyname(const char* name) { struct hostent* p; - if ((p = gethostbyname(name)) == nullptr) { + if (!(p = gethostbyname(name))) { LOG(ERROR) << "Gethostbyname error: DNS error " << h_errno; } return p; @@ -384,7 +384,7 @@ struct hostent* Gethostbyname(const char* name) { struct hostent* Gethostbyaddr(const char* addr, int len, int type) { struct hostent* p; - if (p = gethostbyaddr(addr, len, type); p == nullptr) { + if (!(p = gethostbyaddr(addr, len, type))) { LOG(ERROR) << "Gethostbyaddr error: DNS error " << h_errno; } return p; @@ -572,10 +572,10 @@ ssize_t rio_readnb(rio_t* rp, void* usrbuf, size_t n) { nread = 0; /* call read() again */ } else { return -1; /* errno set by read() */ -} + } } else if (nread == 0) { break; /* EOF */ -} + } nleft -= nread; bufp += nread; } @@ -666,12 +666,14 @@ int open_clientfd(char* hostname, int port) { struct hostent* hp; struct sockaddr_in serveraddr; - if ((clientfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { return -1; /* check errno for cause of error */ -} + if ((clientfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + return -1; /* check errno for cause of error */ + } /* Fill in the server's IP address and port */ - if ((hp = gethostbyname(hostname)) == 
nullptr) { return -2; /* check h_errno for cause of error */ -} + if (!(hp = gethostbyname(hostname))) { + return -2; /* check h_errno for cause of error */ + } memset(&serveraddr, 0, sizeof(serveraddr)); serveraddr.sin_family = AF_INET; memmove(&serveraddr.sin_addr.s_addr, hp->h_addr_list[0], hp->h_length); @@ -716,7 +718,8 @@ int open_listenfd(int port) { } /* Make it a listening socket ready to accept connection requests */ - if (listen(listenfd, LISTENQ) < 0) { return -1; + if (listen(listenfd, LISTENQ) < 0) { + return -1; } return listenfd; } diff --git a/src/pstd/src/pstd_coding.cc b/src/pstd/src/pstd_coding.cc index df9cf5b3ee..8d2b0e67f3 100644 --- a/src/pstd/src/pstd_coding.cc +++ b/src/pstd/src/pstd_coding.cc @@ -33,7 +33,7 @@ void PutFixed64(std::string* dst, uint64_t value) { char* EncodeVarint32(char* dst, uint32_t v) { // Operate on characters as unsigneds - auto* ptr = reinterpret_cast(dst); + auto ptr = reinterpret_cast(dst); static const int B = 128; if (v < (1 << 7)) { *(ptr++) = v; @@ -67,7 +67,7 @@ void PutVarint32(std::string* dst, uint32_t v) { char* EncodeVarint64(char* dst, uint64_t v) { static const int B = 128; - auto* ptr = reinterpret_cast(dst); + auto ptr = reinterpret_cast(dst); while (v >= B) { *(ptr++) = (v & (B - 1)) | B; v >>= 7; @@ -117,7 +117,7 @@ bool GetVarint32(std::string* input, uint32_t* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint32Ptr(p, limit, value); - if (q == nullptr) { + if (!q) { return false; } else { (*input).erase(0, q - p); @@ -129,7 +129,7 @@ bool GetVarint32(Slice* input, uint32_t* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint32Ptr(p, limit, value); - if (q == nullptr) { + if (!q) { return false; } else { *input = Slice(q, limit - q); @@ -158,7 +158,7 @@ bool GetVarint64(Slice* input, uint64_t* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = 
GetVarint64Ptr(p, limit, value); - if (q == nullptr) { + if (!q) { return false; } else { *input = Slice(q, limit - q); @@ -169,10 +169,12 @@ bool GetVarint64(Slice* input, uint64_t* value) { const char* GetLengthPrefixedSlice(const char* p, const char* limit, Slice* result) { uint32_t len; p = GetVarint32Ptr(p, limit, &len); - if (p == nullptr) { return nullptr; -} - if (p + len > limit) { return nullptr; -} + if (!p) { + return nullptr; + } + if (p + len > limit) { + return nullptr; + } *result = Slice(p, len); return p + len; } diff --git a/src/pstd/src/pstd_mutex.cc b/src/pstd/src/pstd_mutex.cc index 6d75660c54..1734c6eedb 100644 --- a/src/pstd/src/pstd_mutex.cc +++ b/src/pstd/src/pstd_mutex.cc @@ -44,7 +44,7 @@ void RecordMutex::Lock(const std::string& key) { ref_mutex->Lock(); } else { - auto* ref_mutex = new RefMutex(); + auto ref_mutex = new RefMutex(); records_.emplace(key, ref_mutex); ref_mutex->Ref(); diff --git a/src/pstd/src/pstd_status.cc b/src/pstd/src/pstd_status.cc index 853f282e03..850b6219a8 100644 --- a/src/pstd/src/pstd_status.cc +++ b/src/pstd/src/pstd_status.cc @@ -33,7 +33,7 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) { } std::string Status::ToString() const { - if (state_ == nullptr) { + if (!state_) { return "OK"; } else { char tmp[30]; diff --git a/src/pstd/src/pstd_string.cc b/src/pstd/src/pstd_string.cc index 23b4ae38ef..07e41601d6 100644 --- a/src/pstd/src/pstd_string.cc +++ b/src/pstd/src/pstd_string.cc @@ -542,7 +542,7 @@ void getRandomHexChars(char* p, unsigned int len) { char charset[] = "0123456789abcdef"; unsigned int j; - if (fp == nullptr || fread(p, len, 1, fp) == 0) { + if (!fp || !fread(p, len, 1, fp)) { /* If we can't read from /dev/urandom, do some reasonable effort * in order to create some entropy, since this function is used to * generate run_id and cluster instance IDs */ diff --git a/src/pstd/src/rsync.cc b/src/pstd/src/rsync.cc index 609576ac9c..42c73c587a 100644 --- a/src/pstd/src/rsync.cc +++ 
b/src/pstd/src/rsync.cc @@ -1,7 +1,7 @@ #include #include #include - +#include #include #include "pstd/include/env.h" @@ -95,21 +95,18 @@ int StopRsync(const std::string& raw_path) { } // Kill Rsync - SequentialFile* sequential_file; - if (!NewSequentialFile(pid_file, &sequential_file).ok()) { + std::unique_ptr sequential_file; + if (!NewSequentialFile(pid_file, sequential_file).ok()) { LOG(WARNING) << "no rsync pid file found"; return 0; }; char line[32]; - if (sequential_file->ReadLine(line, 32) == nullptr) { + if (!(sequential_file->ReadLine(line, 32))) { LOG(WARNING) << "read rsync pid file err"; - delete sequential_file; return 0; }; - delete sequential_file; - pid_t pid = atoi(line); if (pid <= 1) { @@ -117,7 +114,7 @@ int StopRsync(const std::string& raw_path) { return 0; } - std::string rsync_stop_cmd = "kill -- -$(ps -o pgid= " + std::to_string(pid) + ")"; + std::string rsync_stop_cmd = "kill $(ps -o pgid=" + std::to_string(pid) + ")"; int ret = system(rsync_stop_cmd.c_str()); if (ret == 0 || (WIFEXITED(ret) && !WEXITSTATUS(ret))) { LOG(INFO) << "Stop rsync success!"; diff --git a/src/pstd/src/scope_record_lock.cc b/src/pstd/src/scope_record_lock.cc index 9a8f39ff10..de6d23c0be 100644 --- a/src/pstd/src/scope_record_lock.cc +++ b/src/pstd/src/scope_record_lock.cc @@ -9,8 +9,8 @@ namespace pstd::lock { -MultiScopeRecordLock::MultiScopeRecordLock(LockMgr* lock_mgr, std::vector keys) - : lock_mgr_(lock_mgr), keys_(std::move(keys)) { +MultiScopeRecordLock::MultiScopeRecordLock(std::shared_ptr lock_mgr, const std::vector& keys) + : lock_mgr_(lock_mgr), keys_(keys) { std::string pre_key; std::sort(keys_.begin(), keys_.end()); if (!keys_.empty() && keys_[0].empty()) { diff --git a/src/pstd/tests/base_conf_test.cc b/src/pstd/tests/base_conf_test.cc index c2238a5459..865883e736 100644 --- a/src/pstd/tests/base_conf_test.cc +++ b/src/pstd/tests/base_conf_test.cc @@ -31,14 +31,14 @@ class BaseConfTest : public ::testing::Test { "test_bool : yes\n", }; - WritableFile* 
write_file; - Status ret = NewWritableFile(test_conf_, &write_file); - if (!ret.ok()) { return ret; -} + std::unique_ptr write_file; + Status ret = NewWritableFile(test_conf_, write_file); + if (!ret.ok()) { + return ret; + } for (std::string& item : sample_conf) { write_file->Append(item); } - delete write_file; return Status::OK(); } @@ -52,7 +52,7 @@ class BaseConfTest : public ::testing::Test { TEST_F(BaseConfTest, WriteReadConf) { ASSERT_OK(CreateSampleConf()); - auto* conf = new BaseConf(test_conf_); + auto conf = std::make_unique(test_conf_); ASSERT_EQ(conf->LoadConf(), 0); // Write configuration diff --git a/src/storage/include/storage/backupable.h b/src/storage/include/storage/backupable.h index 538331a42a..c5462a14b1 100644 --- a/src/storage/include/storage/backupable.h +++ b/src/storage/include/storage/backupable.h @@ -43,7 +43,7 @@ struct BackupContent { class BackupEngine { public: ~BackupEngine(); - static Status Open(Storage* storage, BackupEngine** backup_engine_ptr); + static Status Open(Storage* db, std::shared_ptr& backup_engine_ret); Status SetBackupContent(); @@ -56,7 +56,7 @@ class BackupEngine { private: BackupEngine() = default; - std::map engines_; + std::map> engines_; std::map backup_content_; std::map backup_pthread_ts_; @@ -70,3 +70,4 @@ class BackupEngine { } // namespace storage #endif // SRC_BACKUPABLE_H_ + diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 57f6bb28cc..21cb1484af 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -1021,14 +1021,14 @@ class Storage { const std::unordered_map& options); private: - RedisStrings* strings_db_{nullptr}; - RedisHashes* hashes_db_{nullptr}; - RedisSets* sets_db_{nullptr}; - RedisZSets* zsets_db_{nullptr}; - RedisLists* lists_db_{nullptr}; + std::unique_ptr strings_db_; + std::unique_ptr hashes_db_; + std::unique_ptr sets_db_; + std::unique_ptr zsets_db_; + std::unique_ptr lists_db_; std::atomic 
is_opened_; - LRUCache* cursors_store_; + std::unique_ptr> cursors_store_; // Storage start the background thread for compaction task pthread_t bg_tasks_thread_id_; diff --git a/src/storage/src/backupable.cc b/src/storage/src/backupable.cc index f0c8472612..3de6ae2ac6 100644 --- a/src/storage/src/backupable.cc +++ b/src/storage/src/backupable.cc @@ -14,11 +14,6 @@ BackupEngine::~BackupEngine() { // Wait all children threads StopBackup(); WaitBackupPthread(); - // Delete engines - for (auto& engine : engines_) { - delete engine.second; - } - engines_.clear(); } Status BackupEngine::NewCheckpoint(rocksdb::DB* rocksdb_db, const std::string& type) { @@ -27,13 +22,14 @@ Status BackupEngine::NewCheckpoint(rocksdb::DB* rocksdb_db, const std::string& t if (!s.ok()) { return s; } - engines_.insert(std::make_pair(type, checkpoint)); + engines_.insert(std::make_pair(type, std::unique_ptr(checkpoint))); return s; } -Status BackupEngine::Open(storage::Storage* storage, BackupEngine** backup_engine_ptr) { - *backup_engine_ptr = new BackupEngine(); - if (*backup_engine_ptr == nullptr) { +Status BackupEngine::Open(storage::Storage* storage, std::shared_ptr& backup_engine_ret) { + // BackupEngine() is private, can't use make_shared + backup_engine_ret = std::shared_ptr(new BackupEngine()); + if (!backup_engine_ret) { return Status::Corruption("New BackupEngine failed!"); } @@ -42,16 +38,16 @@ Status BackupEngine::Open(storage::Storage* storage, BackupEngine** backup_engin rocksdb::DB* rocksdb_db; std::string types[] = {STRINGS_DB, HASHES_DB, LISTS_DB, ZSETS_DB, SETS_DB}; for (const auto& type : types) { - if ((rocksdb_db = storage->GetDBByType(type)) == nullptr) { + if (!(rocksdb_db = storage->GetDBByType(type))) { s = Status::Corruption("Error db type"); } if (s.ok()) { - s = (*backup_engine_ptr)->NewCheckpoint(rocksdb_db, type); + s = backup_engine_ret->NewCheckpoint(rocksdb_db, type); } if (!s.ok()) { - delete *backup_engine_ptr; + backup_engine_ret = nullptr; break; } } @@ 
-95,8 +91,8 @@ Status BackupEngine::CreateNewBackupSpecify(const std::string& backup_dir, const } void* ThreadFuncSaveSpecify(void* arg) { - auto* arg_ptr = static_cast(arg); - auto* p = static_cast(arg_ptr->p_engine); + auto arg_ptr = static_cast(arg); + auto p = static_cast(arg_ptr->p_engine); arg_ptr->res = p->CreateNewBackupSpecify(arg_ptr->backup_dir, arg_ptr->key_type); pthread_exit(&(arg_ptr->res)); } @@ -120,13 +116,15 @@ Status BackupEngine::WaitBackupPthread() { Status BackupEngine::CreateNewBackup(const std::string& dir) { Status s = Status::OK(); - std::vector args; + // ensure cleaning up the pointers after the function has finished. + std::vector> args; + args.reserve(engines_.size()); for (const auto& engine : engines_) { pthread_t tid; - auto* arg = new BackupSaveArgs(reinterpret_cast(this), dir, engine.first); - args.push_back(arg); - if (pthread_create(&tid, nullptr, &ThreadFuncSaveSpecify, arg) != 0) { - s = Status::Corruption("pthead_create failed."); + auto arg = std::make_unique(reinterpret_cast(this), dir, engine.first); + args.push_back(std::move(arg)); + if (pthread_create(&tid, nullptr, &ThreadFuncSaveSpecify, args.back().get()) != 0) { + s = Status::Corruption("pthread_create failed."); break; } if (!(backup_pthread_ts_.insert(std::make_pair(engine.first, tid)).second)) { @@ -140,9 +138,6 @@ Status BackupEngine::CreateNewBackup(const std::string& dir) { } s = WaitBackupPthread(); - for (auto& a : args) { - delete a; - } return s; } @@ -151,3 +146,4 @@ void BackupEngine::StopBackup() { } } // namespace storage + diff --git a/src/storage/src/custom_comparator.h b/src/storage/src/custom_comparator.h index 0f37f7b4d3..07648ef8d2 100644 --- a/src/storage/src/custom_comparator.h +++ b/src/storage/src/custom_comparator.h @@ -104,7 +104,7 @@ class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator { ptr_a += key_a_len + 2 * sizeof(int32_t); ptr_b += key_b_len + 2 * sizeof(int32_t); int ret = key_a_prefix.compare(key_b_prefix); - if (ret != 
0) { + if (ret) { return ret; } @@ -129,7 +129,7 @@ class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator { rocksdb::Slice key_a_member(ptr_a, a_size - (ptr_a - a.data())); rocksdb::Slice key_b_member(ptr_b, b_size - (ptr_b - b.data())); ret = key_a_member.compare(key_b_member); - if (ret != 0) { + if (ret) { return ret; } } diff --git a/src/storage/src/db_checkpoint.cc b/src/storage/src/db_checkpoint.cc index e985e138a1..ce29d4afa9 100644 --- a/src/storage/src/db_checkpoint.cc +++ b/src/storage/src/db_checkpoint.cc @@ -17,6 +17,7 @@ # include +#include # include "file/file_util.h" # include "rocksdb/db.h" // #include "file/filename.h" @@ -107,6 +108,13 @@ Status DBCheckpointImpl::CreateCheckpointWithFiles(const std::string& checkpoint return s; } + // if wal_dir eq db path, rocksdb will clear it when opening + // make wal_dir valid in that case + std::string wal_dir = db_->GetOptions().wal_dir; + if (wal_dir.empty()) { + wal_dir = db_->GetName() + "/"; + } + size_t wal_size = live_wal_files.size(); Log(db_->GetOptions().info_log, "Started the snapshot process -- creating snapshot in directory %s", checkpoint_dir.c_str()); @@ -182,10 +190,10 @@ Status DBCheckpointImpl::CreateCheckpointWithFiles(const std::string& checkpoint if (i + 1 == wal_size) { Log(db_->GetOptions().info_log, "Copying %s", live_wal_files[i]->PathName().c_str()); # if (ROCKSDB_MAJOR < 5 || (ROCKSDB_MAJOR == 5 && ROCKSDB_MINOR < 3)) - s = CopyFile(db_->GetEnv(), db_->GetOptions().wal_dir + live_wal_files[i]->PathName(), + s = CopyFile(db_->GetEnv(), wal_dir + live_wal_files[i]->PathName(), full_private_path + live_wal_files[i]->PathName(), live_wal_files[i]->SizeFileBytes()); # else - s = CopyFile(db_->GetFileSystem(), db_->GetOptions().wal_dir + live_wal_files[i]->PathName(), + s = CopyFile(db_->GetFileSystem(), wal_dir + live_wal_files[i]->PathName(), full_private_path + live_wal_files[i]->PathName(), live_wal_files[i]->SizeFileBytes(), false, nullptr, Temperature::kUnknown); # endif 
@@ -194,7 +202,7 @@ Status DBCheckpointImpl::CreateCheckpointWithFiles(const std::string& checkpoint if (same_fs) { // we only care about live log files Log(db_->GetOptions().info_log, "Hard Linking %s", live_wal_files[i]->PathName().c_str()); - s = db_->GetEnv()->LinkFile(db_->GetOptions().wal_dir + live_wal_files[i]->PathName(), + s = db_->GetEnv()->LinkFile(wal_dir + live_wal_files[i]->PathName(), full_private_path + live_wal_files[i]->PathName()); if (s.IsNotSupported()) { same_fs = false; @@ -204,10 +212,10 @@ Status DBCheckpointImpl::CreateCheckpointWithFiles(const std::string& checkpoint if (!same_fs) { Log(db_->GetOptions().info_log, "Copying %s", live_wal_files[i]->PathName().c_str()); # if (ROCKSDB_MAJOR < 5 || (ROCKSDB_MAJOR == 5 && ROCKSDB_MINOR < 3)) - s = CopyFile(db_->GetEnv(), db_->GetOptions().wal_dir + live_wal_files[i]->PathName(), + s = CopyFile(db_->GetEnv(), wal_dir + live_wal_files[i]->PathName(), full_private_path + live_wal_files[i]->PathName(), 0); # else - s = CopyFile(db_->GetFileSystem(), db_->GetOptions().wal_dir + live_wal_files[i]->PathName(), + s = CopyFile(db_->GetFileSystem(), wal_dir + live_wal_files[i]->PathName(), full_private_path + live_wal_files[i]->PathName(), 0, false, nullptr, Temperature::kUnknown); # endif } diff --git a/src/storage/src/lru_cache.h b/src/storage/src/lru_cache.h index c6439d7a11..f32facaecb 100644 --- a/src/storage/src/lru_cache.h +++ b/src/storage/src/lru_cache.h @@ -167,7 +167,7 @@ rocksdb::Status LRUCache::Lookup(const T1& key, T2* const value) { LRU_MoveToHead(handle); *value = handle->value; } - return (handle == nullptr) ? rocksdb::Status::NotFound() : rocksdb::Status::OK(); + return (!handle) ? 
rocksdb::Status::NotFound() : rocksdb::Status::OK(); } template @@ -176,7 +176,7 @@ rocksdb::Status LRUCache::Insert(const T1& key, const T2& value, size_t if (capacity_ == 0) { return rocksdb::Status::Corruption("capacity is empty"); } else { - auto* handle = new LRUHandle(); + auto handle = new LRUHandle(); handle->key = key; handle->value = value; handle->charge = charge; @@ -218,7 +218,7 @@ bool LRUCache::LRUAndHandleTableConsistent() { LRUHandle* current = lru_.prev; while (current != &lru_) { handle = handle_table_.Lookup(current->key); - if (handle == nullptr || handle != current) { + if (!handle || handle != current) { return false; } else { count++; diff --git a/src/storage/src/murmurhash.cc b/src/storage/src/murmurhash.cc index a3afffbea5..bb662e3eb2 100644 --- a/src/storage/src/murmurhash.cc +++ b/src/storage/src/murmurhash.cc @@ -26,8 +26,8 @@ uint64_t MurmurHash64A(const void* key, int len, unsigned int seed) { uint64_t h = seed ^ (len * m); - const auto* data = static_cast(key); - const auto* end = data + (len / 8); + const auto data = static_cast(key); + const auto end = data + (len / 8); while (data != end) { uint64_t k = *data++; @@ -40,7 +40,7 @@ uint64_t MurmurHash64A(const void* key, int len, unsigned int seed) { h *= m; } - const auto* data2 = reinterpret_cast(data); + const auto data2 = reinterpret_cast(data); switch (len & 7) { case 7: @@ -154,7 +154,7 @@ unsigned int MurmurHashNeutral2(const void* key, int len, unsigned int seed) { unsigned int h = seed ^ len; - const auto* data = static_cast(key); + auto data = static_cast(key); while (len >= 4) { unsigned int k; diff --git a/src/storage/src/options_helper.cc b/src/storage/src/options_helper.cc index 4b3b94e145..b0783c35d8 100644 --- a/src/storage/src/options_helper.cc +++ b/src/storage/src/options_helper.cc @@ -41,29 +41,33 @@ bool ParseOptionMember(const MemberType& member_type, const std::string& value, switch (member_type) { case MemberType::kInt: { int intVal; - if (!strToInt(value, 
&intVal)) { return false; -} + if (!strToInt(value, &intVal)) { + return false; + } *reinterpret_cast(member_address) = intVal; break; } case MemberType::kUint: { uint32_t uint32Val; - if (!strToUint32(value, &uint32Val)) { return false; -} + if (!strToUint32(value, &uint32Val)) { + return false; + } *reinterpret_cast(member_address) = static_cast(uint32Val); break; } case MemberType::kUint64T: { uint64_t uint64Val; - if (!strToUint64(value, &uint64Val)) { return false; -} + if (!strToUint64(value, &uint64Val)) { + return false; + } *reinterpret_cast(member_address) = uint64Val; break; } case MemberType::kSizeT: { uint64_t uint64Val; - if (!strToUint64(value, &uint64Val)) { return false; -} + if (!strToUint64(value, &uint64Val)) { + return false; + } *reinterpret_cast(member_address) = static_cast(uint64Val); break; } diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 776c753d89..a96f558d7f 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -10,11 +10,10 @@ namespace storage { Redis::Redis(Storage* const s, const DataType& type) : storage_(s), type_(type), - lock_mgr_(new LockMgr(1000, 0, std::make_shared())), - + lock_mgr_(std::make_shared(1000, 0, std::make_shared())), small_compaction_threshold_(5000) { - statistics_store_ = new LRUCache(); - scan_cursors_store_ = new LRUCache(); + statistics_store_ = std::make_unique>(); + scan_cursors_store_ = std::make_unique>(); scan_cursors_store_->SetCapacity(5000); default_compact_range_options_.exclusive_manual_compaction = false; default_compact_range_options_.change_level = true; @@ -28,9 +27,6 @@ Redis::~Redis() { delete handle; } delete db_; - delete lock_mgr_; - delete statistics_store_; - delete scan_cursors_store_; } Status Redis::GetScanStartPoint(const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point) { diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index 96b63fd6d3..5dbff22a25 100644 --- a/src/storage/src/redis.h +++ 
b/src/storage/src/redis.h @@ -58,22 +58,23 @@ class Redis { protected: Storage* const storage_; DataType type_; - LockMgr* lock_mgr_ = nullptr; + std::shared_ptr lock_mgr_; rocksdb::DB* db_ = nullptr; + std::vector handles_; rocksdb::WriteOptions default_write_options_; rocksdb::ReadOptions default_read_options_; rocksdb::CompactRangeOptions default_compact_range_options_; // For Scan - LRUCache* scan_cursors_store_ = nullptr; + std::unique_ptr> scan_cursors_store_; Status GetScanStartPoint(const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point); Status StoreScanNextPoint(const Slice& key, const Slice& pattern, int64_t cursor, const std::string& next_point); // For Statistics std::atomic small_compaction_threshold_; - LRUCache* statistics_store_ = nullptr; + std::unique_ptr> statistics_store_; Status UpdateSpecificKeyStatistics(const std::string& key, size_t count); Status AddCompactKeyTaskIfNeeded(const std::string& key, size_t total); diff --git a/src/storage/src/redis_hyperloglog.cc b/src/storage/src/redis_hyperloglog.cc index a04ca8f6e5..34abe6a1d7 100644 --- a/src/storage/src/redis_hyperloglog.cc +++ b/src/storage/src/redis_hyperloglog.cc @@ -17,7 +17,7 @@ HyperLogLog::HyperLogLog(uint8_t precision, std::string origin_register) { b_ = precision; m_ = 1 << precision; alpha_ = Alpha(); - register_ = new char[m_]; + register_ = std::make_unique(m_); for (uint32_t i = 0; i < m_; ++i) { register_[i] = 0; } @@ -28,7 +28,7 @@ HyperLogLog::HyperLogLog(uint8_t precision, std::string origin_register) { } } -HyperLogLog::~HyperLogLog() { delete[] register_; } +HyperLogLog::~HyperLogLog() {} std::string HyperLogLog::Add(const char* value, uint32_t len) { uint32_t hash_value; diff --git a/src/storage/src/redis_hyperloglog.h b/src/storage/src/redis_hyperloglog.h index eacaecfe9b..8e9bfadd3e 100644 --- a/src/storage/src/redis_hyperloglog.h +++ b/src/storage/src/redis_hyperloglog.h @@ -8,6 +8,7 @@ #include #include +#include namespace storage { @@ 
-29,7 +30,7 @@ class HyperLogLog { uint32_t m_ = 0; // register bit width uint32_t b_ = 0; // regieter size double alpha_ = 0; - char* register_ = nullptr; // register; + std::unique_ptr register_; }; } // namespace storage diff --git a/src/storage/src/redis_sets.cc b/src/storage/src/redis_sets.cc index df62285ff2..de8f6fa05f 100644 --- a/src/storage/src/redis_sets.cc +++ b/src/storage/src/redis_sets.cc @@ -21,16 +21,11 @@ namespace storage { RedisSets::RedisSets(Storage* const s, const DataType& type) : Redis(s, type) { - spop_counts_store_ = new LRUCache(); + spop_counts_store_ = std::make_unique>(); spop_counts_store_->SetCapacity(1000); } -RedisSets::~RedisSets() { - if (spop_counts_store_ != nullptr) { - delete spop_counts_store_; - spop_counts_store_ = nullptr; - } -} +RedisSets::~RedisSets() {} rocksdb::Status RedisSets::Open(const StorageOptions& storage_options, const std::string& db_path) { statistics_store_->SetCapacity(storage_options.statistics_max_size); diff --git a/src/storage/src/redis_sets.h b/src/storage/src/redis_sets.h index 71d7184bfc..2e2834e422 100644 --- a/src/storage/src/redis_sets.h +++ b/src/storage/src/redis_sets.h @@ -73,7 +73,7 @@ class RedisSets : public Redis { private: // For compact in time after multiple spop - LRUCache* spop_counts_store_ = nullptr; + std::unique_ptr> spop_counts_store_; Status ResetSpopCount(const std::string& key); Status AddAndGetSpopCount(const std::string& key, uint64_t* count); }; diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index 1390b6860c..09b5cdfb38 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -213,7 +213,7 @@ Status RedisStrings::BitCount(const Slice& key, int64_t start_offset, int64_t en return Status::NotFound("Stale"); } else { parsed_strings_value.StripSuffix(); - const auto* bit_value = reinterpret_cast(value.data()); + const auto bit_value = reinterpret_cast(value.data()); int64_t value_length = value.length(); if 
(have_range) { if (start_offset < 0) { @@ -248,10 +248,9 @@ Status RedisStrings::BitCount(const Slice& key, int64_t start_offset, int64_t en } std::string BitOpOperate(BitOpType op, const std::vector& src_values, int64_t max_len) { - char* dest_value = new char[max_len]; - char byte; char output; + auto dest_value = std::make_unique(max_len); for (int64_t j = 0; j < max_len; j++) { if (j < static_cast(src_values[0].size())) { output = src_values[0][j]; @@ -285,8 +284,7 @@ std::string BitOpOperate(BitOpType op, const std::vector& src_value } dest_value[j] = output; } - std::string dest_str(dest_value, max_len); - delete[] dest_value; + std::string dest_str(dest_value.get(), max_len); return dest_str; } @@ -849,8 +847,8 @@ Status RedisStrings::Strlen(const Slice& key, int32_t* len) { int32_t GetBitPos(const unsigned char* s, unsigned int bytes, int bit) { uint64_t word = 0; uint64_t skip_val = 0; - auto* value = const_cast(s); - auto* l = reinterpret_cast(value); + auto value = const_cast(s); + auto l = reinterpret_cast(value); int pos = 0; if (bit == 0) { skip_val = std::numeric_limits::max(); @@ -866,7 +864,7 @@ int32_t GetBitPos(const unsigned char* s, unsigned int bytes, int bit) { bytes = bytes - sizeof(*l); pos = pos + 8 * sizeof(*l); } - auto* c = reinterpret_cast(l); + auto c = reinterpret_cast(l); for (size_t j = 0; j < sizeof(*l); j++) { word = word << 8; if (bytes != 0U) { @@ -907,7 +905,7 @@ Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t* ret) { return Status::NotFound("Stale"); } else { parsed_strings_value.StripSuffix(); - const auto* bit_value = reinterpret_cast(value.data()); + const auto bit_value = reinterpret_cast(value.data()); int64_t value_length = value.length(); int64_t start_offset = 0; int64_t end_offset = std::max(value_length - 1, static_cast(0)); @@ -942,7 +940,7 @@ Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t start_offset, return Status::NotFound("Stale"); } else { 
parsed_strings_value.StripSuffix(); - const auto* bit_value = reinterpret_cast(value.data()); + const auto bit_value = reinterpret_cast(value.data()); int64_t value_length = value.length(); int64_t end_offset = std::max(value_length - 1, static_cast(0)); if (start_offset < 0) { @@ -990,7 +988,7 @@ Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t start_offset, return Status::NotFound("Stale"); } else { parsed_strings_value.StripSuffix(); - const auto* bit_value = reinterpret_cast(value.data()); + const auto bit_value = reinterpret_cast(value.data()); int64_t value_length = value.length(); if (start_offset < 0) { start_offset = start_offset + value_length; diff --git a/src/storage/src/scope_snapshot.h b/src/storage/src/scope_snapshot.h index 13da08d57f..8fecfc6985 100644 --- a/src/storage/src/scope_snapshot.h +++ b/src/storage/src/scope_snapshot.h @@ -8,16 +8,16 @@ #include "rocksdb/db.h" +#include "pstd/include/noncopyable.h" + namespace storage { -class ScopeSnapshot { +class ScopeSnapshot : public pstd::noncopyable { public: ScopeSnapshot(rocksdb::DB* db, const rocksdb::Snapshot** snapshot) : db_(db), snapshot_(snapshot) { *snapshot_ = db_->GetSnapshot(); } ~ScopeSnapshot() { db_->ReleaseSnapshot(*snapshot_); } - ScopeSnapshot(const ScopeSnapshot&) = delete; - void operator=(const ScopeSnapshot&) = delete; private: rocksdb::DB* const db_; const rocksdb::Snapshot** snapshot_; diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 2a759abf89..e08cb1dd3a 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -54,7 +54,7 @@ Storage::Storage() current_task_type_(kNone), bg_tasks_should_exit_(false), scan_keynum_exit_(false) { - cursors_store_ = new LRUCache(); + cursors_store_ = std::make_unique>(); cursors_store_->SetCapacity(5000); Status s = StartBGThread(); @@ -79,13 +79,6 @@ Storage::~Storage() { if ((ret = pthread_join(bg_tasks_thread_id_, nullptr)) != 0) { LOG(ERROR) << "pthread_join failed with bgtask 
thread error " << ret; } - - delete strings_db_; - delete hashes_db_; - delete sets_db_; - delete lists_db_; - delete zsets_db_; - delete cursors_store_; } static std::string AppendSubDirectory(const std::string& db_path, const std::string& sub_db) { @@ -99,31 +92,31 @@ static std::string AppendSubDirectory(const std::string& db_path, const std::str Status Storage::Open(const StorageOptions& storage_options, const std::string& db_path) { mkpath(db_path.c_str(), 0755); - strings_db_ = new RedisStrings(this, kStrings); + strings_db_ = std::make_unique(this, kStrings); Status s = strings_db_->Open(storage_options, AppendSubDirectory(db_path, "strings")); if (!s.ok()) { LOG(FATAL) << "open kv db failed, " << s.ToString(); } - hashes_db_ = new RedisHashes(this, kHashes); + hashes_db_ = std::make_unique(this, kHashes); s = hashes_db_->Open(storage_options, AppendSubDirectory(db_path, "hashes")); if (!s.ok()) { LOG(FATAL) << "open hashes db failed, " << s.ToString(); } - sets_db_ = new RedisSets(this, kSets); + sets_db_ = std::make_unique(this, kSets); s = sets_db_->Open(storage_options, AppendSubDirectory(db_path, "sets")); if (!s.ok()) { LOG(FATAL) << "open set db failed, " << s.ToString(); } - lists_db_ = new RedisLists(this, kLists); + lists_db_ = std::make_unique(this, kLists); s = lists_db_->Open(storage_options, AppendSubDirectory(db_path, "lists")); if (!s.ok()) { LOG(FATAL) << "open list db failed, " << s.ToString(); } - zsets_db_ = new RedisZSets(this, kZSets); + zsets_db_ = std::make_unique(this, kZSets); s = zsets_db_->Open(storage_options, AppendSubDirectory(db_path, "zsets")); if (!s.ok()) { LOG(FATAL) << "open zset db failed, " << s.ToString(); @@ -1302,40 +1295,50 @@ Status Storage::Keys(const DataType& data_type, const std::string& pattern, std: Status s; if (data_type == DataType::kStrings) { s = strings_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } else if (data_type == DataType::kHashes) { s = 
hashes_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } else if (data_type == DataType::kZSets) { s = zsets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } else if (data_type == DataType::kSets) { s = sets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } else if (data_type == DataType::kLists) { s = lists_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } else { s = strings_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } s = hashes_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } s = zsets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } s = sets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } s = lists_db_->ScanKeys(pattern, keys); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } return s; } @@ -1469,7 +1472,7 @@ Status Storage::PfMerge(const std::vector& keys) { } static void* StartBGThreadWrapper(void* arg) { - auto* s = reinterpret_cast(arg); + auto s = reinterpret_cast(arg); s->RunBGTask(); return nullptr; } @@ -1593,7 +1596,7 @@ Status Storage::CompactKey(const DataType& type, const std::string& key) { } Status Storage::SetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) { - std::vector dbs = {sets_db_, zsets_db_, hashes_db_, lists_db_}; + std::vector dbs = {sets_db_.get(), zsets_db_.get(), hashes_db_.get(), lists_db_.get()}; for (const auto& db : dbs) { db->SetMaxCacheStatisticKeys(max_cache_statistic_keys); } @@ -1601,7 +1604,7 @@ Status Storage::SetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) { } Status Storage::SetSmallCompactionThreshold(uint32_t small_compaction_threshold) { - std::vector dbs = {sets_db_, zsets_db_, hashes_db_, lists_db_}; + std::vector dbs = 
{sets_db_.get(), zsets_db_.get(), hashes_db_.get(), lists_db_.get()}; for (const auto& db : dbs) { db->SetSmallCompactionThreshold(small_compaction_threshold); } @@ -1673,7 +1676,7 @@ uint64_t Storage::GetProperty(const std::string& db_type, const std::string& pro Status Storage::GetKeyNum(std::vector* key_infos) { KeyInfo key_info; // NOTE: keep the db order with string, hash, list, zset, set - std::vector dbs = {strings_db_, hashes_db_, lists_db_, zsets_db_, sets_db_}; + std::vector dbs = {strings_db_.get(), hashes_db_.get(), lists_db_.get(), zsets_db_.get(), sets_db_.get()}; for (const auto& db : dbs) { // check the scanner was stopped or not, before scanning the next db if (scan_keynum_exit_) { @@ -1715,28 +1718,33 @@ Status Storage::SetOptions(const OptionType& option_type, const std::string& db_ Status s; if (db_type == ALL_DB || db_type == STRINGS_DB) { s = strings_db_->SetOptions(option_type, options); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } if (db_type == ALL_DB || db_type == HASHES_DB) { s = hashes_db_->SetOptions(option_type, options); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } if (db_type == ALL_DB || db_type == LISTS_DB) { s = lists_db_->SetOptions(option_type, options); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } if (db_type == ALL_DB || db_type == ZSETS_DB) { s = zsets_db_->SetOptions(option_type, options); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } if (db_type == ALL_DB || db_type == SETS_DB) { s = sets_db_->SetOptions(option_type, options); - if (!s.ok()) { return s; -} + if (!s.ok()) { + return s; + } } return s; } diff --git a/src/storage/src/storage_murmur3.h b/src/storage/src/storage_murmur3.h index 5f3a8da55e..21868c9b0e 100644 --- a/src/storage/src/storage_murmur3.h +++ b/src/storage/src/storage_murmur3.h @@ -3,7 +3,7 @@ //----------------------------------------------------------------------------- // MurmurHash3 was written by Austin Appleby, and is 
placed in the public -// domain. The author hereby disclaims copyright to this source code. +// domain. The author hereby disclaims copyright to this source code. // Note - The x86 and x64 versions do _not_ produce the same results, as the // algorithms are optimized for their respective platforms. You can still @@ -89,7 +89,7 @@ extern #endif void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out) { - const auto* data = (const uint8_t*)key; + const auto data = (const uint8_t*)key; const int nblocks = len / 4; int i; @@ -101,7 +101,7 @@ extern //---------- // body - const auto* blocks = (const uint32_t*)(data + nblocks * 4); + const auto blocks = (const uint32_t*)(data + nblocks * 4); for (i = -nblocks; i != 0; i++) { uint32_t k1 = getblock(blocks, i); @@ -118,7 +118,7 @@ extern //---------- // tail { - const auto* tail = (data + nblocks * 4); + const auto tail = (data + nblocks * 4); uint32_t k1 = 0; diff --git a/src/storage/src/util.cc b/src/storage/src/util.cc index 9e86f470d5..4f5594d3f7 100644 --- a/src/storage/src/util.cc +++ b/src/storage/src/util.cc @@ -8,6 +8,7 @@ #include #include #include +#include #include "pstd/include/pstd_string.h" @@ -50,11 +51,13 @@ int StrToLongDouble(const char* s, size_t slen, long double* ldval) { return -1; } long double d = strtold(s, &pEnd); - if (pEnd != s + slen) { return -1; -} + if (pEnd != s + slen) { + return -1; + } - if (ldval != nullptr) { *ldval = d; -} + if (ldval != nullptr) { + *ldval = d; + } return 0; } @@ -201,29 +204,31 @@ int is_dir(const char* filename) { int CalculateMetaStartAndEndKey(const std::string& key, std::string* meta_start_key, std::string* meta_end_key) { size_t needed = key.size() + 1; - char* dst = new char[needed]; - const char* start = dst; - strncpy(dst, key.data(), key.size()); - dst += key.size(); + auto dst = std::make_unique(needed); + const char* start = dst.get(); + std::strncpy(dst.get(), key.data(), key.size()); + char* dst_ptr = dst.get() + key.size(); 
meta_start_key->assign(start, key.size()); - *dst = static_cast(0xff); + *dst_ptr = static_cast(0xff); meta_end_key->assign(start, key.size() + 1); - delete[] start; return 0; } int CalculateDataStartAndEndKey(const std::string& key, std::string* data_start_key, std::string* data_end_key) { size_t needed = sizeof(int32_t) + key.size() + 1; - char* dst = new char[needed]; - const char* start = dst; - EncodeFixed32(dst, key.size()); - dst += sizeof(int32_t); - strncpy(dst, key.data(), key.size()); - dst += key.size(); + auto dst = std::make_unique(needed); + const char* start = dst.get(); + char* dst_ptr = dst.get(); + + EncodeFixed32(dst_ptr, key.size()); + dst_ptr += sizeof(int32_t); + std::strncpy(dst_ptr, key.data(), key.size()); + dst_ptr += key.size(); + *dst_ptr = static_cast(0xff); + data_start_key->assign(start, sizeof(int32_t) + key.size()); - *dst = static_cast(0xff); data_end_key->assign(start, sizeof(int32_t) + key.size() + 1); - delete[] start; + return 0; } @@ -264,7 +269,7 @@ bool DeleteFiles(const char* path) { remove(path); } else if (S_ISDIR(statbuf.st_mode)) // 判断是否是目录 { - if ((dir = opendir(path)) == nullptr) { + if (!(dir = opendir(path))) { return true; } while ((dirinfo = readdir(dir)) != nullptr) { diff --git a/src/storage/src/zsets_filter.h b/src/storage/src/zsets_filter.h index d6370e99d5..51d58d94a9 100644 --- a/src/storage/src/zsets_filter.h +++ b/src/storage/src/zsets_filter.h @@ -92,7 +92,7 @@ class ZSetsScoreFilterFactory : public rocksdb::CompactionFilterFactory { std::unique_ptr CreateCompactionFilter( const rocksdb::CompactionFilter::Context& context) override { - return std::unique_ptr(new ZSetsScoreFilter(*db_ptr_, cf_handles_ptr_)); + return std::make_unique(*db_ptr_, cf_handles_ptr_); } const char* Name() const override { return "ZSetsScoreFilterFactory"; } diff --git a/src/storage/tests/lists_filter_test.cc b/src/storage/tests/lists_filter_test.cc index e775f0a988..23f2351001 100644 --- a/src/storage/tests/lists_filter_test.cc 
+++ b/src/storage/tests/lists_filter_test.cc @@ -72,7 +72,7 @@ TEST_F(ListsFilterTest, MetaFilterTest) { std::string new_value; // Test Meta Filter - auto* lists_meta_filter = new storage::ListsMetaFilter(); + auto lists_meta_filter = std::make_unique(); ASSERT_TRUE(lists_meta_filter != nullptr); // Timeout timestamp is not set, but it's an empty list. @@ -113,7 +113,6 @@ TEST_F(ListsFilterTest, MetaFilterTest) { filter_result = lists_meta_filter->Filter(0, "FILTER_TEST_KEY", lists_meta_value4.Encode(), &new_value, &value_changed); ASSERT_EQ(filter_result, true); - delete lists_meta_filter; } // Data Filter @@ -125,7 +124,7 @@ TEST_F(ListsFilterTest, DataFilterTest) { std::string new_value; // Timeout timestamp is not set, the version is valid. - auto* lists_data_filter1 = new ListsDataFilter(meta_db, &handles); + auto lists_data_filter1 = std::make_unique(meta_db, &handles); ASSERT_TRUE(lists_data_filter1 != nullptr); EncodeFixed64(str, 1); @@ -140,10 +139,9 @@ TEST_F(ListsFilterTest, DataFilterTest) { ASSERT_EQ(filter_result, false); s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); ASSERT_TRUE(s.ok()); - delete lists_data_filter1; // Timeout timestamp is set, but not expired. - auto* lists_data_filter2 = new ListsDataFilter(meta_db, &handles); + auto lists_data_filter2 = std::make_unique(meta_db, &handles); ASSERT_TRUE(lists_data_filter2 != nullptr); EncodeFixed64(str, 1); @@ -158,10 +156,9 @@ TEST_F(ListsFilterTest, DataFilterTest) { ASSERT_EQ(filter_result, false); s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); ASSERT_TRUE(s.ok()); - delete lists_data_filter2; // Timeout timestamp is set, already expired. 
- auto* lists_data_filter3 = new ListsDataFilter(meta_db, &handles); + auto lists_data_filter3 = std::make_unique(meta_db, &handles); ASSERT_TRUE(lists_data_filter3 != nullptr); EncodeFixed64(str, 1); @@ -177,10 +174,9 @@ TEST_F(ListsFilterTest, DataFilterTest) { ASSERT_EQ(filter_result, true); s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); ASSERT_TRUE(s.ok()); - delete lists_data_filter3; // Timeout timestamp is not set, the version is invalid - auto* lists_data_filter4 = new ListsDataFilter(meta_db, &handles); + auto lists_data_filter4 = std::make_unique(meta_db, &handles); ASSERT_TRUE(lists_data_filter4 != nullptr); EncodeFixed64(str, 1); @@ -197,10 +193,9 @@ TEST_F(ListsFilterTest, DataFilterTest) { ASSERT_EQ(filter_result, true); s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); ASSERT_TRUE(s.ok()); - delete lists_data_filter4; // Meta data has been clear - auto* lists_data_filter5 = new ListsDataFilter(meta_db, &handles); + auto lists_data_filter5 = std::make_unique(meta_db, &handles); ASSERT_TRUE(lists_data_filter5 != nullptr); EncodeFixed64(str, 1); @@ -214,7 +209,6 @@ TEST_F(ListsFilterTest, DataFilterTest) { filter_result = lists_data_filter5->Filter(0, lists_data_value5.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); ASSERT_EQ(filter_result, true); - delete lists_data_filter5; } int main(int argc, char** argv) { diff --git a/src/storage/tests/lock_mgr_test.cc b/src/storage/tests/lock_mgr_test.cc index d6dedbb07b..965ecdb980 100644 --- a/src/storage/tests/lock_mgr_test.cc +++ b/src/storage/tests/lock_mgr_test.cc @@ -19,8 +19,8 @@ void Func(LockMgr* mgr, int id, const std::string& key) { } int main() { - MutexFactory* factory = new MutexFactoryImpl; - LockMgr mgr(1, 3, std::shared_ptr(factory)); + std::shared_ptr factory = std::make_shared(); + LockMgr mgr(1, 3, factory); std::thread t1(Func, &mgr, 1, "key_1"); std::this_thread::sleep_for(std::chrono::milliseconds(100)); diff --git 
a/src/storage/tests/strings_filter_test.cc b/src/storage/tests/strings_filter_test.cc index 2d2165db56..5bfa713b64 100644 --- a/src/storage/tests/strings_filter_test.cc +++ b/src/storage/tests/strings_filter_test.cc @@ -17,7 +17,7 @@ TEST(StringsFilterTest, FilterTest) { std::string new_value; bool is_stale; bool value_changed; - auto* filter = new StringsFilter; + auto filter = std::make_unique(); int32_t ttl = 1; StringsValue strings_value("FILTER_VALUE"); @@ -27,8 +27,6 @@ TEST(StringsFilterTest, FilterTest) { std::this_thread::sleep_for(std::chrono::milliseconds(2000)); is_stale = filter->Filter(0, "FILTER_KEY", strings_value.Encode(), &new_value, &value_changed); ASSERT_TRUE(is_stale); - - delete filter; } int main(int argc, char** argv) { diff --git a/tools/aof_to_pika/CMakeLists.txt b/tools/aof_to_pika/CMakeLists.txt index 705447c4ac..564b2f0ce1 100644 --- a/tools/aof_to_pika/CMakeLists.txt +++ b/tools/aof_to_pika/CMakeLists.txt @@ -5,7 +5,7 @@ aux_source_directory(${SRC_DIR} BASE_OBJS) add_executable(aof_to_pika ${BASE_OBJS}) -target_include_directories(aof_to_pika PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) +target_include_directories(aof_to_pika PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/include) target_link_libraries(aof_to_pika pthread) set_target_properties(aof_to_pika PROPERTIES diff --git a/tools/aof_to_pika/src/aof_info.cc b/tools/aof_to_pika/src/aof_info.cc index 391384fc91..dc13fc472c 100644 --- a/tools/aof_to_pika/src/aof_info.cc +++ b/tools/aof_to_pika/src/aof_info.cc @@ -6,14 +6,16 @@ short aof_info_level_ = AOF_LOG_INFO; void set_info_level(int l) { aof_info_level_ = l; } void info_print(int l, const std::string& content) { - if (l > AOF_LOG_FATAL || l < AOF_LOG_DEBUG || content.empty()) { return; -} + if (l > AOF_LOG_FATAL || l < AOF_LOG_DEBUG || content.empty()) { + return; + } - if (l < aof_info_level_) { return; -} + if (l < aof_info_level_) { + return; + } if (l >= AOF_LOG_ERR) { std::cerr << content << std::endl; } else { 
std::cout << content << std::endl; -} + } } diff --git a/tools/benchmark_client/benchmark_client.cc b/tools/benchmark_client/benchmark_client.cc index f969743a3f..8edf2d483f 100644 --- a/tools/benchmark_client/benchmark_client.cc +++ b/tools/benchmark_client/benchmark_client.cc @@ -96,7 +96,7 @@ void* ThreadMain(void* arg) { struct timeval timeout = {1, 500000}; // 1.5 seconds c = redisConnectWithTimeout(hostname.data(), port, timeout); - if (c == nullptr || c->err) { + if (!c || c->err) { if (c) { printf("Thread %lu, Connection error: %s\n", ta->tid, c->errstr); redisFree(c); @@ -111,7 +111,7 @@ void* ThreadMain(void* arg) { size_t auth_argv_len[2] = {4, password.size()}; res = reinterpret_cast(redisCommandArgv(c, 2, reinterpret_cast(auth_argv), reinterpret_cast(auth_argv_len))); - if (res == nullptr) { + if (!res) { printf("Thread %lu Auth Failed, Get reply Error\n", ta->tid); freeReplyObject(res); redisFree(c); @@ -132,7 +132,7 @@ void* ThreadMain(void* arg) { size_t select_argv_len[2] = {6, ta->table_name.size()}; res = reinterpret_cast(redisCommandArgv(c, 2, reinterpret_cast(select_argv), reinterpret_cast(select_argv_len))); - if (res == nullptr) { + if (!res) { printf("Thread %lu Select Table %s Failed, Get reply Error\n", ta->tid, ta->table_name.data()); freeReplyObject(res); redisFree(c); @@ -202,7 +202,7 @@ Status RunSetCommandPipeline(redisContext* c) { if (redisGetReply(c, reinterpret_cast(&res)) == REDIS_ERR) { return Status::Corruption("Redis Pipeline Get Reply Error"); } else { - if (res == nullptr || strcasecmp(res->str, "OK")) { + if (!res || strcasecmp(res->str, "OK")) { std::string res_str = "Exec command error: " + (res != nullptr ? 
std::string(res->str) : ""); freeReplyObject(res); return Status::Corruption(res_str); @@ -238,7 +238,7 @@ Status RunSetCommand(redisContext* c) { res = reinterpret_cast( redisCommandArgv(c, 3, reinterpret_cast(set_argv), reinterpret_cast(set_argvlen))); - if (res == nullptr || strcasecmp(res->str, "OK")) { + if (!res || strcasecmp(res->str, "OK")) { std::string res_str = "Exec command error: " + (res != nullptr ? std::string(res->str) : ""); freeReplyObject(res); return Status::Corruption(res_str); @@ -272,7 +272,7 @@ Status RunZAddCommand(redisContext* c) { res = reinterpret_cast(redisCommandArgv(c, 4, reinterpret_cast(zadd_argv), reinterpret_cast(zadd_argvlen))); - if (res == nullptr || res->integer == 0) { + if (!res || !res->integer) { std::string res_str = "Exec command error: " + (res != nullptr ? std::string(res->str) : ""); freeReplyObject(res); return Status::Corruption(res_str); diff --git a/tools/binlog_sender/binlog_consumer.cc b/tools/binlog_sender/binlog_consumer.cc index 5b317a560e..b34b3c9bad 100644 --- a/tools/binlog_sender/binlog_consumer.cc +++ b/tools/binlog_sender/binlog_consumer.cc @@ -16,7 +16,6 @@ BinlogConsumer::BinlogConsumer(const std::string& binlog_path, uint32_t first_fi BinlogConsumer::~BinlogConsumer() { delete[] backing_store_; - delete queue_; } std::string BinlogConsumer::NewFileName(const std::string& name, const uint32_t current) { @@ -37,7 +36,7 @@ bool BinlogConsumer::Init() { current_filenum_ = first_filenum_; profile = NewFileName(filename_, current_filenum_); - pstd::Status s = pstd::NewSequentialFile(profile, &queue_); + pstd::Status s = pstd::NewSequentialFile(profile, queue_); return s.ok(); } @@ -208,10 +207,10 @@ pstd::Status BinlogConsumer::Parse(std::string* scratch) { // Roll to next File if (pstd::FileExists(confile)) { // DLOG(INFO) << "BinlogSender roll to new binlog" << confile; - delete queue_; + queue_.reset(); queue_ = nullptr; - pstd::NewSequentialFile(confile, &queue_); + pstd::NewSequentialFile(confile, 
queue_); current_filenum_++; current_offset_ = 0; diff --git a/tools/binlog_sender/binlog_consumer.h b/tools/binlog_sender/binlog_consumer.h index 992cd1b9e1..1e07660190 100644 --- a/tools/binlog_sender/binlog_consumer.h +++ b/tools/binlog_sender/binlog_consumer.h @@ -58,7 +58,7 @@ class BinlogConsumer { pstd::Slice buffer_; char* const backing_store_; - pstd::SequentialFile* queue_; + std::unique_ptr queue_; }; #endif // INCLUDE_BINLOG_Consumber_H_ diff --git a/tools/codis2pika/CHANGES.md b/tools/codis2pika/CHANGES.md new file mode 100644 index 0000000000..f656e3ca19 --- /dev/null +++ b/tools/codis2pika/CHANGES.md @@ -0,0 +1,4 @@ +# Release Notes +1.0.0.0 + +支持数据从codis 分片模式实例迁移到 pika classic模式实例。 \ No newline at end of file diff --git a/tools/codis2pika/LICENSE b/tools/codis2pika/LICENSE new file mode 100644 index 0000000000..e0362641e9 --- /dev/null +++ b/tools/codis2pika/LICENSE @@ -0,0 +1,45 @@ +MIT License + +Copyright (c) 2023 个推实验室 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +--- + +The MIT License (MIT) + +Copyright (c) 2019 Alibaba Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/tools/codis2pika/README.md b/tools/codis2pika/README.md new file mode 100644 index 0000000000..55ad79f793 --- /dev/null +++ b/tools/codis2pika/README.md @@ -0,0 +1,83 @@ +# codis2pika + +[中文文档](https://github.com/GetuiLaboratory/codis2pika/blob/main/README_zh.md) + +Codis2pika is a tool used to migrate codis data to pika. The main purpose is to support the migration of Codis sharding mode to Pika classic mode. + +## 感谢 + +codis2pika 参考借鉴了阿里开源的redis-shake项目,并进行了定制化的改造。因此基本的功能特性与原工具一致,但是功能上存在差异。 + + +## Features + +Same features as the original: + +* 🤗 Support the use of lua custom filtering rules (this part has not been changed, so there is no actual test, but it is theoretically supported. +* 💪 Support large instance migration. 
+ +Some features of codis2pika: +* 🌐 Support the source side as a stand-alone instance and the destination side as a stand-alone instance. +* 🌲 Only five basic data structures of Redis are supported. +* ✅ Testing on the Codis server based on Redis 3.2. +* ⏰ Support long time real-time data synchronization with several seconds delay. +* ✊ Not sensitive to the underlying storage mode of the instance. + + +### Description of changes +* Clustering is not supported: because the data is distributed differently at the bottom of the source instance (codis sharding mode) and the target instance (pika classic mode), it needs to be allocated according to the actual business situation. If necessary, add a corresponding algorithm to restore the cluster write interface. +* If Redis migration is required, it is recommended to use [RedisShake](https://github.com/alibaba/RedisShake) tool for more comprehensive functions. This project is mainly to support the migration of sharding mode instances to pika classic instances. + +# Document + +## install + +### Binary package + +Release: [https://github.com/GetuiLaboratory/codis2pika/releases](https://github.com/GetuiLaboratory/codis2pika/releases) + +### Compile from source + +After downloading the source code, run the `sh build.sh` command to compile. + +```shell +sh build.sh +``` + +## Usage + +1. Edit codis2pika.toml, modify the source and target configuration items. +2. Start codis2pika: + +```shell +./bin/codis2pika codis2pika.toml +``` + +3. Check data synchronization status. + +## Configure + +The codis2pika configuration file refers to `codis2pika.toml`. To avoid ambiguity, it is mandatory to assign values to each configuration in the configuration file, otherwise an error will be reported. + +## Data filtering + +codis2pika supports custom filtering rules using lua scripts. 
codis2pika can be started with the following command: + +```shell +./bin/codis2pika codis2pika.toml filter/xxx.lua +``` +However, the lua data filtering function has not been verified. Please refer to the RedisShake project if necessary. + +## Attention +* Extra large keys are not supported; +* The migrated codis needs to set the client output buffer limit to release the restriction, otherwise the link will be broken; +* The migrated node needs to reserve memory redundancy of the same amount of migrated data; +* Before and after data migration, it is recommended to `compact`; +* Multi db is not supported (I don't think it is necessary, and it is not supported for the time being); +* The expiration time of some keys may be delayed. + +## Visualization +It is recommended to configure the monitoring disk in advance to have a visual grasp of the migration process. + +## Verify +Alibaba open-source redis-full-check is recommended. diff --git a/tools/codis2pika/README_zh.md b/tools/codis2pika/README_zh.md new file mode 100644 index 0000000000..0941a49d9a --- /dev/null +++ b/tools/codis2pika/README_zh.md @@ -0,0 +1,82 @@ +# codis2pika + +codis2pika 是一个用来做 codis 数据迁移到 pika 的工具。主要目的是为了支持codis 分片模式迁移到 pika classic模式。 + +## 感谢 + +codis2pika 参考借鉴了阿里开源的redis-shake项目,并进行了定制化的改造。因此基本的功能特性与原工具一致,但是功能上存在差异。 + + +## 特性 + +与原版相同的特性: + +* 🤗 支持使用 lua 自定义过滤规则(这部分未作改动,因此没实际测试,但理论上是支持的) +* 💪 支持大实例迁移 + +codis2pika的一些特性: +* 🌐 支持源端为单机实例,目的端为单机实例 +* 🌲 仅支持 Redis 5种基础数据结构 +* ✅ 测试在 Redis 3.2 版本的codis server +* ⏰ 支持较长时间的数据实时同步,存在几秒延迟 +* ✊ 对实例的底层存储模式不敏感 + +### 改动说明 +* 不支持集群: 由于数据在源实例(codis sharding模式)与目标实例(pika classic模式)底层分布不同,需要结合业务实际情况分配。如有需要,可以在添加对应算法,恢复集群写入接口。 +* 如果需要redis迁移的,建议还是用[RedisShake](https://github.com/alibaba/RedisShake)工具,功能更全面。本项目主要是为了支持sharding模式实例迁移到pika classic实例。 + +# 文档 + +## 安装 + +### 从 Release 下载安装 + +Release: [https://github.com/GetuiLaboratory/codis2pika/releases](https://github.com/GetuiLaboratory/codis2pika/releases) + +### 从源码编译 + +下载源码后,运行 `sh build.sh` 命令编译。 + 
+```shell +sh build.sh +``` + +## 运行 + +1. 编辑 codis2pika.toml,修改其中的 source 与 target 配置项 +2. 启动 codis2pika: + +```shell +./bin/codis2pika codis2pika.toml +``` + +3. 观察数据同步情况 + +## 配置 + +codis2pika 配置文件参考 `codis2pika.toml`。 为避免歧义强制要求配置文件中的每一项配置均需要赋值,否则会报错。 + +## 数据过滤 + +codis2pika 支持使用 lua 脚本自定义过滤规则,可以实现对数据进行过滤。 搭配 lua 脚本时,codis2pika 启动命令: + +```shell +./bin/codis2pika codis2pika.toml filter/xxx.lua +``` +lua 数据过滤功能未作验证,如有需要请参考redis-shake项目 + +## 注意事项 +* 不支持特大key; +* 被迁移的codis需要设置client-output-buffer-limit解除限制,否则会断开链接; +* 被迁移节点需要预留同等迁移数据量的内存冗余; +* 在执行数据迁移前后,建议进行compact; +* 不支持多db(感觉必要性不大,暂时没支持; +* 部分key的过期时间可能推后。 + +## 迁移过程可视化 +推荐提前配置监控大盘,可以对迁移过程有可视化的掌握。 + +## 验证迁移结果 + +推荐使用阿里开源的redis-full-check工具 + diff --git a/tools/codis2pika/build.sh b/tools/codis2pika/build.sh new file mode 100755 index 0000000000..9e18d14261 --- /dev/null +++ b/tools/codis2pika/build.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set -e + +echo "[ BUILD RELEASE ]" +BIN_DIR=$(pwd)/bin/ +rm -rf "$BIN_DIR" +mkdir -p "$BIN_DIR" + +# build the current platform +echo "try build for current platform" +go build -v -trimpath -gcflags '-N -l' -o "$BIN_DIR/codis2pika" "./cmd/codis2pika" +echo "build success" + +for g in "linux" "darwin"; do + for a in "amd64" "arm64"; do + echo "try build GOOS=$g GOARCH=$a" + export GOOS=$g + export GOARCH=$a + go build -v -trimpath -gcflags '-N -l' -o "$BIN_DIR/codis2pika-$g-$a" "./cmd/codis2pika" + unset GOOS + unset GOARCH + echo "build success" + done +done + +cp codis2pika.toml "$BIN_DIR" + +if [ "$1" == "dist" ]; then + echo "[ DIST ]" + cd bin + cp -r ../filters ./ + tar -czvf ./codis2pika.tar.gz ./codis2pika.toml ./codis2pika-* ./filters + rm -rf ./filters + cd .. 
+fi diff --git a/tools/codis2pika/cmd/codis2pika/main.go b/tools/codis2pika/cmd/codis2pika/main.go new file mode 100644 index 0000000000..d752c25fc0 --- /dev/null +++ b/tools/codis2pika/cmd/codis2pika/main.go @@ -0,0 +1,112 @@ +package main + +import ( + "fmt" + "net/http" + _ "net/http/pprof" + "os" + "runtime" + + _ "net/http/pprof" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/commands" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/config" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/filter" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/reader" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/statistics" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/writer" +) + +func main() { + + if len(os.Args) < 2 || len(os.Args) > 3 { + fmt.Println("Usage: codis2pika ") + fmt.Println("Example: codis2pika config.toml lua.lua") + os.Exit(1) + } + + if len(os.Args) == 3 { + luaFile := os.Args[2] + filter.LoadFromFile(luaFile) + } + + // load config + configFile := os.Args[1] + config.LoadFromFile(configFile) + + log.Init() + log.Infof("GOOS: %s, GOARCH: %s", runtime.GOOS, runtime.GOARCH) + log.Infof("Ncpu: %d, GOMAXPROCS: %d", config.Config.Advanced.Ncpu, runtime.GOMAXPROCS(0)) + log.Infof("pid: %d", os.Getpid()) + log.Infof("pprof_port: %d", config.Config.Advanced.PprofPort) + if len(os.Args) == 2 { + log.Infof("No lua file specified, will not filter any cmd.") + } + //性能检测 + if config.Config.Advanced.PprofPort != 0 { + go func() { + err := http.ListenAndServe(fmt.Sprintf("localhost:%d", config.Config.Advanced.PprofPort), nil) + if err != nil { + log.PanicError(err) + } + }() + } + + // create writer + + var theWriter writer.Writer + target := &config.Config.Target + switch config.Config.Target.Type { + //根据模式不同,使用不同的写方法 + case "standalone": + fmt.Println("create NewRedisWriter: ") + theWriter = 
writer.NewRedisWriter(target.Address, target.Username, target.Password, target.IsTLS) + case "cluster": + fmt.Println("This version does not support targeting clusters. Please customize the cluster allocation algorithm.") + return + // clusters := config.Config.Cluster + // theWriter = writer.NewRedisClusterWriter(target.Address, target.Username, target.Password, target.IsTLS, clusters) + default: + log.Panicf("unknown target type: %s", target.Type) + } + + // create reader + source := &config.Config.Source + var theReader reader.Reader + if source.Type == "sync" { + fmt.Println("create reader NewPSyncReader: ") + theReader = reader.NewPSyncReader(source.Address, source.Username, source.Password, source.IsTLS, source.ElastiCachePSync) + } else { + log.Panicf("unknown source type: %s", source.Type) + } + ch := theReader.StartRead() + + // start sync + fmt.Println("start sync: ") + statistics.Init() + id := uint64(0) + + for e := range ch { + // calc arguments + e.Id = id + id++ + e.CmdName, e.Group, e.Keys = commands.CalcKeys(e.Argv) + + // filter + code := filter.Filter(e) + if code == filter.Allow { + + theWriter.Write(e) + statistics.AddAllowEntriesCount() + } else if code == filter.Disallow { + // do something + statistics.AddDisallowEntriesCount() + } else { + log.Panicf("error when run lua filter. entry: %s", e.ToString()) + } + + } + //wg.Wait() + log.Infof("finished.") +} diff --git a/tools/codis2pika/codis2pika.toml b/tools/codis2pika/codis2pika.toml new file mode 100644 index 0000000000..bb608b4efa --- /dev/null +++ b/tools/codis2pika/codis2pika.toml @@ -0,0 +1,51 @@ +[source] +type = "sync" +address = "192.168.12.44:6379" +username = "" # keep empty if not using ACL +password = "" # keep empty if no authentication is required +tls = false +elasticache_psync = "" # using when source is ElastiCache. 
ref: https://github.com/alibaba/RedisShake/issues/373 + +[target] +type = "standalone" # only standalone +# When the target is a cluster, write the address of one of the nodes. +# codis2pika will obtain other nodes through the `cluster nodes` command. +address = "192.168.12.43:9221" +username = "" # keep empty if not using ACL +password = "" # keep empty if no authentication is required +tls = false + + +[advanced] +dir = "data" + +# runtime.GOMAXPROCS, 0 means use runtime.NumCPU() cpu cores +ncpu = 4 + +# pprof port, 0 means disable +pprof_port = 0 + +# log +log_file = "codis2pika.log" +log_level = "info" # debug, info or warn +log_interval = 5 # in seconds + +# codis2pika gets key and value from rdb file, and uses RESTORE command to +# create the key in target redis. Redis RESTORE will return a "Target key name +# is busy" error when key already exists. You can use this configuration item +# to change the default behavior of restore: +# panic: codis2pika will stop when meet "Target key name is busy" error. +# rewrite: codis2pika will replace the key with new value. +# skip: codis2pika will skip restoring the key when meet "Target key name is busy" error. +rdb_restore_command_behavior = "skip" # panic, rewrite or skip + +# pipeline +pipeline_count_limit = 1024 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default. This amount is normally 1gb. +target_redis_client_max_querybuf_len = 1024_000_000 + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb.
+target_redis_proto_max_bulk_len = 512_000_000 \ No newline at end of file diff --git a/tools/codis2pika/docs/quick_start.md b/tools/codis2pika/docs/quick_start.md new file mode 100644 index 0000000000..93bf60c339 --- /dev/null +++ b/tools/codis2pika/docs/quick_start.md @@ -0,0 +1,97 @@ +# 快速开始 + +## 实例信息 + +### 实例 A + +- 地址:codis-server-host:6379 +- 端口:6379 +- 密码:xxxxx + +### pika 实例 B + +- 地址:192.168.0.1:6379 +- 端口:6379 +- 密码:xxxxx + +### 实例 C codis集群实例 + +- 地址: + - codis-server-host-1:6379 + - codis-server-host-2:6379 +- 密码:xxxxx + +## 工作目录 + +``` +. +├── codis2pika # 二进制程序 +└── codis2pika.toml # 配置文件 +``` + +## 开始 + +## A -> B 同步 + +修改 `codis2pika.toml`,改为如下配置: + +```toml +[source] +type = "sync" +address = "codis-server-host:6379" +password = "xxxxx" + +[target] +type = "standalone" +address = "192.168.0.1:6379" +password = "xxxxx" +``` + +启动 codis2pika: + +```bash +./codis2pika codis2pika.toml +``` + +## C -> B 同步 + +修改 `codis2pika.toml`,改为如下配置: + +```toml +[source] +type = "sync" +address = "codis-server-host-1:6379" +password = "xxxxx" + +[target] +type = "standalone" +address = "192.168.0.1:6379" # 这里写集群中的任意一个节点的地址即可 +password = "xxxxx" + +[advanced] +dir = "data" #不可重复 +``` +修改第二个,`cp codis2pika.toml codis2pika2.toml` +```toml +[source] +type = "sync" +address = "codis-server-host-2:6379" +password = "xxxxx" + +[target] +type = "standalone" +address = "192.168.0.1:6379" # 这里写集群中的任意一个节点的地址即可 +password = "xxxxx" + +[advanced] +dir = "data2" #不可重复 +``` + +启动 codis2pika: + +```bash +nohup ./codis2pika codis2pika.toml & +nohup ./codis2pika codis2pika2.toml & +``` + +同理,集群C也可以起多个,实现codis集群迁移到pika集群。 \ No newline at end of file diff --git a/tools/codis2pika/go.mod b/tools/codis2pika/go.mod new file mode 100644 index 0000000000..01821cab3d --- /dev/null +++ b/tools/codis2pika/go.mod @@ -0,0 +1,16 @@ +module github.com/OpenAtomFoundation/pika/tools/codis2pika + +go 1.19 + +require ( + github.com/hdt3213/rdb v1.0.9 + github.com/pelletier/go-toml/v2 v2.0.7 + 
github.com/rs/zerolog v1.29.1 + github.com/yuin/gopher-lua v1.1.0 +) + +require ( + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + golang.org/x/sys v0.1.0 // indirect +) diff --git a/tools/codis2pika/go.sum b/tools/codis2pika/go.sum new file mode 100644 index 0000000000..4cc933926b --- /dev/null +++ b/tools/codis2pika/go.sum @@ -0,0 +1,46 @@ +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.8.7/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/hdt3213/rdb v1.0.9 h1:x9uiLpgpLSgyKWo8WwYSc5hMg0vglo+u5i5dTnJW33Y= +github.com/hdt3213/rdb v1.0.9/go.mod h1:A1RWBSb4QGdX8fNs2bSoWxkzcWlWGbCC7OgOTFhPG+k= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= +github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= +github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/tools/codis2pika/internal/client/func.go b/tools/codis2pika/internal/client/func.go new file mode 100644 index 0000000000..08a633f917 --- /dev/null +++ b/tools/codis2pika/internal/client/func.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/client/proto" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +// 接口函数:校验入参否为string并返回string的切片 +func ArrayString(replyInterface interface{}, err error) []string { + if err != nil { + log.PanicError(err) + } + replyArray := replyInterface.([]interface{}) + replyArrayString := make([]string, len(replyArray)) + for inx, item := range replyArray { + replyArrayString[inx] = item.(string) + } + return replyArrayString +} + +func EncodeArgv(argv []string, buf *bytes.Buffer) { + writer := proto.NewWriter(buf) + argvInterface := make([]interface{}, len(argv)) + for inx, item := range argv { + argvInterface[inx] = item + } + err := writer.WriteArgs(argvInterface) + if err != nil { + log.PanicError(err) + } +} diff --git a/tools/codis2pika/internal/client/proto/license.txt b/tools/codis2pika/internal/client/proto/license.txt new file mode 100644 index 0000000000..e768695013 --- /dev/null +++ b/tools/codis2pika/internal/client/proto/license.txt @@ -0,0 +1 @@ +port from 
https://github.com/go-redis/redis/tree/master/internal/proto \ No newline at end of file diff --git a/tools/codis2pika/internal/client/proto/reader.go b/tools/codis2pika/internal/client/proto/reader.go new file mode 100644 index 0000000000..775f4c310b --- /dev/null +++ b/tools/codis2pika/internal/client/proto/reader.go @@ -0,0 +1,522 @@ +package proto + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" +) + +// redis resp protocol data type. +const ( + RespStatus = '+' // +\r\n + RespError = '-' // -\r\n + RespString = '$' // $\r\n\r\n + RespInt = ':' // :\r\n + RespNil = '_' // _\r\n + RespFloat = ',' // ,\r\n (golang float) + RespBool = '#' // true: #t\r\n false: #f\r\n + RespBlobError = '!' // !\r\n\r\n + RespVerbatim = '=' // =\r\nFORMAT:\r\n + RespBigInt = '(' // (\r\n + RespArray = '*' // *\r\n... (same as resp2) + RespMap = '%' // %\r\n(key)\r\n(value)\r\n... (golang map) + RespSet = '~' // ~\r\n... (same as Array) + RespAttr = '|' // |\r\n(key)\r\n(value)\r\n... + command reply + RespPush = '>' // >\r\n... (same as Array) +) + +// Not used temporarily. +// Redis has not used these two data types for the time being, and will implement them later. +// Streamed = "EOF:" +// StreamedAggregated = '?' 
+ +//------------------------------------------------------------------------------ + +const Nil = RedisError("redis: nil") + +type RedisError string + +func (e RedisError) Error() string { return string(e) } + +func (RedisError) RedisError() {} + +func ParseErrorReply(line []byte) error { + return RedisError(line[1:]) +} + +//------------------------------------------------------------------------------ + +type Reader struct { + rd *bufio.Reader +} + +func NewReader(rd *bufio.Reader) *Reader { + return &Reader{ + rd: rd, + } +} + +func (r *Reader) Buffered() int { + return r.rd.Buffered() +} + +func (r *Reader) Peek(n int) ([]byte, error) { + return r.rd.Peek(n) +} + +func (r *Reader) Reset(rd io.Reader) { + r.rd.Reset(rd) +} + +// PeekReplyType returns the data type of the next response without advancing the Reader, +// and discard the attribute type. +func (r *Reader) PeekReplyType() (byte, error) { + b, err := r.rd.Peek(1) + if err != nil { + return 0, err + } + if b[0] == RespAttr { + if err = r.DiscardNext(); err != nil { + return 0, err + } + return r.PeekReplyType() + } + return b[0], nil +} + +// ReadLine Return a valid reply, it will check the protocol or redis error, +// and discard the attribute type. +func (r *Reader) ReadLine() ([]byte, error) { + line, err := r.readLine() + if err != nil { + return nil, err + } + switch line[0] { + case RespError: + return nil, ParseErrorReply(line) + case RespNil: + return nil, Nil + case RespBlobError: + var blobErr string + blobErr, err = r.readStringReply(line) + if err == nil { + err = RedisError(blobErr) + } + return nil, err + case RespAttr: + if err = r.Discard(line); err != nil { + return nil, err + } + return r.ReadLine() + } + + // Compatible with RESP2 + if IsNilReply(line) { + return nil, Nil + } + + return line, nil +} + +// readLine returns an error if: +// - there is a pending read error; +// - or line does not end with \r\n. 
+func (r *Reader) readLine() ([]byte, error) { + b, err := r.rd.ReadSlice('\n') + if err != nil { + if err != bufio.ErrBufferFull { + return nil, err + } + + full := make([]byte, len(b)) + copy(full, b) + + b, err = r.rd.ReadBytes('\n') + if err != nil { + return nil, err + } + + full = append(full, b...) + b = full + } + //line不以\r\n结尾,返回err + if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' { + return nil, fmt.Errorf("redis: invalid reply: %q", b) + } + //返回一个命令的切片 + return b[:len(b)-2], nil +} + +func (r *Reader) ReadReply() (interface{}, error) { + line, err := r.ReadLine() + if err != nil { + return nil, err + } + + switch line[0] { + case RespStatus: + return string(line[1:]), nil + case RespInt: + return parseInt(line[1:], 10, 64) + case RespFloat: + return r.readFloat(line) + case RespBool: + return r.readBool(line) + case RespBigInt: + return r.readBigInt(line) + case RespString: + return r.readStringReply(line) + case RespVerbatim: + return r.readVerb(line) + + case RespArray, RespSet, RespPush: + return r.readSlice(line) + case RespMap: + return r.readMap(line) + } + return nil, fmt.Errorf("redis: can't parse %.100q", line) +} + +func (r *Reader) readFloat(line []byte) (float64, error) { + v := string(line[1:]) + switch string(line[1:]) { + case "inf": + return math.Inf(1), nil + case "-inf": + return math.Inf(-1), nil + } + return strconv.ParseFloat(v, 64) +} + +func (r *Reader) readBool(line []byte) (bool, error) { + switch string(line[1:]) { + case "t": + return true, nil + case "f": + return false, nil + } + return false, fmt.Errorf("redis: can't parse bool reply: %q", line) +} + +func (r *Reader) readBigInt(line []byte) (*big.Int, error) { + i := new(big.Int) + if i, ok := i.SetString(string(line[1:]), 10); ok { + return i, nil + } + return nil, fmt.Errorf("redis: can't parse bigInt reply: %q", line) +} + +func (r *Reader) readStringReply(line []byte) (string, error) { + n, err := replyLen(line) + if err != nil { + return "", err + } + + 
b := make([]byte, n+2) + _, err = io.ReadFull(r.rd, b) + if err != nil { + return "", err + } + + return bytesToString(b[:n]), nil +} + +func (r *Reader) readVerb(line []byte) (string, error) { + s, err := r.readStringReply(line) + if err != nil { + return "", err + } + if len(s) < 4 || s[3] != ':' { + return "", fmt.Errorf("redis: can't parse verbatim string reply: %q", line) + } + return s[4:], nil +} + +func (r *Reader) readSlice(line []byte) ([]interface{}, error) { + n, err := replyLen(line) + if err != nil { + return nil, err + } + + val := make([]interface{}, n) + for i := 0; i < len(val); i++ { + v, err := r.ReadReply() + if err != nil { + if err == Nil { + val[i] = nil + continue + } + if err, ok := err.(RedisError); ok { + val[i] = err + continue + } + return nil, err + } + val[i] = v + } + return val, nil +} + +func (r *Reader) readMap(line []byte) (map[interface{}]interface{}, error) { + n, err := replyLen(line) + if err != nil { + return nil, err + } + m := make(map[interface{}]interface{}, n) + for i := 0; i < n; i++ { + k, err := r.ReadReply() + if err != nil { + return nil, err + } + v, err := r.ReadReply() + if err != nil { + if err == Nil { + m[k] = nil + continue + } + if err, ok := err.(RedisError); ok { + m[k] = err + continue + } + return nil, err + } + m[k] = v + } + return m, nil +} + +// ------------------------------- + +func (r *Reader) ReadInt() (int64, error) { + line, err := r.ReadLine() + if err != nil { + return 0, err + } + switch line[0] { + case RespInt, RespStatus: + return parseInt(line[1:], 10, 64) + case RespString: + s, err := r.readStringReply(line) + if err != nil { + return 0, err + } + return parseInt([]byte(s), 10, 64) + case RespBigInt: + b, err := r.readBigInt(line) + if err != nil { + return 0, err + } + if !b.IsInt64() { + return 0, fmt.Errorf("bigInt(%s) value out of range", b.String()) + } + return b.Int64(), nil + } + return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line) +} + +func (r *Reader) 
ReadFloat() (float64, error) { + line, err := r.ReadLine() + if err != nil { + return 0, err + } + switch line[0] { + case RespFloat: + return r.readFloat(line) + case RespStatus: + return strconv.ParseFloat(string(line[1:]), 64) + case RespString: + s, err := r.readStringReply(line) + if err != nil { + return 0, err + } + return strconv.ParseFloat(s, 64) + } + return 0, fmt.Errorf("redis: can't parse float reply: %.100q", line) +} + +func (r *Reader) ReadString() (string, error) { + line, err := r.ReadLine() + if err != nil { + return "", err + } + + switch line[0] { + case RespStatus, RespInt, RespFloat: + return string(line[1:]), nil + case RespString: + return r.readStringReply(line) + case RespBool: + b, err := r.readBool(line) + return strconv.FormatBool(b), err + case RespVerbatim: + return r.readVerb(line) + case RespBigInt: + b, err := r.readBigInt(line) + if err != nil { + return "", err + } + return b.String(), nil + } + return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line) +} + +func (r *Reader) ReadBool() (bool, error) { + s, err := r.ReadString() + if err != nil { + return false, err + } + return s == "OK" || s == "1" || s == "true", nil +} + +func (r *Reader) ReadSlice() ([]interface{}, error) { + line, err := r.ReadLine() + if err != nil { + return nil, err + } + return r.readSlice(line) +} + +// ReadFixedArrayLen read fixed array length. +func (r *Reader) ReadFixedArrayLen(fixedLen int) error { + n, err := r.ReadArrayLen() + if err != nil { + return err + } + if n != fixedLen { + return fmt.Errorf("redis: got %d elements in the array, wanted %d", n, fixedLen) + } + return nil +} + +// ReadArrayLen Read and return the length of the array. 
+func (r *Reader) ReadArrayLen() (int, error) { + line, err := r.ReadLine() + if err != nil { + return 0, err + } + switch line[0] { + case RespArray, RespSet, RespPush: + return replyLen(line) + default: + return 0, fmt.Errorf("redis: can't parse array/set/push reply: %.100q", line) + } +} + +// ReadFixedMapLen reads fixed map length. +func (r *Reader) ReadFixedMapLen(fixedLen int) error { + n, err := r.ReadMapLen() + if err != nil { + return err + } + if n != fixedLen { + return fmt.Errorf("redis: got %d elements in the map, wanted %d", n, fixedLen) + } + return nil +} + +// ReadMapLen reads the length of the map type. +// If responding to the array type (RespArray/RespSet/RespPush), +// it must be a multiple of 2 and return n/2. +// Other types will return an error. +func (r *Reader) ReadMapLen() (int, error) { + line, err := r.ReadLine() + if err != nil { + return 0, err + } + switch line[0] { + case RespMap: + return replyLen(line) + case RespArray, RespSet, RespPush: + // Some commands and RESP2 protocol may respond to array types. + n, err := replyLen(line) + if err != nil { + return 0, err + } + if n%2 != 0 { + return 0, fmt.Errorf("redis: the length of the array must be a multiple of 2, got: %d", n) + } + return n / 2, nil + default: + return 0, fmt.Errorf("redis: can't parse map reply: %.100q", line) + } +} + +// DiscardNext read and discard the data represented by the next line. +func (r *Reader) DiscardNext() error { + line, err := r.readLine() + if err != nil { + return err + } + return r.Discard(line) +} + +// Discard the data represented by line. 
+func (r *Reader) Discard(line []byte) (err error) { + if len(line) == 0 { + return errors.New("redis: invalid line") + } + switch line[0] { + case RespStatus, RespError, RespInt, RespNil, RespFloat, RespBool, RespBigInt: + return nil + } + + n, err := replyLen(line) + if err != nil && err != Nil { + return err + } + + switch line[0] { + case RespBlobError, RespString, RespVerbatim: + // +\r\n + _, err = r.rd.Discard(n + 2) + return err + case RespArray, RespSet, RespPush: + for i := 0; i < n; i++ { + if err = r.DiscardNext(); err != nil { + return err + } + } + return nil + case RespMap, RespAttr: + // Read key & value. + for i := 0; i < n*2; i++ { + if err = r.DiscardNext(); err != nil { + return err + } + } + return nil + } + + return fmt.Errorf("redis: can't parse %.100q", line) +} + +func replyLen(line []byte) (n int, err error) { + n, err = atoi(line[1:]) + if err != nil { + return 0, err + } + + if n < -1 { + return 0, fmt.Errorf("redis: invalid reply: %q", line) + } + + switch line[0] { + case RespString, RespVerbatim, RespBlobError, + RespArray, RespSet, RespPush, RespMap, RespAttr: + if n == -1 { + return 0, Nil + } + } + return n, nil +} + +// IsNilReply detects redis.Nil of RESP2. 
+func IsNilReply(line []byte) bool { + return len(line) == 3 && + (line[0] == RespString || line[0] == RespArray) && + line[1] == '-' && line[2] == '1' +} diff --git a/tools/codis2pika/internal/client/proto/strconv.go b/tools/codis2pika/internal/client/proto/strconv.go new file mode 100644 index 0000000000..ab946d6532 --- /dev/null +++ b/tools/codis2pika/internal/client/proto/strconv.go @@ -0,0 +1,27 @@ +package proto + +import "strconv" + +func bytesToString(b []byte) string { + return string(b) +} + +func stringToBytes(s string) []byte { + return []byte(s) +} + +func atoi(b []byte) (int, error) { + return strconv.Atoi(bytesToString(b)) +} + +func parseInt(b []byte, base int, bitSize int) (int64, error) { + return strconv.ParseInt(bytesToString(b), base, bitSize) +} + +func parseUint(b []byte, base int, bitSize int) (uint64, error) { + return strconv.ParseUint(bytesToString(b), base, bitSize) +} + +func parseFloat(b []byte, bitSize int) (float64, error) { + return strconv.ParseFloat(bytesToString(b), bitSize) +} diff --git a/tools/codis2pika/internal/client/proto/writer.go b/tools/codis2pika/internal/client/proto/writer.go new file mode 100644 index 0000000000..ba5854f3ea --- /dev/null +++ b/tools/codis2pika/internal/client/proto/writer.go @@ -0,0 +1,156 @@ +package proto + +import ( + "encoding" + "fmt" + "io" + "net" + "strconv" + "time" +) + +type writer interface { + io.Writer + io.ByteWriter + // WriteString implement io.StringWriter. 
+ WriteString(s string) (n int, err error) +} + +type Writer struct { + writer + + lenBuf []byte + numBuf []byte +} + +func NewWriter(wr writer) *Writer { + return &Writer{ + writer: wr, + + lenBuf: make([]byte, 64), + numBuf: make([]byte, 64), + } +} + +func (w *Writer) WriteArgs(args []interface{}) error { + if err := w.WriteByte(RespArray); err != nil { + return err + } + + if err := w.writeLen(len(args)); err != nil { + return err + } + + for _, arg := range args { + if err := w.WriteArg(arg); err != nil { + return err + } + } + + return nil +} + +func (w *Writer) writeLen(n int) error { + w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10) + w.lenBuf = append(w.lenBuf, '\r', '\n') + _, err := w.Write(w.lenBuf) + return err +} + +func (w *Writer) WriteArg(v interface{}) error { + switch v := v.(type) { + case nil: + return w.string("") + case string: + return w.string(v) + case []byte: + return w.bytes(v) + case int: + return w.int(int64(v)) + case int8: + return w.int(int64(v)) + case int16: + return w.int(int64(v)) + case int32: + return w.int(int64(v)) + case int64: + return w.int(v) + case uint: + return w.uint(uint64(v)) + case uint8: + return w.uint(uint64(v)) + case uint16: + return w.uint(uint64(v)) + case uint32: + return w.uint(uint64(v)) + case uint64: + return w.uint(v) + case float32: + return w.float(float64(v)) + case float64: + return w.float(v) + case bool: + if v { + return w.int(1) + } + return w.int(0) + case time.Time: + w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) + return w.bytes(w.numBuf) + case time.Duration: + return w.int(v.Nanoseconds()) + case encoding.BinaryMarshaler: + b, err := v.MarshalBinary() + if err != nil { + return err + } + return w.bytes(b) + case net.IP: + return w.bytes(v) + default: + return fmt.Errorf( + "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v) + } +} + +func (w *Writer) bytes(b []byte) error { + if err := w.WriteByte(RespString); err != nil { + return err + } + + if err 
:= w.writeLen(len(b)); err != nil { + return err + } + + if _, err := w.Write(b); err != nil { + return err + } + + return w.crlf() +} + +func (w *Writer) string(s string) error { + return w.bytes(stringToBytes(s)) +} + +func (w *Writer) uint(n uint64) error { + w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10) + return w.bytes(w.numBuf) +} + +func (w *Writer) int(n int64) error { + w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10) + return w.bytes(w.numBuf) +} + +func (w *Writer) float(f float64) error { + w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64) + return w.bytes(w.numBuf) +} + +func (w *Writer) crlf() error { + if err := w.WriteByte('\r'); err != nil { + return err + } + return w.WriteByte('\n') +} diff --git a/tools/codis2pika/internal/client/redis.go b/tools/codis2pika/internal/client/redis.go new file mode 100644 index 0000000000..2e815b0010 --- /dev/null +++ b/tools/codis2pika/internal/client/redis.go @@ -0,0 +1,117 @@ +package client + +import ( + "bufio" + "crypto/tls" + "net" + "time" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/client/proto" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +type Redis struct { + reader *bufio.Reader + writer *bufio.Writer + protoReader *proto.Reader + protoWriter *proto.Writer +} + +func NewRedisClient(address string, username string, password string, isTls bool) *Redis { + r := new(Redis) + var conn net.Conn + var dialer net.Dialer + var err error + //原Timeout为3s,为降低broken pipe概率调整为8s + dialer.Timeout = 8 * time.Second + if isTls { + conn, err = tls.DialWithDialer(&dialer, "tcp", address, &tls.Config{InsecureSkipVerify: true}) + } else { + conn, err = dialer.Dial("tcp", address) + } + if err != nil { + log.PanicError(err) + } + + r.reader = bufio.NewReader(conn) + r.writer = bufio.NewWriter(conn) + + r.protoReader = proto.NewReader(r.reader) + r.protoWriter = proto.NewWriter(r.writer) + + // auth + if password != "" { + var reply string + if username != 
"" { + reply = r.DoWithStringReply("auth", username, password) + } else { + reply = r.DoWithStringReply("auth", password) + } + if reply != "OK" { + log.Panicf("auth failed with reply: %s", reply) + } + log.Infof("auth successful. address=[%s]", address) + } else { + log.Infof("no password. address=[%s]", address) + } + + // ping to test connection + reply := r.DoWithStringReply("ping") + + if reply != "PONG" { + panic("ping failed with reply: " + reply) + } + + return r +} + +func (r *Redis) DoWithStringReply(args ...string) string { + r.Send(args...) + + replyInterface, err := r.Receive() + if err != nil { + log.PanicError(err) + } + reply := replyInterface.(string) + return reply +} + +func (r *Redis) Send(args ...string) { + argsInterface := make([]interface{}, len(args)) + for inx, item := range args { + argsInterface[inx] = item + } + err := r.protoWriter.WriteArgs(argsInterface) + if err != nil { + log.PanicError(err) + } + r.flush() +} + +func (r *Redis) SendBytes(buf []byte) { + _, err := r.writer.Write(buf) + if err != nil { + log.PanicError(err) + } + r.flush() +} + +func (r *Redis) flush() { + err := r.writer.Flush() + if err != nil { + log.PanicError(err) + } +} + +func (r *Redis) Receive() (interface{}, error) { + return r.protoReader.ReadReply() +} + +func (r *Redis) BufioReader() *bufio.Reader { + return r.reader +} + +func (r *Redis) SetBufioReader(rd *bufio.Reader) { + r.reader = rd + r.protoReader = proto.NewReader(r.reader) +} diff --git a/tools/codis2pika/internal/commands/keys.go b/tools/codis2pika/internal/commands/keys.go new file mode 100644 index 0000000000..49ff2b989e --- /dev/null +++ b/tools/codis2pika/internal/commands/keys.go @@ -0,0 +1,90 @@ +package commands + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +// CalcKeys https://redis.io/docs/reference/key-specs/ +func CalcKeys(argv []string) (cmaName string, group string, keys []string) { + argc := 
len(argv) + group = "unknown" + + cmaName = strings.ToUpper(argv[0]) + if _, ok := containers[cmaName]; ok { + cmaName = fmt.Sprintf("%s-%s", cmaName, strings.ToUpper(argv[1])) + } + cmd, ok := redisCommands[cmaName] + if !ok { + log.Warnf("unknown command. argv=%v", argv) + return + } + group = cmd.group + for _, spec := range cmd.keySpec { + begin := 0 + switch spec.beginSearchType { + case "index": + begin = spec.beginSearchIndex + case "keyword": + var inx, step int + if spec.beginSearchStartFrom > 0 { + inx = spec.beginSearchStartFrom + step = 1 + } else { + inx = -spec.beginSearchStartFrom + step = -1 + } + for ; ; inx += step { + if inx == argc { + log.Panicf("not found keyword. argv=%v", argv) + } + if strings.ToUpper(argv[inx]) == spec.beginSearchKeyword { + begin = inx + 1 + break + } + } + default: + log.Panicf("wrong type: %s", spec.beginSearchType) + } + switch spec.findKeysType { + case "range": + var lastKeyInx int + if spec.findKeysRangeLastKey >= 0 { + lastKeyInx = begin + spec.findKeysRangeLastKey + } else { + lastKeyInx = argc + spec.findKeysRangeLastKey + } + limitCount := math.MaxInt32 + if spec.findKeysRangeLimit <= -2 { + limitCount = (argc - begin) / (-spec.findKeysRangeLimit) + } + keyStep := spec.findKeysRangeKeyStep + for inx := begin; inx <= lastKeyInx && limitCount > 0; inx += keyStep { + keys = append(keys, argv[inx]) + limitCount -= 1 + } + case "keynum": + keynumIdx := begin + spec.findKeysKeynumIndex + if keynumIdx < 0 || keynumIdx > argc { + log.Panicf("keynumInx wrong. 
argv=%v, keynumIdx=[%d]", argv, keynumIdx) + } + keyCount, err := strconv.Atoi(argv[keynumIdx]) + if err != nil { + log.PanicError(err) + } + firstKey := spec.findKeysKeynumFirstKey + step := spec.findKeysKeynumKeyStep + for inx := begin + firstKey; keyCount > 0; inx += step { + keys = append(keys, argv[inx]) + keyCount -= 1 + } + default: + log.Panicf("wrong type: %s", spec.findKeysType) + } + } + return +} diff --git a/tools/codis2pika/internal/commands/keys_test.go b/tools/codis2pika/internal/commands/keys_test.go new file mode 100644 index 0000000000..84ba5050bf --- /dev/null +++ b/tools/codis2pika/internal/commands/keys_test.go @@ -0,0 +1,90 @@ +package commands + +import ( + "testing" +) + +func testEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func TestCalcKeys(t *testing.T) { + // SET + cmd, group, keys := CalcKeys([]string{"SET", "key", "value"}) + if cmd != "SET" || group != "STRING" || !testEq(keys, []string{"key"}) { + t.Errorf("CalcKeys(SET key value) failed. cmd=%s, group=%s, keys=%v", cmd, group, keys) + } + + // MSET + cmd, group, keys = CalcKeys([]string{"MSET", "key1", "value1", "key2", "value2"}) + if cmd != "MSET" || group != "STRING" || !testEq(keys, []string{"key1", "key2"}) { + t.Errorf("CalcKeys(MSET key1 value1 key2 value2) failed. cmd=%s, group=%s, keys=%v", cmd, group, keys) + } + + // XADD + cmd, group, keys = CalcKeys([]string{"XADD", "key", "*", "field1", "value1", "field2", "value2"}) + if cmd != "XADD" || group != "STREAM" || !testEq(keys, []string{"key"}) { + t.Errorf("CalcKeys(XADD key * field1 value1 field2 value2) failed. 
cmd=%s, group=%s, keys=%v", cmd, group, keys) + } + + // ZUNIONSTORE + cmd, group, keys = CalcKeys([]string{"ZUNIONSTORE", "key", "2", "key1", "key2"}) + if cmd != "ZUNIONSTORE" || group != "SORTED_SET" || !testEq(keys, []string{"key", "key1", "key2"}) { + t.Errorf("CalcKeys(ZUNIONSTORE key 2 key1 key2) failed. cmd=%s, group=%s, keys=%v", cmd, group, keys) + } +} + +func TestKeyHash(t *testing.T) { + ret := keyHash("abcde") + if ret != 16097 { + t.Errorf("keyHash(abcde) = %x", ret) + } + ret = keyHash("abcde{") + if ret != 14689 { + t.Errorf("keyHash(abcde{) = %x", ret) + } + ret = keyHash("abcde}") + if ret != 6567 { + t.Errorf("keyHash(abcde}) = %x", ret) + } + ret = keyHash("{abcde}") + if ret != 16097 { + t.Errorf("keyHash({abcde}) = %x", ret) + } + ret = keyHash("same") + if ret != 13447 { + t.Errorf("keyHash(same) = %x", ret) + } + ret = keyHash("abcdefghi{same}abcdefghi") + if ret != 13447 { + t.Errorf("keyHash(abcdefghi{same}abcdefghi) = %x", ret) + } + ret = keyHash("123456789{same}123456789") + if ret != 13447 { + t.Errorf("keyHash(123456789{same}123456789) = %x", ret) + } + ret = keyHash("1234我是你89{same}12我就456789") + if ret != 13447 { + t.Errorf("keyHash(1234我是你89{same}12我就456789) = %x", ret) + } + ret = keyHash("你你你{你你你}你你你") + if ret != 15023 { + t.Errorf("keyHash(1234我是你89{same}12我就456789) = %x", ret) + } + b := make([]byte, 0) + for i := 0; i < 256; i++ { + b = append(b, byte(i)) + } + ret = keyHash(string(b)) + if ret != 16155 { + t.Errorf("keyHash(%s) = %x", string(b), ret) + } +} diff --git a/tools/codis2pika/internal/commands/table.go b/tools/codis2pika/internal/commands/table.go new file mode 100644 index 0000000000..e006affd11 --- /dev/null +++ b/tools/codis2pika/internal/commands/table.go @@ -0,0 +1,2018 @@ +package commands + +var containers = map[string]bool{ + "XGROUP": true, + "FUNCTION": true, +} +var redisCommands = map[string]redisCommand{ + "MSETNX": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + -1, + 2, + 0, 
+ 0, + 0, + 0, + }, + }, + }, + "GETEX": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "INCRBYFLOAT": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "MSET": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + -1, + 2, + 0, + 0, + 0, + 0, + }, + }, + }, + "SET": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "DECRBY": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "INCRBY": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SETEX": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "DECR": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "INCR": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PSETEX": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "GETSET": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SETRANGE": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "APPEND": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SETNX": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "GETDEL": { + "STRING", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SDIFFSTORE": { + "SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 
0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SINTERSTORE": { + "SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SUNIONSTORE": { + "SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SPOP": { + "SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SADD": { + "SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SREM": { + "SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SMOVE": { + "SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "GEORADIUS": { + "GEO", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "keyword", + 0, + "STORE", + 6, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "keyword", + 0, + "STOREDIST", + 6, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "GEOADD": { + "GEO", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "GEOSEARCHSTORE": { + "GEO", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "GEORADIUSBYMEMBER": { + "GEO", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "keyword", + 0, + "STORE", + 5, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "keyword", + 0, + "STOREDIST", + 5, + "range", + 0, + 1, + 0, 
+ 0, + 0, + 0, + }, + }, + }, + "ZPOPMAX": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZREMRANGEBYSCORE": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZRANGESTORE": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZINTERSTORE": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "ZPOPMIN": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZINCRBY": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZDIFFSTORE": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "ZUNIONSTORE": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "ZREMRANGEBYLEX": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZMPOP": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "ZADD": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZREMRANGEBYRANK": { + "SORTED_SET", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "ZREM": { + "SORTED_SET", + []keySpec{ + { + 
"index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LMPOP": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "LSET": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "RPOPLPUSH": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LTRIM": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LPUSH": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LINSERT": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LREM": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "RPUSH": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "RPOP": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LPOP": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LMOVE": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "RPUSHX": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "LPUSHX": { + "LIST", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "FUNCTION-FLUSH": { + "SCRIPTING", + []keySpec{}, + }, + "FUNCTION-DELETE": { + "SCRIPTING", + []keySpec{}, + }, + "FUNCTION-RESTORE": { + "SCRIPTING", + 
[]keySpec{}, + }, + "FUNCTION-LOAD": { + "SCRIPTING", + []keySpec{}, + }, + "EVALSHA": { + "SCRIPTING", + []keySpec{ + { + "index", + 2, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "FCALL": { + "SCRIPTING", + []keySpec{ + { + "index", + 2, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "EVAL": { + "SCRIPTING", + []keySpec{ + { + "index", + 2, + "", + 0, + "keynum", + 0, + 0, + 0, + 0, + 1, + 1, + }, + }, + }, + "XCLAIM": { + "STREAM", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XGROUP-DELCONSUMER": { + "STREAM", + []keySpec{ + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XACK": { + "STREAM", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XTRIM": { + "STREAM", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XGROUP-CREATE": { + "STREAM", + []keySpec{ + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XDEL": { + "STREAM", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XAUTOCLAIM": { + "STREAM", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XGROUP-DESTROY": { + "STREAM", + []keySpec{ + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XADD": { + "STREAM", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XSETID": { + "STREAM", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XGROUP-SETID": { + "STREAM", + []keySpec{ + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "XGROUP-CREATECONSUMER": { + "STREAM", + []keySpec{ + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, 
+ 0, + }, + }, + }, + "RESTORE": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "UNLINK": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "MOVE": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "COPY": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PERSIST": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "DEL": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PEXPIREAT": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "RENAME": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "RENAMENX": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PEXPIRE": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "EXPIRE": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "EXPIREAT": { + "GENERIC", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PFCOUNT": { + "HYPERLOGLOG", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PFMERGE": { + "HYPERLOGLOG", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", 
+ 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 2, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PFADD": { + "HYPERLOGLOG", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "PFDEBUG": { + "HYPERLOGLOG", + []keySpec{ + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "FLUSHDB": { + "SERVER", + []keySpec{}, + }, + "SWAPDB": { + "SERVER", + []keySpec{}, + }, + "FLUSHALL": { + "SERVER", + []keySpec{}, + }, + "RESTORE-ASKING": { + "SERVER", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SETBIT": { + "BITMAP", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "BITOP": { + "BITMAP", + []keySpec{ + { + "index", + 2, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + { + "index", + 3, + "", + 0, + "range", + -1, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "BITFIELD": { + "BITMAP", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "HMSET": { + "HASH", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "HINCRBYFLOAT": { + "HASH", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "HDEL": { + "HASH", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "HSETNX": { + "HASH", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "HSET": { + "HASH", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "HINCRBY": { + "HASH", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + "SPUBLISH": { + "PUBSUB", + []keySpec{ + { + "index", + 1, + "", + 0, + "range", + 0, + 1, + 0, + 0, + 0, + 0, + }, + }, + }, + 
"PUBLISH": { + "PUBSUB", + []keySpec{}, + }, + "PING": { + "CONNECTION", + []keySpec{}, + }, + "SELECT": { + "CONNECTION", + []keySpec{}, + }, +} diff --git a/tools/codis2pika/internal/commands/types.go b/tools/codis2pika/internal/commands/types.go new file mode 100644 index 0000000000..20e3de1c33 --- /dev/null +++ b/tools/codis2pika/internal/commands/types.go @@ -0,0 +1,27 @@ +package commands + +type keySpec struct { + // begin_search + beginSearchType string + // @index + beginSearchIndex int + // @keyword + beginSearchKeyword string + beginSearchStartFrom int + + // find_keys + findKeysType string + // @range + findKeysRangeLastKey int + findKeysRangeKeyStep int + findKeysRangeLimit int + // @keynum + findKeysKeynumIndex int + findKeysKeynumFirstKey int + findKeysKeynumKeyStep int +} + +type redisCommand struct { + group string + keySpec []keySpec +} diff --git a/tools/codis2pika/internal/config/config.go b/tools/codis2pika/internal/config/config.go new file mode 100644 index 0000000000..f309f5a4b7 --- /dev/null +++ b/tools/codis2pika/internal/config/config.go @@ -0,0 +1,126 @@ +package config + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "runtime" + + "github.com/pelletier/go-toml/v2" +) + +type tomlSource struct { + Type string `toml:"type"` + Address string `toml:"address"` + Username string `toml:"username"` + Password string `toml:"password"` + IsTLS bool `toml:"tls"` + ElastiCachePSync string `toml:"elasticache_psync"` + RDBFilePath string `toml:"rdb_file_path"` +} + +type tomlTarget struct { + Type string `toml:"type"` + Username string `toml:"username"` + Address string `toml:"address"` + Password string `toml:"password"` + IsTLS bool `toml:"tls"` +} + +type tomlAdvanced struct { + Dir string `toml:"dir"` + + Ncpu int `toml:"ncpu"` + + PprofPort int `toml:"pprof_port"` + + // log + LogFile string `toml:"log_file"` + LogLevel string `toml:"log_level"` + LogInterval int `toml:"log_interval"` + + // rdb restore + RDBRestoreCommandBehavior string 
`toml:"rdb_restore_command_behavior"` + + // for writer + PipelineCountLimit uint64 `toml:"pipeline_count_limit"` + TargetRedisClientMaxQuerybufLen uint64 `toml:"target_redis_client_max_querybuf_len"` + TargetRedisProtoMaxBulkLen uint64 `toml:"target_redis_proto_max_bulk_len"` +} + +type tomlShakeConfig struct { + Source tomlSource + Target tomlTarget + Advanced tomlAdvanced +} + +var Config tomlShakeConfig + +func init() { + // source + Config.Source.Type = "sync" + Config.Source.Address = "" + Config.Source.Username = "" + Config.Source.Password = "" + Config.Source.IsTLS = false + Config.Source.ElastiCachePSync = "" + + // target + Config.Target.Type = "standalone" + Config.Target.Address = "" + Config.Target.Username = "" + Config.Target.Password = "" + Config.Target.IsTLS = false + + // advanced + Config.Advanced.Dir = "data" + Config.Advanced.Ncpu = 4 + Config.Advanced.PprofPort = 0 + Config.Advanced.LogFile = "codis2pika.log" + Config.Advanced.LogLevel = "info" + Config.Advanced.LogInterval = 5 + Config.Advanced.RDBRestoreCommandBehavior = "rewrite" + Config.Advanced.PipelineCountLimit = 1024 + Config.Advanced.TargetRedisClientMaxQuerybufLen = 1024 * 1000 * 1000 + Config.Advanced.TargetRedisProtoMaxBulkLen = 512 * 1000 * 1000 + +} + +func LoadFromFile(filename string) { + + buf, err := ioutil.ReadFile(filename) + if err != nil { + panic(err.Error()) + } + + decoder := toml.NewDecoder(bytes.NewReader(buf)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&Config) + if err != nil { + missingError, ok := err.(*toml.StrictMissingError) + if ok { + panic(fmt.Sprintf("decode config error:\n%s", missingError.String())) + } + panic(err.Error()) + } + + // dir + err = os.MkdirAll(Config.Advanced.Dir, os.ModePerm) + if err != nil { + panic(err.Error()) + } + err = os.Chdir(Config.Advanced.Dir) + if err != nil { + panic(err.Error()) + } + + // cpu core + var ncpu int + if Config.Advanced.Ncpu == 0 { + ncpu = runtime.NumCPU() + } else { + ncpu = 
Config.Advanced.Ncpu + } + runtime.GOMAXPROCS(ncpu) +} diff --git a/tools/codis2pika/internal/entry/entry.go b/tools/codis2pika/internal/entry/entry.go new file mode 100644 index 0000000000..28dfc46a7b --- /dev/null +++ b/tools/codis2pika/internal/entry/entry.go @@ -0,0 +1,29 @@ +package entry + +import "fmt" + +type Entry struct { + Id uint64 + IsBase bool // whether the command is decoded from dump.rdb file + DbId int + Argv []string + TimestampMs uint64 + + CmdName string + Group string + Keys []string + Slots []int + + // for statistics + Offset int64 + EncodedSize uint64 // the size of the entry after encode +} + +func NewEntry() *Entry { + e := new(Entry) + return e +} + +func (e *Entry) ToString() string { + return fmt.Sprintf("%v", e.Argv) +} diff --git a/tools/codis2pika/internal/filter/filter.go b/tools/codis2pika/internal/filter/filter.go new file mode 100644 index 0000000000..cfa76e4a4f --- /dev/null +++ b/tools/codis2pika/internal/filter/filter.go @@ -0,0 +1,56 @@ +package filter + +import ( + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/entry" + + lua "github.com/yuin/gopher-lua" +) + +const ( + Allow = 0 + Disallow = 1 + Error = 2 +) + +var luaInstance *lua.LState + +func LoadFromFile(luaFile string) { + luaInstance = lua.NewState() + err := luaInstance.DoFile(luaFile) + if err != nil { + panic(err) + } +} + +func Filter(e *entry.Entry) int { + if luaInstance == nil { + return Allow + } + keys := luaInstance.NewTable() + for _, key := range e.Keys { + keys.Append(lua.LString(key)) + } + + slots := luaInstance.NewTable() + for _, slot := range e.Slots { + slots.Append(lua.LNumber(slot)) + } + + f := luaInstance.GetGlobal("filter") + luaInstance.Push(f) + luaInstance.Push(lua.LNumber(e.Id)) // id + luaInstance.Push(lua.LBool(e.IsBase)) // is_base + luaInstance.Push(lua.LString(e.Group)) // group + luaInstance.Push(lua.LString(e.CmdName)) // cmd name + luaInstance.Push(keys) // keys + luaInstance.Push(slots) // slots + 
luaInstance.Push(lua.LNumber(e.DbId)) // dbid + luaInstance.Push(lua.LNumber(e.TimestampMs)) // timestamp_ms + + luaInstance.Call(8, 2) + + code := int(luaInstance.Get(1).(lua.LNumber)) + e.DbId = int(luaInstance.Get(2).(lua.LNumber)) + luaInstance.Pop(2) + return code +} diff --git a/tools/codis2pika/internal/log/func.go b/tools/codis2pika/internal/log/func.go new file mode 100644 index 0000000000..e3e246dbf7 --- /dev/null +++ b/tools/codis2pika/internal/log/func.go @@ -0,0 +1,38 @@ +package log + +import ( + "fmt" + + "github.com/rs/zerolog" +) + +func Assert(condition bool, msg string) { + if !condition { + Panicf("Assert failed: %s", msg) + } +} + +func Debugf(format string, args ...interface{}) { + logFinally(logger.Debug(), format, args...) +} + +func Infof(format string, args ...interface{}) { + logFinally(logger.Info(), format, args...) +} + +func Warnf(format string, args ...interface{}) { + logFinally(logger.Warn(), format, args...) +} + +func Panicf(format string, args ...interface{}) { + logFinally(logger.Panic(), format, args...) +} + +func PanicError(err error) { + Panicf(err.Error()) +} + +func logFinally(event *zerolog.Event, format string, args ...interface{}) { + str := fmt.Sprintf(format, args...) 
+ event.Msg(str) +} diff --git a/tools/codis2pika/internal/log/init.go b/tools/codis2pika/internal/log/init.go new file mode 100644 index 0000000000..4231b8c5fa --- /dev/null +++ b/tools/codis2pika/internal/log/init.go @@ -0,0 +1,36 @@ +package log + +import ( + "fmt" + "os" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/config" + + "github.com/rs/zerolog" +) + +var logger zerolog.Logger + +func Init() { + + // log level + switch config.Config.Advanced.LogLevel { + case "debug": + zerolog.SetGlobalLevel(zerolog.DebugLevel) + case "info": + zerolog.SetGlobalLevel(zerolog.InfoLevel) + case "warn": + zerolog.SetGlobalLevel(zerolog.WarnLevel) + default: + panic(fmt.Sprintf("unknown log level: %s", config.Config.Advanced.LogLevel)) + } + + // log file + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: "2006-01-02 15:04:05"} + fileWriter, err := os.OpenFile(config.Config.Advanced.LogFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + panic(fmt.Sprintf("open log file failed: %s", err)) + } + multi := zerolog.MultiLevelWriter(consoleWriter, fileWriter) + logger = zerolog.New(multi).With().Timestamp().Logger() +} diff --git a/tools/codis2pika/internal/rdb/rdb.go b/tools/codis2pika/internal/rdb/rdb.go new file mode 100644 index 0000000000..32c6a3bd12 --- /dev/null +++ b/tools/codis2pika/internal/rdb/rdb.go @@ -0,0 +1,507 @@ +package rdb + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + "unsafe" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/config" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/entry" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/types" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/statistics" + + // 
"github.com/hdt3213/rdb/model" + "github.com/hdt3213/rdb/parser" +) + +type Loader struct { + replStreamDbId int + nowDBId int + expireAt uint64 + idle int64 + freq int64 + + filPath string + fp *os.File + + ch chan *entry.Entry +} + +func NewLoader(filPath string, ch chan *entry.Entry) *Loader { + ld := new(Loader) + ld.ch = ch + ld.filPath = filPath + return ld +} + +// {"db":0,"key":"string","size":10,"type":"string","value":"aaaaaaa"}, +type KeyReadFromJson struct { + Bb int `json:"db"` + Key string `json:"key"` + Size uint64 `json:"size"` + Keytype string `json:"type"` + Value linectx `json:"value"` + Values linectx `json:"values"` + Members linectx `json:"members"` + Hash linectx `json:"hash"` + Entries linectx `json:"entries"` + Other interface{} +} + +// 解析rdb文件,此处zset暂时默认db=0 +func (ld *Loader) NewParseRDB() int { + var err error + ld.fp, err = os.OpenFile(ld.filPath, os.O_RDONLY, 0666) + if err != nil { + log.Panicf("open file failed. file_path=[%s], error=[%s]", ld.filPath, err) + } + defer func() { + err = ld.fp.Close() + if err != nil { + log.Panicf("close file failed. file_path=[%s], error=[%s]", ld.filPath, err) + } + }() + rd := bufio.NewReader(ld.fp) + + //magic + version + buf := make([]byte, 9) + _, err = io.ReadFull(rd, buf) + if err != nil { + log.PanicError(err) + } + if !bytes.Equal(buf[:5], []byte("REDIS")) { + log.Panicf("verify magic string, invalid file format. 
bytes=[%v]", buf[:5]) + } + version, err := strconv.Atoi(string(buf[5:])) + if err != nil { + log.PanicError(err) + } + log.Infof("RDB version: %d", version) + + rdbFile, err := os.Open(ld.filPath) + if err != nil { + panic("open dump.rdb failed") + } + defer func() { + _ = rdbFile.Close() + }() + + // for stat + UpdateRDBSentSize := func() { + offset, err := rdbFile.Seek(0, io.SeekCurrent) + if err != nil { + log.PanicError(err) + } + statistics.UpdateRDBSentSize(offset) + } + defer UpdateRDBSentSize() + + decoder := parser.NewDecoder(rdbFile) + log.Infof("start parseRDBEntry") + + err = decoder.Parse(func(o parser.RedisObject) bool { + + var lines []byte + var setlines []string + var jline KeyReadFromJson + + typeByte := o.GetType() + expireAt := o.GetExpiration() + if expireAt != nil { + nowtime := time.Now() + //对已经过期或快过期的key,设置过期缓存3s + if !nowtime.Before(expireAt.Add(-3 * time.Second)) { + ld.expireAt = uint64(6) + } + if nowtime.Equal(expireAt.Add(-3 * time.Second)) { + ld.expireAt = uint64(6) + } + ld.expireAt = uint64(expireAt.Sub(nowtime).Seconds()) + //对于超时计算异常的key,过期时间往往特别巨大,此时特殊处理保留一段时间 + if ld.expireAt > uint64(99999999) { + log.Infof("Anomalous ttl of key: ", o.GetKey(), " ", ld.expireAt) + ld.expireAt = uint64(1000000) + } + + } + jsize := o.GetSize() + + jkey := o.GetKey() + if uint64(jsize) > config.Config.Advanced.TargetRedisProtoMaxBulkLen { + fmt.Println("bigkey not supported") + } + + keytype := GetStringToBytes(typeByte) + + switch typeByte { + case parser.StringType: + str := o.(*parser.StringObject) + lines = str.Value + o := types.ParseObject(bytes.NewReader(lines), keytype, o.GetKey()) + cmds := o.Rewrite() + for _, cmd := range cmds { + ld.cmd2channel(cmd) + } + case parser.SetType: + set := o.(*parser.SetObject) + for _, v := range set.Members { + setlines = append(setlines, string(v)) + } + var newo = new(types.SetObject) + newo.Elements = setlines + newo.Key = jkey + newo.AddSet(setlines) + + cmds := newo.Rewrite() + for _, cmd := 
range cmds { + ld.cmd2channel(cmd) + } + case parser.ListType: + list := o.(*parser.ListObject) + for _, v := range list.Values { + setlines = append(setlines, string(v)) + } + var newo = new(types.ListObject) + newo.Elements = setlines + newo.Key = jkey + newo.AddList(setlines) + + cmds := newo.Rewrite() + for _, cmd := range cmds { + + ld.cmd2channel(cmd) + } + case parser.HashType: + hash := o.(*parser.HashObject) + lines, err = hash.MarshalJSON() + err = json.Unmarshal(lines, &jline) + if err != nil { + log.Warnf("ParseRDB get unmarshal hash values error :", err.Error()) + } + keyvalue := GetValue(jline, typeByte).string() + anotherReader := io.Reader(bytes.NewBufferString(keyvalue)) + o := types.ParseObject(anotherReader, keytype, jkey) + cmds := o.Rewrite() + for _, cmd := range cmds { + ld.cmd2channel(cmd) + } + case parser.ZSetType: + zset := o.(*parser.ZSetObject) + var n []types.ZSetEntry + for _, zentry := range zset.Entries { + newz := types.ZSetEntry{ + Member: zentry.Member, + Score: strconv.Itoa(int(zentry.Score)), + } + n = append(n, newz) + } + var newo = new(types.ZsetObject) + newo.Key = o.GetKey() + newo.Elements = n + cmds := newo.Rewrite() + for _, cmd := range cmds { + ld.cmd2channel(cmd) + } + } + + if expireAt != nil { + var expirycmd types.RedisCmd + expirycmd = append(expirycmd, "EXPIRE", jkey, fmt.Sprint(ld.expireAt)) + ld.cmd2channel(expirycmd) + } + + ld.expireAt = 0 + ld.idle = 0 + ld.freq = 0 + + // return true to continue, return false to stop the iteration + return true + }) + if err != nil { + panic(err) + } + log.Infof("finish parseRDBEntry") + return ld.replStreamDbId +} + +func (ld *Loader) cmd2channel(cmd types.RedisCmd) { + e := entry.NewEntry() + e.IsBase = true + e.DbId = ld.nowDBId + if cmd == nil { + log.Warnf("the cmd is nil.") + } else { + e.Argv = cmd + ld.ch <- e + } + +} + +type TmpZSetEntry struct { + Member string + Score int +} + +type linectx struct { + cplx []string + smpl string + hash map[string]string + zset 
[]types.ZSetEntry +} + +func (n *linectx) UnmarshalJSON(text []byte) error { + var readzset []types.ZSetEntry + var tmpzset []types.ZSetEntry + t := strings.TrimSpace(string(text)) + if strings.HasPrefix(t, "[") { + + err := json.Unmarshal(text, &n.cplx) + if err != nil { + + err = json.Unmarshal(text, &n.hash) + if err != nil { + + err := json.Unmarshal(text, &tmpzset) + + for _, z := range tmpzset { + newz := types.ZSetEntry{ + Member: z.Member, + Score: z.Score, + } + readzset = append(readzset, newz) + } + n.zset = readzset + + return err + } + } + } + + err := json.Unmarshal(text, &n.smpl) + if err != nil { + + err = json.Unmarshal(text, &n.hash) + if err != nil { + + err := json.Unmarshal(text, &tmpzset) + + for _, z := range tmpzset { + newz := types.ZSetEntry{ + Member: z.Member, + Score: z.Score, + } + readzset = append(readzset, newz) + } + n.zset = readzset + + return err + } + } + return err +} + +// 根据key的类型,返回执行的set命令 +func SetCommandofThis(keytype string) string { + switch keytype { + case types.StringType: + return "SET" + case types.HashType: + return "HSET" + case types.ListType: + return "LSET" + case types.ZSetType: + return "ZADD" + case types.SetType: + return "SADD" + } + log.Panicf("unknown type byte: %s", keytype) + return "SET" +} + +const ( + rdbTypeString = 0 // RDB_TYPE_STRING + rdbTypeList = 1 + rdbTypeSet = 2 + rdbTypeZSet = 3 + rdbTypeHash = 4 // RDB_TYPE_HASH + rdbTypeZSet2 = 5 // ZSET version 2 with doubles stored in binary. + rdbTypeModule = 6 // RDB_TYPE_MODULE + rdbTypeModule2 = 7 // RDB_TYPE_MODULE2 Module value with annotations for parsing without the generating module being loaded. + + // Object types for encoded objects. 
+ + rdbTypeHashZipmap = 9 + rdbTypeListZiplist = 10 + rdbTypeSetIntset = 11 + rdbTypeZSetZiplist = 12 + rdbTypeHashZiplist = 13 + rdbTypeListQuicklist = 14 // RDB_TYPE_LIST_QUICKLIST + rdbTypeStreamListpacks = 15 // RDB_TYPE_STREAM_LISTPACKS + rdbTypeHashListpack = 16 // RDB_TYPE_HASH_ZIPLIST + rdbTypeZSetListpack = 17 // RDB_TYPE_ZSET_LISTPACK + rdbTypeListQuicklist2 = 18 // RDB_TYPE_LIST_QUICKLIST_2 https://github.com/redis/redis/pull/9357 + rdbTypeStreamListpacks2 = 19 // RDB_TYPE_STREAM_LISTPACKS2 + + moduleTypeNameCharSet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" +) + +// 根据类型与原始的value string,解析对象,返回值 +func ParseJsonObject(rd io.Reader, typeByte byte, key string) interface{} { + switch typeByte { + case rdbTypeString: // string + o := new(types.StringObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeList, rdbTypeListZiplist, rdbTypeListQuicklist, rdbTypeListQuicklist2: // list + o := new(types.ListObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeSet, rdbTypeSetIntset: // set + o := new(types.SetObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeZSet, rdbTypeZSet2, rdbTypeZSetZiplist, rdbTypeZSetListpack: // zset + o := new(types.ZsetObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeHash, rdbTypeHashZipmap, rdbTypeHashZiplist, rdbTypeHashListpack: // hash + o := new(types.HashObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeStreamListpacks, rdbTypeStreamListpacks2: // stream + o := new(types.StreamObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeModule, rdbTypeModule2: // module + if typeByte == rdbTypeModule { + log.Panicf("module type is not supported") + } + moduleId := structure.ReadLength(rd) + moduleName := moduleTypeNameByID(moduleId) + switch moduleName { + case "exhash---": + log.Panicf("exhash module is not supported") + case "exstrtype": + log.Panicf("exstrtype module is not supported") + case 
"tair-json": + log.Panicf("tair-json module is not supported") + default: + log.Panicf("unknown module type: %s", moduleName) + } + } + log.Panicf("unknown type byte: %d", typeByte) + return nil +} + +func moduleTypeNameByID(moduleId uint64) string { + nameList := make([]byte, 9) + moduleId >>= 10 + for i := 8; i >= 0; i-- { + nameList[i] = moduleTypeNameCharSet[moduleId&63] + moduleId >>= 6 + } + return string(nameList) +} + +func StringToBytes(data string) byte { + return *(*byte)(unsafe.Pointer(&data)) +} + +func StringToBytes2(data string) []byte { + return *(*[]byte)(unsafe.Pointer(&data)) +} + +// 用于适配redis shake的解析方法 +func GetStringToBytes(data string) byte { + switch data { + case types.StringType: + return rdbTypeString + case types.HashType: + return rdbTypeHash + case types.ListType: + return rdbTypeList + case types.ZSetType: + return rdbTypeZSet + case types.SetType: + return rdbTypeSet + } + log.Panicf("unknown type byte: %s,try to set as rdbTypeString", data) + + return rdbTypeString +} + +// 适配不同类型的key,获取值的方式不同 +func GetValue(jline KeyReadFromJson, keytype string) linectx { + switch keytype { + case types.StringType: + return jline.Value + case types.HashType: + jline.Value = jline.Hash + case types.ListType: + jline.Value = jline.Values + case types.ZSetType: + jline.Value = jline.Entries + case types.SetType: + jline.Value = jline.Members + } + return jline.Value +} + +// 将linectx转为string +func (n linectx) string() string { + + if len(n.hash) != 0 { + newb, err := json.Marshal(n.hash) + + if err != nil { + return "linectx structure hash transfor to string error" + } + + return string(newb) + } + + if NotNil(n.cplx) { + return func(nlist []string) string { + // for _, v := range nlist { + // newv = newv + " " + v + // } + newb, err := json.Marshal(n.cplx) + if err != nil { + return err.Error() + } + return string(newb) + }(n.cplx) + } + if n.smpl != "" { + return n.smpl + } + + if len(n.zset) != 0 { + + newb, err := json.Marshal(n.zset) + if err != 
nil { + return "linectx structure zset transfor to string error" + } + + return string(newb) + } + + return "[error] linectx structure transfor to string error" +} + +func NotNil(ctx []string) bool { + var ctxstring string + for _, v := range ctx { + ctxstring = ctxstring + v + } + return len(ctxstring) > 0 +} diff --git a/tools/codis2pika/internal/rdb/structure/byte.go b/tools/codis2pika/internal/rdb/structure/byte.go new file mode 100644 index 0000000000..333cdf6fec --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/byte.go @@ -0,0 +1,31 @@ +package structure + +import ( + "io" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +func ReadByte(rd io.Reader) byte { + b := ReadBytes(rd, 1)[0] + return b +} + +func ReadBytes(rd io.Reader, n int) []byte { + buf := make([]byte, n) + _, err := io.ReadFull(rd, buf) + + if err != nil { + log.PanicError(err) + } + return buf +} +func ReadBytesAll(rd io.Reader, n int) []byte { + + allread, err := io.ReadAll(rd) + + if err != nil { + log.PanicError(err) + } + return allread +} diff --git a/tools/codis2pika/internal/rdb/structure/float.go b/tools/codis2pika/internal/rdb/structure/float.go new file mode 100644 index 0000000000..49735744e9 --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/float.go @@ -0,0 +1,45 @@ +package structure + +import ( + "encoding/binary" + "io" + "math" + "strconv" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +func ReadFloat(rd io.Reader) float64 { + u := ReadUint8(rd) + + switch u { + case 253: + return math.NaN() + case 254: + return math.Inf(0) + case 255: + return math.Inf(-1) + default: + buf := make([]byte, u) + _, err := io.ReadFull(rd, buf) + if err != nil { + return 0 + } + + v, err := strconv.ParseFloat(string(buf), 64) + if err != nil { + log.PanicError(err) + } + return v + } +} + +func ReadDouble(rd io.Reader) float64 { + var buf = make([]byte, 8) + _, err := io.ReadFull(rd, buf) + if err != nil { + 
log.PanicError(err) + } + num := binary.LittleEndian.Uint64(buf) + return math.Float64frombits(num) +} diff --git a/tools/codis2pika/internal/rdb/structure/int.go b/tools/codis2pika/internal/rdb/structure/int.go new file mode 100644 index 0000000000..8ce5f6f063 --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/int.go @@ -0,0 +1,58 @@ +package structure + +import ( + "encoding/binary" + "io" +) + +func ReadUint8(rd io.Reader) uint8 { + b := ReadByte(rd) + return b +} + +func ReadUint16(rd io.Reader) uint16 { + buf := ReadBytes(rd, 2) + return binary.LittleEndian.Uint16(buf) +} + +func ReadUint24(rd io.Reader) uint32 { + buf := ReadBytes(rd, 3) + buf = append(buf, 0) + return binary.LittleEndian.Uint32(buf) +} + +func ReadUint32(rd io.Reader) uint32 { + buf := ReadBytes(rd, 4) + return binary.LittleEndian.Uint32(buf) +} + +func ReadUint64(rd io.Reader) uint64 { + buf := ReadBytes(rd, 8) + return binary.LittleEndian.Uint64(buf) +} + +func ReadInt8(rd io.Reader) int8 { + b := ReadByte(rd) + return int8(b) +} + +func ReadInt16(rd io.Reader) int16 { + buf := ReadBytes(rd, 2) + return int16(binary.LittleEndian.Uint16(buf)) +} + +func ReadInt24(rd io.Reader) int32 { + buf := ReadBytes(rd, 3) + buf = append([]byte{0}, buf...) 
+ return int32(binary.LittleEndian.Uint32(buf)) >> 8 +} + +func ReadInt32(rd io.Reader) int32 { + buf := ReadBytes(rd, 4) + return int32(binary.LittleEndian.Uint32(buf)) +} + +func ReadInt64(rd io.Reader) int64 { + buf := ReadBytes(rd, 8) + return int64(binary.LittleEndian.Uint64(buf)) +} diff --git a/tools/codis2pika/internal/rdb/structure/intset.go b/tools/codis2pika/internal/rdb/structure/intset.go new file mode 100644 index 0000000000..a829cd6090 --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/intset.go @@ -0,0 +1,32 @@ +package structure + +import ( + "bufio" + "encoding/binary" + "io" + "strconv" + "strings" +) + +func ReadIntset(rd io.Reader) []string { + rd = bufio.NewReader(strings.NewReader(ReadString(rd))) + + encodingType := int(ReadUint32(rd)) + size := int(ReadUint32(rd)) + elements := make([]string, size) + + for i := 0; i < size; i++ { + intBytes := ReadBytes(rd, encodingType) + var intString string + switch encodingType { + case 2: + intString = strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10) + case 4: + intString = strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10) + case 8: + intString = strconv.FormatInt(int64(int64(binary.LittleEndian.Uint64(intBytes))), 10) + } + elements[i] = intString + } + return elements +} diff --git a/tools/codis2pika/internal/rdb/structure/length.go b/tools/codis2pika/internal/rdb/structure/length.go new file mode 100644 index 0000000000..6cc3bb756b --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/length.go @@ -0,0 +1,68 @@ +package structure + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +const ( + RDB6ByteLen = 0 // RDB_6BITLEN + RDB14ByteLen = 1 // RDB_14BITLEN + len32or64Bit = 2 + lenSpecial = 3 // RDB_ENCVAL + RDB32ByteLen = 0x80 + RDB64ByteLen = 0x81 +) + +func ReadLength(rd io.Reader) uint64 { + length, special, err := readEncodedLength(rd) + if special { + 
log.Panicf("illegal length special=true, encoding: %d", length) + } + if err != nil { + log.PanicError(err) + } + return length +} + +func readEncodedLength(rd io.Reader) (length uint64, special bool, err error) { + var lengthBuffer = make([]byte, 8) + // 由于codis版本固定,这里直接写死了first2bits=0 + //firstByte := ReadByte(rd) + //first2bits := (firstByte & 0xc0) >> 6 // first 2 bits of encoding + var firstByte = 0 + var first2bits = 0 + + switch first2bits { + case RDB6ByteLen: + length = uint64(firstByte) & 0x3f + case RDB14ByteLen: + nextByte := ReadByte(rd) + length = (uint64(firstByte)&0x3f)<<8 | uint64(nextByte) + case len32or64Bit: + if firstByte == RDB32ByteLen { + _, err = io.ReadFull(rd, lengthBuffer[0:4]) + if err != nil { + return 0, false, fmt.Errorf("read len32Bit failed: %s", err.Error()) + } + length = uint64(binary.BigEndian.Uint32(lengthBuffer)) + } else if firstByte == RDB64ByteLen { + _, err = io.ReadFull(rd, lengthBuffer) + if err != nil { + return 0, false, fmt.Errorf("read len64Bit failed: %s", err.Error()) + } + length = binary.BigEndian.Uint64(lengthBuffer) + } else { + return 0, false, fmt.Errorf("illegal length encoding: %x", firstByte) + } + case lenSpecial: + special = true + length = uint64(firstByte) & 0x3f + } + //debug info + + return length, special, nil +} diff --git a/tools/codis2pika/internal/rdb/structure/listpack.go b/tools/codis2pika/internal/rdb/structure/listpack.go new file mode 100644 index 0000000000..3a3069b12e --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/listpack.go @@ -0,0 +1,167 @@ +package structure + +import ( + "bufio" + "io" + "math" + "strconv" + "strings" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +const ( + lpEncoding7BitUintMask = 0x80 // 10000000 LP_ENCODING_7BIT_UINT_MASK + lpEncoding7BitUint = 0x00 // 00000000 LP_ENCODING_7BIT_UINT + + lpEncoding6BitStrMask = 0xC0 // 11000000 LP_ENCODING_6BIT_STR_MASK + lpEncoding6BitStr = 0x80 // 10000000 LP_ENCODING_6BIT_STR + + 
lpEncoding13BitIntMask = 0xE0 // 11100000 LP_ENCODING_13BIT_INT_MASK + lpEncoding13BitInt = 0xC0 // 11000000 LP_ENCODING_13BIT_INT + + lpEncoding12BitStrMask = 0xF0 // 11110000 LP_ENCODING_12BIT_STR_MASK + lpEncoding12BitStr = 0xE0 // 11100000 LP_ENCODING_12BIT_STR + + lpEncoding16BitIntMask = 0xFF // 11111111 LP_ENCODING_16BIT_INT_MASK + lpEncoding16BitInt = 0xF1 // 11110001 LP_ENCODING_16BIT_INT + + lpEncoding24BitIntMask = 0xFF // 11111111 LP_ENCODING_24BIT_INT_MASK + lpEncoding24BitInt = 0xF2 // 11110010 LP_ENCODING_24BIT_INT + + lpEncoding32BitIntMask = 0xFF // 11111111 LP_ENCODING_32BIT_INT_MASK + lpEncoding32BitInt = 0xF3 // 11110011 LP_ENCODING_32BIT_INT + + lpEncoding64BitIntMask = 0xFF // 11111111 LP_ENCODING_64BIT_INT_MASK + lpEncoding64BitInt = 0xF4 // 11110100 LP_ENCODING_64BIT_INT + + lpEncoding32BitStrMask = 0xFF // 11111111 LP_ENCODING_32BIT_STR_MASK + lpEncoding32BitStr = 0xF0 // 11110000 LP_ENCODING_32BIT_STR +) + +func ReadListpack(rd io.Reader) []string { + rd = bufio.NewReader(strings.NewReader(ReadString(rd))) + + _ = ReadUint32(rd) // bytes + size := int(ReadUint16(rd)) + var elements []string + for i := 0; i < size; i++ { + ele := readListpackEntry(rd) + elements = append(elements, ele) + } + lastByte := ReadByte(rd) + if lastByte != 0xFF { + log.Panicf("ReadListpack: last byte is not 0xFF, but [%d]", lastByte) + } + return elements +} + +// redis/src/Listpack.c lpGet() +func readListpackEntry(rd io.Reader) string { + var val int64 + var uval, negstart, negmax uint64 + fireByte := ReadByte(rd) + if (fireByte & lpEncoding7BitUintMask) == lpEncoding7BitUint { // 7bit uint + + uval = uint64(fireByte & 0x7f) // 0x7f is 01111111 + negmax = 0 + negstart = math.MaxUint64 // uint + _ = ReadBytes(rd, lpEncodeBacklen(1)) // encode: 1 byte + + } else if (fireByte & lpEncoding6BitStrMask) == lpEncoding6BitStr { // 6bit length str + + length := int(fireByte & 0x3f) // 0x3f is 00111111 + ele := string(ReadBytes(rd, length)) + _ = ReadBytes(rd, 
lpEncodeBacklen(1+length)) // encode: 1byte, str: length + return ele + + } else if (fireByte & lpEncoding13BitIntMask) == lpEncoding13BitInt { // 13bit int + + secondByte := ReadByte(rd) + uval = (uint64(fireByte&0x1f) << 8) + uint64(secondByte) // 5bit + 8bit, 0x1f is 00011111 + negstart = uint64(1) << 12 + negmax = 8191 // uint13_max + _ = ReadBytes(rd, lpEncodeBacklen(2)) + + } else if (fireByte & lpEncoding16BitIntMask) == lpEncoding16BitInt { // 16bit int + + uval = uint64(ReadUint16(rd)) + negstart = uint64(1) << 15 + negmax = 65535 // uint16_max + _ = ReadBytes(rd, lpEncodeBacklen(2)) // encode: 1byte, int: 2byte + + } else if (fireByte & lpEncoding24BitIntMask) == lpEncoding24BitInt { // 24bit int + + uval = uint64(ReadUint24(rd)) + negstart = uint64(1) << 23 + negmax = math.MaxUint32 >> 8 // uint24_max + _ = ReadBytes(rd, lpEncodeBacklen(1+3)) // encode: 1byte, int: 3byte + + } else if (fireByte & lpEncoding32BitIntMask) == lpEncoding32BitInt { // 32bit int + + uval = uint64(ReadUint32(rd)) + negstart = uint64(1) << 31 + negmax = math.MaxUint32 // uint32_max + _ = ReadBytes(rd, lpEncodeBacklen(1+4)) // encode: 1byte, int: 4byte + + } else if (fireByte & lpEncoding64BitIntMask) == lpEncoding64BitInt { // 64bit int + + uval = ReadUint64(rd) + negstart = uint64(1) << 63 + negmax = math.MaxUint64 // uint64_max + _ = ReadBytes(rd, lpEncodeBacklen(1+8)) // encode: 1byte, int: 8byte + + } else if (fireByte & lpEncoding12BitStrMask) == lpEncoding12BitStr { // 12bit length str + + secondByte := ReadByte(rd) + length := (int(fireByte&0x0f) << 8) + int(secondByte) // 4bit + 8bit + ele := string(ReadBytes(rd, length)) + _ = ReadBytes(rd, lpEncodeBacklen(2+length)) // encode: 2byte, str: length + return ele + + } else if (fireByte & lpEncoding32BitStrMask) == lpEncoding32BitStr { // 32bit length str + + length := int(ReadUint32(rd)) + ele := string(ReadBytes(rd, length)) + _ = ReadBytes(rd, lpEncodeBacklen(5+length)) // encode: 1byte, length: 4byte, str: length + 
return ele + + } else { + // redis use this value, don't know why + // uval = 12345678900000000 + uint64(fireByte) + // negstart = math.MaxUint64 + // negmax = 0 + log.Panicf("unknown encoding: %x", fireByte) + } + + /* We reach this code path only for integer encodings. + * Convert the unsigned value to the signed one using two's complement + * rule. */ + if uval >= negstart { + /* This three steps conversion should avoid undefined behaviors + * in the unsigned -> signed conversion. */ + + uval = negmax - uval + val = int64(uval) + val = -val - 1 + } else { + val = int64(uval) + } + + return strconv.FormatInt(val, 10) +} + +/* the function just returns the length(byte) of `backlen`. */ +func lpEncodeBacklen(len int) int { + if len <= 127 { + return 1 + } else if len < 16383 { + return 2 + } else if len < 2097151 { + return 3 + } else if len < 268435455 { + return 4 + } else { + return 5 + } +} diff --git a/tools/codis2pika/internal/rdb/structure/string.go b/tools/codis2pika/internal/rdb/structure/string.go new file mode 100644 index 0000000000..bb5d11c9ca --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/string.go @@ -0,0 +1,73 @@ +package structure + +import ( + "io" + "strconv" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +const ( + RDBEncInt8 = 0 // RDB_ENC_INT8 + RDBEncInt16 = 1 // RDB_ENC_INT16 + RDBEncInt32 = 2 // RDB_ENC_INT32 + RDBEncLZF = 3 // RDB_ENC_LZF +) + +func ReadString(rd io.Reader) string { + length, special, err := readEncodedLength(rd) + + if err != nil { + log.PanicError(err) + } + if special { + switch length { + case RDBEncInt8: + b := ReadInt8(rd) + return strconv.Itoa(int(b)) + case RDBEncInt16: + b := ReadInt16(rd) + return strconv.Itoa(int(b)) + case RDBEncInt32: + b := ReadInt32(rd) + return strconv.Itoa(int(b)) + default: + log.Panicf("Unknown string encode type %d", length) + } + } + return string(ReadBytesAll(rd, int(length))) +} + +func lzfDecompress(in []byte, outLen int) string { + out := 
make([]byte, outLen) + + i, o := 0, 0 + for i < len(in) { + ctrl := int(in[i]) + i++ + if ctrl < 32 { + for x := 0; x <= ctrl; x++ { + out[o] = in[i] + i++ + o++ + } + } else { + length := ctrl >> 5 + if length == 7 { + length = length + int(in[i]) + i++ + } + ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1 + i++ + for x := 0; x <= length+1; x++ { + out[o] = out[ref] + ref++ + o++ + } + } + } + if o != outLen { + log.Panicf("lzf decompress failed: outLen: %d, o: %d", outLen, o) + } + return string(out) +} diff --git a/tools/codis2pika/internal/rdb/structure/ziplist.go b/tools/codis2pika/internal/rdb/structure/ziplist.go new file mode 100644 index 0000000000..edc3d5316e --- /dev/null +++ b/tools/codis2pika/internal/rdb/structure/ziplist.go @@ -0,0 +1,110 @@ +package structure + +import ( + "bufio" + "io" + "strconv" + "strings" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +const ( + zipStr06B = 0x00 // 0000 ZIP_STR_06B + zipStr14B = 0x01 // 0001 + zipStr32B = 0x02 // 0010 + + zipInt04B = 0x0f // high 4 bits of Int 04 encoding + + zipInt08B = 0xfe // 11111110 + zipInt16B = 0xc0 // 11000000 + zipInt24B = 0xf0 // 11110000 + zipInt32B = 0xd0 // 11010000 + zipInt64B = 0xe0 // 11100000 +) + +func ReadZipList(rd io.Reader) []string { + rd = bufio.NewReader(strings.NewReader(ReadString(rd))) + + // The general layout of the ziplist is as follows: + // ... + _ = ReadUint32(rd) // zlbytes + _ = ReadUint32(rd) // zltail + + size := int(ReadUint16(rd)) + var elements []string + if size == 65535 { // 2^16-1, we need to traverse the entire list to know how many items it holds. 
+ for firstByte := ReadByte(rd); firstByte != 0xFE; firstByte = ReadByte(rd) { + ele := readZipListEntry(rd, firstByte) + elements = append(elements, ele) + } + } else { + for i := 0; i < size; i++ { + firstByte := ReadByte(rd) + ele := readZipListEntry(rd, firstByte) + elements = append(elements, ele) + } + if lastByte := ReadByte(rd); lastByte != 0xFF { + log.Panicf("invalid zipList lastByte encoding: %d", lastByte) + } + } + return elements +} + +/* + * So practically an entry is encoded in the following way: + * + * + * + * Or alternatively if the previous entry length is greater than 253 bytes + * the following encoding is used: + * + * 0xFE <4 bytes unsigned little endian prevlen> + */ +func readZipListEntry(rd io.Reader, firstByte byte) string { + // read prevlen + if firstByte == 0xFE { + _ = ReadUint32(rd) // read 4 bytes prevlen + } + firstByte = 0 + var first2bits = 0 + switch first2bits { + case zipStr06B: + length := int(firstByte & 0x3f) // 0x3f = 00111111 + return string(ReadBytesAll(rd, length)) + case zipStr14B: + length := (int(firstByte&0x3f) << 8) + return string(ReadBytesAll(rd, length)) + case zipStr32B: + log.Panicf("Not support zipStr32B ", zipStr32B) + length := (int(firstByte&0x3f) << 8) + return string(ReadBytesAll(rd, int(length))) + } + switch firstByte { + case zipInt08B: + v := ReadInt8(rd) + return strconv.FormatInt(int64(v), 10) + case zipInt16B: + v := ReadInt16(rd) + return strconv.FormatInt(int64(v), 10) + case zipInt24B: + v := ReadInt24(rd) + return strconv.FormatInt(int64(v), 10) + case zipInt32B: + v := ReadInt32(rd) + return strconv.FormatInt(int64(v), 10) + case zipInt64B: + v := ReadInt64(rd) + return strconv.FormatInt(v, 10) + } + if (firstByte >> 4) == zipInt04B { + v := int64(firstByte & 0x0f) // 0x0f = 00001111 + v = v - 1 // 1-13 -> 0-12 + if v < 0 || v > 12 { + log.Panicf("invalid zipInt04B encoding: %d", v) + } + return strconv.FormatInt(v, 10) + } + log.Panicf("invalid encoding: %d", firstByte) + return "" +} diff 
--git a/tools/codis2pika/internal/rdb/types/hash.go b/tools/codis2pika/internal/rdb/types/hash.go new file mode 100644 index 0000000000..f3c74692a8 --- /dev/null +++ b/tools/codis2pika/internal/rdb/types/hash.go @@ -0,0 +1,103 @@ +package types + +import ( + "encoding/json" + "io" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" +) + +type HashObject struct { + key string + value map[string]string +} + +func (o *HashObject) LoadFromBuffer(rd io.Reader, key string, typeByte byte) { + o.key = key + o.value = make(map[string]string) + switch typeByte { + case rdbTypeHash: + o.readHash(rd) + case rdbTypeHashZipmap: + o.readHashZipmap(rd) + case rdbTypeHashZiplist: + o.readHashZiplist(rd) + case rdbTypeHashListpack: + o.readHashListpack(rd) + default: + log.Panicf("unknown hash type. typeByte=[%d]", typeByte) + } +} + +type HashMember struct { + elements map[string]string +} + +func (n *HashMember) UnmarshalJSON(text []byte) error { + stringtext := string(text) + err := json.Unmarshal([]byte(stringtext), &n.elements) + + return err +} + +func (o *HashObject) readHash(rd io.Reader) { + var oline HashMember + + ele, err := io.ReadAll(rd) + if err != nil { + log.Panicf("readHash err: ", err, " when ", &oline) + } + err = json.Unmarshal(ele, &oline) + if err != nil { + log.Warnf("hash.go readHash: ", &oline, " may err:", err) + } + + o.value = oline.elements +} + +func (o *HashObject) readHashZipmap(rd io.Reader) { + log.Panicf("not implemented rdbTypeZipmap") +} + +func (o *HashObject) readHashZiplist(rd io.Reader) { + list := structure.ReadZipList(rd) + size := len(list) + for i := 0; i < size; i += 2 { + key := list[i] + value := list[i+1] + o.value[key] = value + } +} + +func (o *HashObject) readHashListpack(rd io.Reader) { + list := structure.ReadListpack(rd) + size := len(list) + for i := 0; i < size; i += 2 { + key := list[i] + value := list[i+1] + o.value[key] = 
value + } +} + +func (o *HashObject) Rewrite() []RedisCmd { + // var cmds []RedisCmd + cmds := make([]RedisCmd, 0) + if len(o.value) > 1 { + var cmd RedisCmd + cmd = append(cmd, "hmset", o.key) + + for k, v := range o.value { + cmd = append(cmd, k, v) + } + cmds = append(cmds, cmd) + return cmds + } else { + var cmds []RedisCmd + for k, v := range o.value { + cmd := RedisCmd{"hset", o.key, k, v} + cmds = append(cmds, cmd) + } + return cmds + } +} diff --git a/tools/codis2pika/internal/rdb/types/interface.go b/tools/codis2pika/internal/rdb/types/interface.go new file mode 100644 index 0000000000..50721a6163 --- /dev/null +++ b/tools/codis2pika/internal/rdb/types/interface.go @@ -0,0 +1,117 @@ +package types + +import ( + "io" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" +) + +const ( + // StringType is redis string + StringType = "string" + // ListType is redis list + ListType = "list" + // SetType is redis set + SetType = "set" + // HashType is redis hash + HashType = "hash" + // ZSetType is redis sorted set + ZSetType = "zset" + // AuxType is redis metadata key-value pair + AuxType = "aux" + // DBSizeType is for _OPCODE_RESIZEDB + DBSizeType = "dbsize" +) + +const ( + rdbTypeString = 0 // RDB_TYPE_STRING + rdbTypeList = 1 + rdbTypeSet = 2 + rdbTypeZSet = 3 + rdbTypeHash = 4 // RDB_TYPE_HASH + rdbTypeZSet2 = 5 // ZSET version 2 with doubles stored in binary. + rdbTypeModule = 6 // RDB_TYPE_MODULE + rdbTypeModule2 = 7 // RDB_TYPE_MODULE2 Module value with annotations for parsing without the generating module being loaded. + + // Object types for encoded objects. 
+ + rdbTypeHashZipmap = 9 + rdbTypeListZiplist = 10 + rdbTypeSetIntset = 11 + rdbTypeZSetZiplist = 12 + rdbTypeHashZiplist = 13 + rdbTypeListQuicklist = 14 // RDB_TYPE_LIST_QUICKLIST + rdbTypeStreamListpacks = 15 // RDB_TYPE_STREAM_LISTPACKS + rdbTypeHashListpack = 16 // RDB_TYPE_HASH_ZIPLIST + rdbTypeZSetListpack = 17 // RDB_TYPE_ZSET_LISTPACK + rdbTypeListQuicklist2 = 18 // RDB_TYPE_LIST_QUICKLIST_2 https://github.com/redis/redis/pull/9357 + rdbTypeStreamListpacks2 = 19 // RDB_TYPE_STREAM_LISTPACKS2 + + moduleTypeNameCharSet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" +) + +type RedisCmd []string + +// RedisObject is interface for a redis object +type RedisObject interface { + LoadFromBuffer(rd io.Reader, key string, typeByte byte) + Rewrite() []RedisCmd // TODO big key +} + +func ParseObject(rd io.Reader, typeByte byte, key string) RedisObject { + switch typeByte { + case rdbTypeString: // string + o := new(StringObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeList, rdbTypeListZiplist, rdbTypeListQuicklist, rdbTypeListQuicklist2: // list + o := new(ListObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeSet, rdbTypeSetIntset: // set + o := new(SetObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeZSet, rdbTypeZSet2, rdbTypeZSetZiplist, rdbTypeZSetListpack: // zset + o := new(ZsetObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeHash, rdbTypeHashZipmap, rdbTypeHashZiplist, rdbTypeHashListpack: // hash + o := new(HashObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeStreamListpacks, rdbTypeStreamListpacks2: // stream + o := new(StreamObject) + o.LoadFromBuffer(rd, key, typeByte) + return o + case rdbTypeModule, rdbTypeModule2: // module + if typeByte == rdbTypeModule { + log.Panicf("module type is not supported") + } + moduleId := structure.ReadLength(rd) + moduleName := moduleTypeNameByID(moduleId) + switch moduleName { + case 
"exhash---": + log.Panicf("exhash module is not supported") + case "exstrtype": + log.Panicf("exstrtype module is not supported") + case "tair-json": + log.Panicf("tair-json module is not supported") + default: + log.Panicf("unknown module type: %s", moduleName) + } + } + log.Panicf("unknown type byte: %d", typeByte) + return nil +} + +func moduleTypeNameByID(moduleId uint64) string { + nameList := make([]byte, 9) + moduleId >>= 10 + for i := 8; i >= 0; i-- { + nameList[i] = moduleTypeNameCharSet[moduleId&63] + moduleId >>= 6 + } + return string(nameList) +} diff --git a/tools/codis2pika/internal/rdb/types/list.go b/tools/codis2pika/internal/rdb/types/list.go new file mode 100644 index 0000000000..af4d77507f --- /dev/null +++ b/tools/codis2pika/internal/rdb/types/list.go @@ -0,0 +1,125 @@ +package types + +import ( + "encoding/json" + "io" + "strconv" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" +) + +// quicklist node container formats +const ( + quicklistNodeContainerPlain = 1 // QUICKLIST_NODE_CONTAINER_PLAIN + quicklistNodeContainerPacked = 2 // QUICKLIST_NODE_CONTAINER_PACKED +) + +type ListObject struct { + Key string + + Elements []string +} + +func (o *ListObject) LoadFromBuffer(rd io.Reader, Key string, typeByte byte) { + o.Key = Key + + switch typeByte { + case rdbTypeList: + o.readList(rd) + case rdbTypeListZiplist: + o.Elements = structure.ReadZipList(rd) + case rdbTypeListQuicklist: + o.readQuickList(rd) + case rdbTypeListQuicklist2: + o.readQuickList2(rd) + default: + log.Panicf("unknown list type %d", typeByte) + } +} + +func (o *ListObject) Rewrite() []RedisCmd { + // var cmds []RedisCmd + cmds := make([]RedisCmd, 0) + if len(o.Elements) > 1 { + var cmd RedisCmd + cmd = append(cmd, "rpush", o.Key) + + for _, v := range o.Elements { + cmd = append(cmd, v) + } + cmds = append(cmds, cmd) + return cmds + } else { + for _, ele := range o.Elements { + 
cmd := RedisCmd{"rpush", o.Key, ele} + cmds = append(cmds, cmd) + } + return cmds + } + +} + +type ListMember struct { + Elements []string + numbers []int +} + +func (n *ListMember) UnmarshalJSON(text []byte) error { + + err := json.Unmarshal(text, &n.Elements) + if err != nil { + return json.Unmarshal(text, &n.numbers) + } + + return err +} + +func (o *ListObject) readList(rd io.Reader) { + var oline ListMember + ele, err := io.ReadAll(rd) + if err != nil { + log.Panicf("readList error : ", err, " when,", &oline) + } + err = json.Unmarshal(ele, &oline) + + if err != nil { + log.Warnf("list.go readList: ", &oline, " may err:", err) + } + + for _, e := range oline.Elements { + o.Elements = append(o.Elements, string(e)) + } + for _, e := range oline.numbers { + o.Elements = append(o.Elements, strconv.Itoa(e)) + } + +} + +func (o *ListObject) readQuickList(rd io.Reader) { + size := int(structure.ReadLength(rd)) + for i := 0; i < size; i++ { + ziplistElements := structure.ReadZipList(rd) + o.Elements = append(o.Elements, ziplistElements...) + } +} + +func (o *ListObject) readQuickList2(rd io.Reader) { + size := int(structure.ReadLength(rd)) + for i := 0; i < size; i++ { + container := structure.ReadLength(rd) + if container == quicklistNodeContainerPlain { + ele := structure.ReadString(rd) + o.Elements = append(o.Elements, ele) + } else if container == quicklistNodeContainerPacked { + listpackElements := structure.ReadListpack(rd) + o.Elements = append(o.Elements, listpackElements...) 
+ } else { + log.Panicf("unknown quicklist container %d", container) + } + } +} + +func (o *ListObject) AddList(listlines []string) { + o.Elements = listlines +} diff --git a/tools/codis2pika/internal/rdb/types/set.go b/tools/codis2pika/internal/rdb/types/set.go new file mode 100644 index 0000000000..1f960c1c2f --- /dev/null +++ b/tools/codis2pika/internal/rdb/types/set.go @@ -0,0 +1,84 @@ +package types + +import ( + "encoding/json" + "io" + "strconv" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" +) + +type SetObject struct { + Key string + Elements []string +} + +type SetMember struct { + Elements []string + numbers []int +} + +func (n *SetMember) UnmarshalJSON(text []byte) error { + err := json.Unmarshal(text, &n.Elements) + if err != nil { + return json.Unmarshal(text, &n.numbers) + } + return err +} + +func (o *SetObject) LoadFromBuffer(rd io.Reader, Key string, typeByte byte) { + o.Key = Key + switch typeByte { + case rdbTypeSet: + o.readSet(rd) + case rdbTypeSetIntset: + o.Elements = structure.ReadIntset(rd) + default: + log.Panicf("unknown set type. typeByte=[%d]", typeByte) + } +} + +func (o *SetObject) AddSet(setlist []string) { + o.Elements = setlist +} + +func (o *SetObject) readSet(rd io.Reader) { + var oline SetMember + ele, err := io.ReadAll(rd) + if err != nil { + log.Panicf("readSet error : ", err, " when,", &oline) + } + err = json.Unmarshal(ele, &oline) + + if err != nil { + log.Warnf("set.go readSet: ", &oline, " may err:", err) + } + o.Elements = append(o.Elements, oline.Elements...) 
+ for _, e := range oline.numbers { + o.Elements = append(o.Elements, strconv.Itoa(e)) + } + +} + +func (o *SetObject) Rewrite() []RedisCmd { + + // var cmds []RedisCmd + cmds := make([]RedisCmd, 0) + if len(o.Elements) > 1 { + var cmd RedisCmd + cmd = append(cmd, "sadd", o.Key) + + for _, e := range o.Elements { + cmd = append(cmd, e) + } + cmds = append(cmds, cmd) + return cmds + } else { + for _, ele := range o.Elements { + cmd := RedisCmd{"sadd", o.Key, ele} + cmds = append(cmds, cmd) + } + return cmds + } +} diff --git a/tools/codis2pika/internal/rdb/types/stream.go b/tools/codis2pika/internal/rdb/types/stream.go new file mode 100644 index 0000000000..09f3629d83 --- /dev/null +++ b/tools/codis2pika/internal/rdb/types/stream.go @@ -0,0 +1,249 @@ +package types + +import ( + "encoding/binary" + "fmt" + "io" + "strconv" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" +) + +/* + * The master entry is composed like in the following example: + * + * +-------+---------+------------+---------+--/--+---------+---------+-+ + * | count | deleted | num-fields | field_1 | field_2 | ... | field_N |0| + * +-------+---------+------------+---------+--/--+---------+---------+-+ + + * Populate the Listpack with the new entry. We use the following + * encoding: + * + * +-----+--------+----------+-------+-------+-/-+-------+-------+--------+ + * |flags|entry-id|num-fields|field-1|value-1|...|field-N|value-N|lp-count| + * +-----+--------+----------+-------+-------+-/-+-------+-------+--------+ + * + * However if the SAMEFIELD flag is set, we have just to populate + * the entry with the values, so it becomes: + * + * +-----+--------+-------+-/-+-------+--------+ + * |flags|entry-id|value-1|...|value-N|lp-count| + * +-----+--------+-------+-/-+-------+--------+ + * + * The entry-id field is actually two separated fields: the ms + * and seq difference compared to the master entry. 
+ * + * The lp-count field is a number that states the number of Listpack pieces + * that compose the entry, so that it's possible to travel the entry + * in reverse order: we can just start from the end of the Listpack, read + * the entry, and jump back N times to seek the "flags" field to read + * the stream full entry. */ + +type StreamObject struct { + key string + cmds []RedisCmd +} + +func (o *StreamObject) LoadFromBuffer(rd io.Reader, key string, typeByte byte) { + o.key = key + switch typeByte { + case rdbTypeStreamListpacks: + o.readStream(rd, key, typeByte) + case rdbTypeStreamListpacks2: + o.readStream(rd, key, typeByte) + default: + log.Panicf("unknown hash type. typeByte=[%d]", typeByte) + } +} + +// see redis rewriteStreamObject() + +func (o *StreamObject) readStream(rd io.Reader, masterKey string, typeByte byte) { + // 1. length(number of listpack), k1, v1, k2, v2, ..., number, ms, seq + + /* Load the number of Listpack. */ + nListpack := int(structure.ReadLength(rd)) + for i := 0; i < nListpack; i++ { + /* Load key */ + key := structure.ReadString(rd) + + /* key is streamId, like: 1612181627287-0 */ + masterMs := int64(binary.BigEndian.Uint64([]byte(key[:8]))) + masterSeq := int64(binary.BigEndian.Uint64([]byte(key[8:]))) + + /* value is a listpack */ + elements := structure.ReadListpack(rd) + inx := 0 + + /* The front of stream listpack is master entry */ + /* Parse the master entry */ + count := nextInteger(&inx, elements) // count + deleted := nextInteger(&inx, elements) // deleted + numFields := int(nextInteger(&inx, elements)) // num-fields + + fields := elements[3 : 3+numFields] // fields + inx = 3 + numFields + + // master entry end by zero + lastEntry := nextString(&inx, elements) + if lastEntry != "0" { + log.Panicf("master entry not ends by zero. 
lastEntry=[%s]", lastEntry) + } + + /* Parse entries */ + for count != 0 || deleted != 0 { + flags := nextInteger(&inx, elements) // [is_same_fields|is_deleted] + entryMs := nextInteger(&inx, elements) + entrySeq := nextInteger(&inx, elements) + + args := []string{"xadd", masterKey, fmt.Sprintf("%v-%v", entryMs+masterMs, entrySeq+masterSeq)} + + if flags&2 == 2 { // same fields, get field from master entry. + for j := 0; j < numFields; j++ { + args = append(args, fields[j], nextString(&inx, elements)) + } + } else { // get field by lp.Next() + num := int(nextInteger(&inx, elements)) + args = append(args, elements[inx:inx+num*2]...) + inx += num * 2 + } + + _ = nextString(&inx, elements) // lp_count + + if flags&1 == 1 { // is_deleted + deleted -= 1 + } else { + count -= 1 + o.cmds = append(o.cmds, args) + } + } + } + + /* Load total number of items inside the stream. */ + _ = structure.ReadLength(rd) // number + + /* Load the last entry ID. */ + lastMs := structure.ReadLength(rd) + lastSeq := structure.ReadLength(rd) + lastid := fmt.Sprintf("%v-%v", lastMs, lastSeq) + if nListpack == 0 { + /* Use the XADD MAXLEN 0 trick to generate an empty stream if + * the key we are serializing is an empty string, which is possible + * for the Stream type. */ + args := []string{"xadd", masterKey, "MAXLEN", "0", lastid, "x", "y"} + o.cmds = append(o.cmds, args) + } + + /* Append XSETID after XADD, make sure lastid is correct, + * in case of XDEL lastid. */ + o.cmds = append(o.cmds, []string{"xsetid", masterKey, lastid}) + + if typeByte == rdbTypeStreamListpacks2 { + /* Load the first entry ID. */ + _ = structure.ReadLength(rd) // first_ms + _ = structure.ReadLength(rd) // first_seq + + /* Load the maximal deleted entry ID. */ + _ = structure.ReadLength(rd) // max_deleted_ms + _ = structure.ReadLength(rd) // max_deleted_seq + + /* Load the offset. */ + _ = structure.ReadLength(rd) // offset + } + + /* 2. 
nConsumerGroup, groupName, ms, seq, PEL, Consumers */ + + /* Load the number of groups. */ + nConsumerGroup := int(structure.ReadLength(rd)) + for i := 0; i < nConsumerGroup; i++ { + /* Load groupName */ + groupName := structure.ReadString(rd) + + /* Load the last ID */ + lastMs := structure.ReadLength(rd) + lastSeq := structure.ReadLength(rd) + lastid := fmt.Sprintf("%v-%v", lastMs, lastSeq) + + /* Create Group */ + o.cmds = append(o.cmds, []string{"CREATE", masterKey, groupName, lastid}) + + /* Load group offset. */ + if typeByte == rdbTypeStreamListpacks2 { + _ = structure.ReadLength(rd) // offset + } + + /* Load the global PEL */ + nPel := int(structure.ReadLength(rd)) + mapId2Time := make(map[string]uint64) + mapId2Count := make(map[string]uint64) + + for j := 0; j < nPel; j++ { + /* Load streamId */ + tmpBytes := structure.ReadBytes(rd, 16) + ms := binary.BigEndian.Uint64(tmpBytes[:8]) + seq := binary.BigEndian.Uint64(tmpBytes[8:]) + streamId := fmt.Sprintf("%v-%v", ms, seq) + + /* Load deliveryTime */ + deliveryTime := structure.ReadUint64(rd) + + /* Load deliveryCount */ + deliveryCount := structure.ReadLength(rd) + + /* Save deliveryTime and deliveryCount */ + mapId2Time[streamId] = deliveryTime + mapId2Count[streamId] = deliveryCount + } + + /* Generate XCLAIMs for each consumer that happens to + * have pending entries. Empty consumers are discarded. 
*/ + nConsumer := int(structure.ReadLength(rd)) + for j := 0; j < nConsumer; j++ { + /* Load consumerName */ + consumerName := structure.ReadString(rd) + + /* Load lastSeenTime */ + _ = structure.ReadUint64(rd) + + /* Consumer PEL */ + nPEL := int(structure.ReadLength(rd)) + for i := 0; i < nPEL; i++ { + + /* Load streamId */ + tmpBytes := structure.ReadBytes(rd, 16) + ms := binary.BigEndian.Uint64(tmpBytes[:8]) + seq := binary.BigEndian.Uint64(tmpBytes[8:]) + streamId := fmt.Sprintf("%v-%v", ms, seq) + + /* Send */ + args := []string{ + "xclaim", masterKey, groupName, consumerName, "0", streamId, + "TIME", strconv.FormatUint(mapId2Time[streamId], 10), + "RETRYCOUNT", strconv.FormatUint(mapId2Count[streamId], 10), + "JUSTID", "FORCE"} + o.cmds = append(o.cmds, args) + } + } + } +} + +func nextInteger(inx *int, elements []string) int64 { + ele := elements[*inx] + *inx++ + i, err := strconv.ParseInt(ele, 10, 64) + if err != nil { + log.Panicf("integer is not a number. ele=[%s]", ele) + } + return i +} + +func nextString(inx *int, elements []string) string { + ele := elements[*inx] + *inx++ + return ele +} + +func (o *StreamObject) Rewrite() []RedisCmd { + return o.cmds +} diff --git a/tools/codis2pika/internal/rdb/types/string.go b/tools/codis2pika/internal/rdb/types/string.go new file mode 100644 index 0000000000..991db0b21e --- /dev/null +++ b/tools/codis2pika/internal/rdb/types/string.go @@ -0,0 +1,23 @@ +package types + +import ( + "io" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" +) + +type StringObject struct { + value string + key string +} + +func (o *StringObject) LoadFromBuffer(rd io.Reader, key string, _ byte) { + o.key = key + o.value = structure.ReadString(rd) +} + +func (o *StringObject) Rewrite() []RedisCmd { + cmd := RedisCmd{} + cmd = append(cmd, "set", o.key, o.value) + return []RedisCmd{cmd} +} diff --git a/tools/codis2pika/internal/rdb/types/zset.go b/tools/codis2pika/internal/rdb/types/zset.go new file mode 
100644 index 0000000000..f8d4aabe96 --- /dev/null +++ b/tools/codis2pika/internal/rdb/types/zset.go @@ -0,0 +1,134 @@ +package types + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb/structure" +) + +type ZSetEntry struct { + Member string + Score string + //Score int +} + +type ZsetObject struct { + Key string + Elements []ZSetEntry +} + +func (o *ZsetObject) LoadFromBuffer(rd io.Reader, key string, typeByte byte) { + o.Key = key + switch typeByte { + case rdbTypeZSet: + o.readZset(rd) + case rdbTypeZSet2: + o.readZset2(rd) + case rdbTypeZSetZiplist: + o.readZsetZiplist(rd) + case rdbTypeZSetListpack: + o.readZsetListpack(rd) + default: + log.Panicf("unknown zset type. typeByte=[%d]", typeByte) + } +} + +type ZsetMember struct { + Elements []ZSetEntry +} + +type ZSetTempEntry struct { + Member string + //Score string + Score int +} + +func (n *ZsetMember) UnmarshalJSON(text []byte) error { + var readzset []ZSetEntry + err := json.Unmarshal([]byte(string(text)), &readzset) + + for _, z := range readzset { + newz := ZSetEntry{ + Member: z.Member, + Score: z.Score, + } + n.Elements = append(n.Elements, newz) + } + + return err +} + +func (o *ZsetObject) readZset(rd io.Reader) { + var oline []ZSetEntry + ele, err := io.ReadAll(rd) + if err != nil { + log.Panicf("readZset error : ", err, " when,", &oline) + } + err = json.Unmarshal(ele, &oline) + if err != nil { + log.Warnf("zset.go readZset: ", &oline, " may err:", err) + } + + o.Elements = oline +} + +func (o *ZsetObject) readZset2(rd io.Reader) { + size := int(structure.ReadLength(rd)) + o.Elements = make([]ZSetEntry, size) + for i := 0; i < size; i++ { + o.Elements[i].Member = structure.ReadString(rd) + score := structure.ReadDouble(rd) + o.Elements[i].Score = fmt.Sprintf("%f", score) + } +} + +func (o *ZsetObject) readZsetZiplist(rd io.Reader) { + list := structure.ReadZipList(rd) + 
size := len(list) + if size%2 != 0 { + log.Panicf("zset listpack size is not even. size=[%d]", size) + } + o.Elements = make([]ZSetEntry, size/2) + for i := 0; i < size; i += 2 { + o.Elements[i/2].Member = list[i] + o.Elements[i/2].Score = list[i+1] + } +} + +func (o *ZsetObject) readZsetListpack(rd io.Reader) { + list := structure.ReadListpack(rd) + size := len(list) + if size%2 != 0 { + log.Panicf("zset listpack size is not even. size=[%d]", size) + } + o.Elements = make([]ZSetEntry, size/2) + for i := 0; i < size; i += 2 { + o.Elements[i/2].Member = list[i] + o.Elements[i/2].Score = list[i+1] + } +} + +func (o *ZsetObject) Rewrite() []RedisCmd { + + // var cmds []RedisCmd + cmds := make([]RedisCmd, 0) + if len(o.Elements) > 1 { + var cmd RedisCmd + cmd = append(cmd, "zadd", o.Key) + + for _, ele := range o.Elements { + cmd = append(cmd, ele.Score, ele.Member) + } + cmds = append(cmds, cmd) + return cmds + } else { + for _, ele := range o.Elements { + cmd := RedisCmd{"zadd", o.Key, ele.Score, ele.Member} + cmds = append(cmds, cmd) + } + return cmds + } +} diff --git a/tools/codis2pika/internal/reader/interface.go b/tools/codis2pika/internal/reader/interface.go new file mode 100644 index 0000000000..a9acdbf9b4 --- /dev/null +++ b/tools/codis2pika/internal/reader/interface.go @@ -0,0 +1,7 @@ +package reader + +import "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/entry" + +type Reader interface { + StartRead() chan *entry.Entry +} diff --git a/tools/codis2pika/internal/reader/psync.go b/tools/codis2pika/internal/reader/psync.go new file mode 100644 index 0000000000..a7ea3b17e5 --- /dev/null +++ b/tools/codis2pika/internal/reader/psync.go @@ -0,0 +1,259 @@ +package reader + +import ( + "bufio" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "time" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/client" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/entry" + 
"github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/reader/rotate" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/statistics" +) + +type psyncReader struct { + client *client.Redis + address string + ch chan *entry.Entry + DbId int + + rd *bufio.Reader + receivedOffset int64 + elastiCachePSync string +} + +func NewPSyncReader(address string, username string, password string, isTls bool, ElastiCachePSync string) Reader { + r := new(psyncReader) + r.address = address + r.elastiCachePSync = ElastiCachePSync + r.client = client.NewRedisClient(address, username, password, isTls) + r.rd = r.client.BufioReader() + log.Infof("psyncReader connected to redis successful. address=[%s]", address) + return r +} + +func (r *psyncReader) StartRead() chan *entry.Entry { + r.ch = make(chan *entry.Entry, 1024) + + go func() { + r.clearDir() + //另起一个协程,模拟从服务器定时向主服务器发送一个ack命令,包含偏移量 + go r.sendReplconfAck() + //保存rdb快照 + r.saveRDB() + //初始offset设置为0 + startOffset := r.receivedOffset + //保存aof文件 + go r.saveAOF(r.rd) + r.sendRDB() + + time.Sleep(1 * time.Second) // wait for saveAOF create aof file + r.sendAOF(startOffset) + }() + + return r.ch +} + +func (r *psyncReader) clearDir() { + files, err := ioutil.ReadDir("./") + if err != nil { + log.PanicError(err) + } + + for _, f := range files { + if strings.HasSuffix(f.Name(), ".rdb") || strings.HasSuffix(f.Name(), ".aof") { + err = os.Remove(f.Name()) + if err != nil { + log.PanicError(err) + } + log.Warnf("remove file. filename=[%s]", f.Name()) + } + } +} + +func (r *psyncReader) saveRDB() { + log.Infof("start save RDB. address=[%s]", r.address) + argv := []string{"replconf", "listening-port", "10007"} // 10007 is magic number + log.Infof("send %v", argv) + reply := r.client.DoWithStringReply(argv...) 
+ if reply != "OK" { + log.Warnf("send replconf command to redis server failed. address=[%s], reply=[%s], error=[]", r.address, reply) + } + + // send psync + argv = []string{"PSYNC", "?", "-1"} + if r.elastiCachePSync != "" { + argv = []string{r.elastiCachePSync, "?", "-1"} + } + r.client.Send(argv...) + log.Infof("send %v", argv) + // format: \n\n\n$\r\n + for true { + // \n\n\n$ + b, err := r.rd.ReadByte() + if err != nil { + log.PanicError(err) + } + if b == '\n' { + continue + } + if b == '-' { + reply, err := r.rd.ReadString('\n') + if err != nil { + log.PanicError(err) + } + reply = strings.TrimSpace(reply) + log.Panicf("psync error. address=[%s], reply=[%s]", r.address, reply) + } + if b != '+' { + log.Panicf("invalid psync reply. address=[%s], b=[%s]", r.address, string(b)) + } + break + } + reply, err := r.rd.ReadString('\n') + if err != nil { + log.PanicError(err) + } + reply = strings.TrimSpace(reply) + log.Infof("receive [%s]", reply) + masterOffset, err := strconv.Atoi(strings.Split(reply, " ")[2]) + if err != nil { + log.PanicError(err) + } + r.receivedOffset = int64(masterOffset) + + log.Infof("source db is doing bgsave. address=[%s]", r.address) + timeStart := time.Now() + // format: \n\n\n$\r\n + for true { + // \n\n\n$ + b, err := r.rd.ReadByte() + if err != nil { + log.PanicError(err) + } + if b == '\n' { + continue + } + if b != '$' { + log.Panicf("invalid rdb format. address=[%s], b=[%s]", r.address, string(b)) + } + break + } + log.Infof("source db bgsave finished. timeUsed=[%.2f]s, address=[%s]", time.Since(timeStart).Seconds(), r.address) + //返回读取到buf中的换行符后的总长度 + lengthStr, err := r.rd.ReadString('\n') + if err != nil { + log.PanicError(err) + } + lengthStr = strings.TrimSpace(lengthStr) + length, err := strconv.ParseInt(lengthStr, 10, 64) + if err != nil { + log.PanicError(err) + } + log.Infof("received rdb length. 
length=[%d]", length) + //设置读取的rdb文件的长度 + statistics.SetRDBFileSize(length) + + // create rdb file + rdbFilePath := "dump.rdb" + log.Infof("create dump.rdb file. filename_path=[%s]", rdbFilePath) + rdbFileHandle, err := os.OpenFile(rdbFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + log.PanicError(err) + } + + // read rdb + remainder := length + const bufSize int64 = 32 * 1024 * 1024 // 32MB + buf := make([]byte, bufSize) + for remainder != 0 { + readOnce := bufSize + if remainder < readOnce { + readOnce = remainder + } + n, err := r.rd.Read(buf[:readOnce]) + if err != nil { + log.PanicError(err) + } + remainder -= int64(n) + //循环读取buf中的rdb数据;每次读取,都会将length减去读取长度,直到余数=0 + statistics.UpdateRDBReceivedSize(length - remainder) + _, err = rdbFileHandle.Write(buf[:n]) + if err != nil { + log.PanicError(err) + } + } + err = rdbFileHandle.Close() + if err != nil { + log.PanicError(err) + } + log.Infof("save RDB finished. address=[%s], total_bytes=[%d]", r.address, length) +} + +func (r *psyncReader) saveAOF(rd io.Reader) { + log.Infof("start save AOF. address=[%s]", r.address) + // create aof file + aofWriter := rotate.NewAOFWriter(r.receivedOffset) + defer aofWriter.Close() + buf := make([]byte, 16*1024) // 16KB is enough for writing file + for { + n, err := rd.Read(buf) + if err != nil { + log.PanicError(err) + } + r.receivedOffset += int64(n) + statistics.UpdateAOFReceivedOffset(r.receivedOffset) + aofWriter.Write(buf[:n]) + } +} + +func (r *psyncReader) sendRDB() { + // start parse rdb + log.Infof("start send RDB. address=[%s]", r.address) + rdbLoader := rdb.NewLoader("dump.rdb", r.ch) + r.DbId = rdbLoader.NewParseRDB() + log.Infof("send RDB finished. address=[%s], repl-stream-db=[%d]", r.address, r.DbId) +} + +func (r *psyncReader) sendAOF(offset int64) { + aofReader := rotate.NewAOFReader(offset) + defer aofReader.Close() + r.client.SetBufioReader(bufio.NewReader(aofReader)) + log.Infof("sendAOF start. 
offset ", offset) + for { + argv := client.ArrayString(r.client.Receive()) + + // select + if strings.EqualFold(argv[0], "select") { + DbId, err := strconv.Atoi(argv[1]) + if err != nil { + log.PanicError(err) + } + r.DbId = DbId + continue + } + + if strings.EqualFold(argv[0], "PING") { + continue + } + + e := entry.NewEntry() + e.Argv = argv + e.DbId = r.DbId + e.Offset = aofReader.Offset() + r.ch <- e + } +} + +func (r *psyncReader) sendReplconfAck() { + for range time.Tick(time.Millisecond * 100) { + // send ack receivedOffset + r.client.Send("replconf", "ack", strconv.FormatInt(r.receivedOffset, 10)) + } +} diff --git a/tools/codis2pika/internal/reader/rdb_reader.go b/tools/codis2pika/internal/reader/rdb_reader.go new file mode 100644 index 0000000000..0f48132e29 --- /dev/null +++ b/tools/codis2pika/internal/reader/rdb_reader.go @@ -0,0 +1,42 @@ +package reader + +import ( + "path/filepath" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/entry" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/rdb" +) + +type rdbReader struct { + path string + ch chan *entry.Entry +} + +func NewRDBReader(path string) Reader { + log.Infof("NewRDBReader: path=[%s]", path) + absolutePath, err := filepath.Abs(path) + if err != nil { + log.Panicf("NewRDBReader: filepath.Abs error: %s", err.Error()) + } + log.Infof("NewRDBReader: absolute path=[%s]", absolutePath) + r := new(rdbReader) + r.path = absolutePath + return r +} + +func (r *rdbReader) StartRead() chan *entry.Entry { + r.ch = make(chan *entry.Entry, 1024) + + go func() { + // start parse rdb + log.Infof("start send RDB. path=[%s]", r.path) + rdbLoader := rdb.NewLoader(r.path, r.ch) + //rdb模块解析了DB,并直接发送到了通道中 + _ = rdbLoader.NewParseRDB() + log.Infof("send RDB finished. 
path=[%s]", r.path) + close(r.ch) + }() + + return r.ch +} diff --git a/tools/codis2pika/internal/reader/rotate/aof_reader.go b/tools/codis2pika/internal/reader/rotate/aof_reader.go new file mode 100644 index 0000000000..ba8d2e1b9b --- /dev/null +++ b/tools/codis2pika/internal/reader/rotate/aof_reader.go @@ -0,0 +1,85 @@ +package rotate + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/utils" +) + +type AOFReader struct { + file *os.File + offset int64 + pos int64 + filename string +} + +func NewAOFReader(offset int64) *AOFReader { + r := new(AOFReader) + r.openFile(offset) + return r +} + +func (r *AOFReader) openFile(offset int64) { + r.filename = fmt.Sprintf("%d.aof", offset) + var err error + r.file, err = os.OpenFile(r.filename, os.O_RDONLY, 0644) + if err != nil { + log.PanicError(err) + } + r.offset = offset + r.pos = 0 + log.Infof("AOFReader open file. aof_filename=[%s]", r.filename) +} + +func (r *AOFReader) readNextFile(offset int64) { + filename := fmt.Sprintf("%d.aof", offset) + if utils.DoesFileExist(filename) { + r.Close() + err := os.Remove(r.filename) + if err != nil { + return + } + r.openFile(offset) + } +} + +func (r *AOFReader) Read(buf []byte) (n int, err error) { + n, err = r.file.Read(buf) + for err == io.EOF { + if r.filename != fmt.Sprintf("%d.aof", r.offset) { + r.readNextFile(r.offset) + } + time.Sleep(time.Millisecond * 10) + _, err = r.file.Seek(0, 1) + if err != nil { + log.PanicError(err) + } + n, err = r.file.Read(buf) + } + if err != nil { + log.PanicError(err) + } + r.offset += int64(n) + r.pos += int64(n) + return n, nil +} + +func (r *AOFReader) Offset() int64 { + return r.offset +} + +func (r *AOFReader) Close() { + if r.file == nil { + return + } + err := r.file.Close() + if err != nil { + log.PanicError(err) + } + r.file = nil + log.Infof("AOFReader close file. 
aof_filename=[%s]", r.filename) +} diff --git a/tools/codis2pika/internal/reader/rotate/aof_writer.go b/tools/codis2pika/internal/reader/rotate/aof_writer.go new file mode 100644 index 0000000000..a8bc61bcff --- /dev/null +++ b/tools/codis2pika/internal/reader/rotate/aof_writer.go @@ -0,0 +1,69 @@ +package rotate + +import ( + "fmt" + "os" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +const MaxFileSize = 1024 * 1024 * 1024 // 1G + +type AOFWriter struct { + file *os.File + offset int64 + filename string + filesize int64 +} + +func NewAOFWriter(offset int64) *AOFWriter { + w := &AOFWriter{} + w.openFile(offset) + return w +} + +// 初始化时,将传入的offset设为偏移量 +func (w *AOFWriter) openFile(offset int64) { + w.filename = fmt.Sprintf("%d.aof", offset) + var err error + w.file, err = os.OpenFile(w.filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + log.PanicError(err) + } + w.offset = offset + w.filesize = 0 + log.Infof("AOFWriter open file. filename=[%s]", w.filename) +} + +// 每次写入buf,都会更新累加AOFWriter.offset +func (w *AOFWriter) Write(buf []byte) { + _, err := w.file.Write(buf) + if err != nil { + log.PanicError(err) + } + w.offset += int64(len(buf)) + w.filesize += int64(len(buf)) + if w.filesize > MaxFileSize { + w.Close() + w.openFile(w.offset) + } + err = w.file.Sync() + if err != nil { + log.PanicError(err) + } +} + +func (w *AOFWriter) Close() { + if w.file == nil { + return + } + err := w.file.Sync() + if err != nil { + log.PanicError(err) + } + err = w.file.Close() + if err != nil { + log.PanicError(err) + } + log.Infof("AOFWriter close file. 
filename=[%s], filesize=[%d]", w.filename, w.filesize) +} diff --git a/tools/codis2pika/internal/statistics/statistics.go b/tools/codis2pika/internal/statistics/statistics.go new file mode 100644 index 0000000000..062a1be863 --- /dev/null +++ b/tools/codis2pika/internal/statistics/statistics.go @@ -0,0 +1,91 @@ +package statistics + +import ( + "time" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/config" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +var ( + // ID + entryId uint64 + // rdb + rdbFileSize int64 + rdbReceivedSize int64 + rdbSendSize int64 + // aof + aofReceivedOffset int64 + aofAppliedOffset int64 + // ops + allowEntriesCount int64 + disallowEntriesCount int64 + unansweredBytesCount uint64 +) + +func Init() { + go func() { + //配置文件中的log_interval 默认为5s + seconds := config.Config.Advanced.LogInterval + if seconds <= 0 { + log.Infof("statistics disabled. seconds=[%d]", seconds) + } + + for range time.Tick(time.Duration(seconds) * time.Second) { + if rdbFileSize == 0 { + continue + } + if rdbFileSize > rdbReceivedSize { + log.Infof("receiving rdb. percent=[%.2f]%%, rdbFileSize=[%.3f]G, rdbReceivedSize=[%.3f]G", + float64(rdbReceivedSize)/float64(rdbFileSize)*100, + float64(rdbFileSize)/1024/1024/1024, + float64(rdbReceivedSize)/1024/1024/1024) + } else if rdbFileSize > rdbSendSize { + log.Infof("syncing rdb. entryId=[%d], unansweredBytesCount=[%d]bytes, rdbFileSize=[%.3f]G", + entryId, + unansweredBytesCount, + float64(rdbFileSize)/1024/1024/1024) + // 当 rdbFileSize == rdbSendSize 时,开始同步aof文件 + } else { + log.Infof("syncing aof. 
entryId=[%d], unansweredBytesCount=[%d]bytes, diff=[%d]", + entryId, + unansweredBytesCount, + aofReceivedOffset-aofAppliedOffset) + } + + allowEntriesCount = 0 + disallowEntriesCount = 0 + } + }() +} +func UpdateEntryId(id uint64) { + entryId = id +} +func AddAllowEntriesCount() { + allowEntriesCount++ +} +func AddDisallowEntriesCount() { + disallowEntriesCount++ +} + +// 将输出的size大小,设置为rdb文件的大小 +func SetRDBFileSize(size int64) { + rdbFileSize = size +} +func UpdateRDBReceivedSize(size int64) { + rdbReceivedSize = size +} +func UpdateRDBSentSize(offset int64) { + rdbSendSize = offset +} + +// 将入参设置为aof +func UpdateAOFReceivedOffset(offset int64) { + aofReceivedOffset = offset +} +func UpdateAOFAppliedOffset(offset int64) { + aofAppliedOffset = offset +} +func UpdateUnansweredBytesCount(count uint64) { + unansweredBytesCount = count +} diff --git a/tools/codis2pika/internal/utils/crc16.go b/tools/codis2pika/internal/utils/crc16.go new file mode 100644 index 0000000000..0099b9c1f7 --- /dev/null +++ b/tools/codis2pika/internal/utils/crc16.go @@ -0,0 +1,89 @@ +package utils + +/* + * Copyright 2001-2010 Georges Menie (www.menie.org) + * Copyright 2010-2012 Salvatore Sanfilippo (adapted to Redis coding style) + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* CRC16 implementation according to CCITT standards. + * + * Note by @antirez: this is actually the XMODEM CRC 16 algorithm, using the + * following parameters: + * + * Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" + * Width : 16 bit + * Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) + * Initialization : 0000 + * Reflect Input byte : False + * Reflect Output CRC : False + * Xor constant to output CRC : 0000 + * Output for "123456789" : 31C3 + */ + +var crc16tab = [256]uint16{ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 
0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, +} + +func Crc16(buf string) uint16 { + var crc uint16 + bytesBuf := []byte(buf) + for _, n := range bytesBuf { + crc = (crc << uint16(8)) ^ crc16tab[((crc>>uint16(8))^uint16(n))&0x00FF] + } + return crc +} diff --git a/tools/codis2pika/internal/utils/crc64.go b/tools/codis2pika/internal/utils/crc64.go new file mode 100644 index 0000000000..75de9586ad --- /dev/null +++ b/tools/codis2pika/internal/utils/crc64.go @@ -0,0 +1,98 @@ +package utils + +var crc64Table = [256]uint64{ + 0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, + 0xc038e5739841b68f, 
0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, + 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, + 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, + 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, + 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, + 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, + 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, + 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, + 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, + 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, + 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, + 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, + 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, + 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, + 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, + 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, + 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, + 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, + 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, + 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, + 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, + 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, + 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, + 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, + 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 
0x10e3202993385610, 0x6a3b50e1a30ddf69, + 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, + 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, + 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, + 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, + 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, + 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, + 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, + 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, + 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, + 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, + 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, + 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, + 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, + 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, + 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, + 0xaf85882d2576a038, 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, + 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349, + 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, + 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, + 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, + 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, + 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, + 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, + 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 
0x05863dbf1e3ac9de, + 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, + 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, + 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, + 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, + 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, + 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, + 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, + 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, + 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, + 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, + 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, + 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, + 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, + 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728} + +type digest struct { + crc uint64 +} + +func NewDigest() *digest { + d := &digest{} + return d +} + +func (d *digest) Update(p []byte) { + for _, b := range p { + d.crc = crc64Table[byte(d.crc)^b] ^ (d.crc >> 8) + } +} + +func (d *digest) Write(p []byte) (int, error) { + d.Update(p) + return len(p), nil +} + +func (d *digest) Sum64() uint64 { return d.crc } + +func CalcCRC64(p []byte) uint64 { + var crc uint64 + for _, b := range p { + //每次循环 做一次异或运算,再做一次右移运算,再做一次异或运算 + crc = crc64Table[byte(crc)^b] ^ (crc >> 8) + } + return crc +} diff --git a/tools/codis2pika/internal/utils/crc_test.go b/tools/codis2pika/internal/utils/crc_test.go new file mode 100644 index 0000000000..95107d3406 --- /dev/null +++ b/tools/codis2pika/internal/utils/crc_test.go @@ -0,0 +1,10 @@ +package utils + +import "testing" + +func TestCrc16(t 
*testing.T) { + ret := Crc16("123456789") + if ret != 0x31c3 { + t.Errorf("Crc16(123456789) = %x", ret) + } +} diff --git a/tools/codis2pika/internal/utils/file.go b/tools/codis2pika/internal/utils/file.go new file mode 100644 index 0000000000..83a0c6e4d0 --- /dev/null +++ b/tools/codis2pika/internal/utils/file.go @@ -0,0 +1,19 @@ +package utils + +import ( + "os" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" +) + +func DoesFileExist(fileName string) bool { + _, err := os.Stat(fileName) + if err != nil { + if os.IsNotExist(err) { + return false + } else { + log.PanicError(err) + } + } + return true +} diff --git a/tools/codis2pika/internal/writer/interface.go b/tools/codis2pika/internal/writer/interface.go new file mode 100644 index 0000000000..1b4ddf145e --- /dev/null +++ b/tools/codis2pika/internal/writer/interface.go @@ -0,0 +1,7 @@ +package writer + +import "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/entry" + +type Writer interface { + Write(entry *entry.Entry) +} diff --git a/tools/codis2pika/internal/writer/redis.go b/tools/codis2pika/internal/writer/redis.go new file mode 100644 index 0000000000..4072276d61 --- /dev/null +++ b/tools/codis2pika/internal/writer/redis.go @@ -0,0 +1,176 @@ +package writer + +import ( + "bytes" + "strconv" + "sync/atomic" + "time" + + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/client" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/client/proto" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/config" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/entry" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/log" + "github.com/OpenAtomFoundation/pika/tools/codis2pika/internal/statistics" +) + +type authInfo struct { + address string + username string + password string + isTls bool +} + +// var authInfoChan chan *authInfo +var authInfoSingle *authInfo +var slotauths map[int]*authInfo +var addressauths 
map[string]*authInfo + +func init() { + //设置队列长度等于最大重试次数 + slotauths = make(map[int]*authInfo, 1024) + addressauths = make(map[string]*authInfo, 100) +} + +type redisWriter struct { + client *client.Redis + DbId int + + cmdBuffer *bytes.Buffer + chWaitReply chan *entry.Entry + + UpdateUnansweredBytesCount uint64 // have sent in bytes +} + +func newWriteClientPool(address string, username string, password string, isTls bool) { + getauthinfo := authInfo{address, username, password, isTls} + if config.Config.Target.Type != "cluster" { + authInfoSingle = &getauthinfo + } else { + addressauths[address] = &getauthinfo + } +} + +func getWriteConn() *client.Redis { + authinfo := authInfoSingle + newclient := client.NewRedisClient(authinfo.address, authinfo.username, authinfo.password, authinfo.isTls) + return newclient +} + +func getClusterWriteConn(slotauths map[int]*authInfo, slots []int) *client.Redis { + //每条消息只有一个slot + v := slots[0] + authinfo := slotauths[v] + newclient := client.NewRedisClient(authinfo.address, authinfo.username, authinfo.password, authinfo.isTls) + return newclient +} + +func NewRedisWriter(address string, username string, password string, isTls bool) Writer { + //初始化redisWriter结构体 + rw := new(redisWriter) + + if config.Config.Target.Type != "cluster" { + //初始化用户信息 + newWriteClientPool(address, username, password, isTls) + //新建redis client + rw.client = getWriteConn() + } else { + //集群模式直接创建连接客户端 + rw.client = client.NewRedisClient(address, username, password, isTls) + } + log.Infof("redisWriter connected to redis successful. 
address=[%s]", address) + rw.cmdBuffer = new(bytes.Buffer) + //根据配置文件中的队列长度创建channel + rw.chWaitReply = make(chan *entry.Entry, config.Config.Advanced.PipelineCountLimit) + //协程刷新数据,更新迁移情况 + go rw.flushInterval() + return rw +} + +func (w *redisWriter) Write(e *entry.Entry) { + // recover when net.conn broken pipe + defer func() { + if err := recover(); err != nil { + log.Warnf("This is broken pipe error: %v", err) + log.Infof("try again %v ", e.Argv) + if config.Config.Target.Type != "cluster" { + //获取新的redis client + w.client = getWriteConn() + //获取返回信息 + w.chWaitReply <- e + //再次写入 + w.client.SendBytes(w.cmdBuffer.Bytes()) + } else { + w.client = getClusterWriteConn(slotauths, e.Slots) + //获取返回信息 + w.chWaitReply <- e + //再次写入 + w.client.SendBytes(w.cmdBuffer.Bytes()) + } + log.Infof("recover finish. %v ", e.Argv) + } + }() + // switch db if we need + if w.DbId != e.DbId { + w.switchDbTo(e.DbId) + } + + // send + w.cmdBuffer.Reset() + + //获取接口参数,并将buffer的cmd写入 + client.EncodeArgv(e.Argv, w.cmdBuffer) + //计算entry的key值大小是否超过大key的阈值限制 + e.EncodedSize = uint64(w.cmdBuffer.Len()) + for e.EncodedSize+atomic.LoadUint64(&w.UpdateUnansweredBytesCount) > config.Config.Advanced.TargetRedisClientMaxQuerybufLen { + time.Sleep(1 * time.Nanosecond) + } + w.chWaitReply <- e + atomic.AddUint64(&w.UpdateUnansweredBytesCount, e.EncodedSize) + //将buf中的byte写入 write并flush + w.client.SendBytes(w.cmdBuffer.Bytes()) +} + +func (w *redisWriter) switchDbTo(newDbId int) { + w.client.Send("select", strconv.Itoa(newDbId)) + w.DbId = newDbId +} + +func (w *redisWriter) flushInterval() { + for { + select { + //从chan *entry.Entry通道中获取消息 + case e := <-w.chWaitReply: + reply, err := w.client.Receive() + //定义的常量值 + if err == proto.Nil { + log.Warnf("redisWriter receive nil reply. argv=%v", e.Argv) + } else if err != nil { + if err.Error() == "BUSYKEY Target key name already exists." { + if config.Config.Advanced.RDBRestoreCommandBehavior == "skip" { + log.Warnf("redisWriter received BUSYKEY reply. 
argv=%v", e.Argv) + } else if config.Config.Advanced.RDBRestoreCommandBehavior == "panic" { + log.Panicf("redisWriter received BUSYKEY reply. argv=%v", e.Argv) + } + // 当写入发生panic时,此时的client已经关闭,无法再读读取,返回EOF + } else if err.Error() == "EOF" { + log.Warnf("redisWriter received EOF. error=[%v], argv=%v, slots=%v, reply=[%v]", err, e.Argv, e.Slots, reply) + log.Infof("try again %v ", e.Argv) + if config.Config.Target.Type == "cluster" { + w.client = getClusterWriteConn(slotauths, e.Slots) + } else { + w.client = getWriteConn() + } + w.Write(e) + log.Infof("finish ") + } else { + log.Panicf("redisWriter received error. error=[%v], argv=%v, slots=%v, reply=[%v]", err, e.Argv, e.Slots, reply) + } + } + atomic.AddUint64(&w.UpdateUnansweredBytesCount, ^(e.EncodedSize - 1)) + statistics.UpdateEntryId(e.Id) + statistics.UpdateAOFAppliedOffset(e.Offset) + statistics.UpdateUnansweredBytesCount(atomic.LoadUint64(&w.UpdateUnansweredBytesCount)) + } + } +} diff --git a/tools/codis2pika/scripts/commands/acl-cat.json b/tools/codis2pika/scripts/commands/acl-cat.json new file mode 100644 index 0000000000..a132cbcf41 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-cat.json @@ -0,0 +1,24 @@ +{ + "CAT": { + "summary": "List the ACL categories or the commands inside a category", + "complexity": "O(1) since the categories and commands are a fixed set.", + "group": "server", + "since": "6.0.0", + "arity": -2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "categoryname", + "type": "string", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-deluser.json b/tools/codis2pika/scripts/commands/acl-deluser.json new file mode 100644 index 0000000000..3c61557d49 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-deluser.json @@ -0,0 +1,25 @@ +{ + "DELUSER": { + "summary": "Remove the specified ACL users and the associated rules", + 
"complexity": "O(1) amortized time considering the typical user.", + "group": "server", + "since": "6.0.0", + "arity": -3, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "username", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-dryrun.json b/tools/codis2pika/scripts/commands/acl-dryrun.json new file mode 100644 index 0000000000..544858c3a0 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-dryrun.json @@ -0,0 +1,35 @@ +{ + "DRYRUN": { + "summary": "Returns whether the user can execute the given command without executing the command.", + "complexity": "O(1).", + "group": "server", + "since": "7.0.0", + "arity": -4, + "container": "ACL", + "function": "aclCommand", + "history": [], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "command", + "type": "string" + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-genpass.json b/tools/codis2pika/scripts/commands/acl-genpass.json new file mode 100644 index 0000000000..9de0313ecb --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-genpass.json @@ -0,0 +1,24 @@ +{ + "GENPASS": { + "summary": "Generate a pseudorandom secure password to use for ACL users", + "complexity": "O(1)", + "group": "server", + "since": "6.0.0", + "arity": -2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "bits", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-getuser.json b/tools/codis2pika/scripts/commands/acl-getuser.json new file mode 100644 index 0000000000..b87c7f6d49 --- 
/dev/null +++ b/tools/codis2pika/scripts/commands/acl-getuser.json @@ -0,0 +1,34 @@ +{ + "GETUSER": { + "summary": "Get the rules for a specific ACL user", + "complexity": "O(N). Where N is the number of password, command and pattern rules that the user has.", + "group": "server", + "since": "6.0.0", + "arity": 3, + "container": "ACL", + "function": "aclCommand", + "history": [ + [ + "6.2.0", + "Added Pub/Sub channel patterns." + ], + [ + "7.0.0", + "Added selectors and changed the format of key and channel patterns from a list to their rule representation." + ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "username", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-help.json b/tools/codis2pika/scripts/commands/acl-help.json new file mode 100644 index 0000000000..1cec00a538 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-help.json @@ -0,0 +1,16 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "server", + "since": "6.0.0", + "arity": 2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "LOADING", + "STALE", + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-list.json b/tools/codis2pika/scripts/commands/acl-list.json new file mode 100644 index 0000000000..f7a740d9de --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-list.json @@ -0,0 +1,18 @@ +{ + "LIST": { + "summary": "List the current ACL rules in ACL config file format", + "complexity": "O(N). 
Where N is the number of configured users.", + "group": "server", + "since": "6.0.0", + "arity": 2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-load.json b/tools/codis2pika/scripts/commands/acl-load.json new file mode 100644 index 0000000000..a4f138e48c --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-load.json @@ -0,0 +1,18 @@ +{ + "LOAD": { + "summary": "Reload the ACLs from the configured ACL file", + "complexity": "O(N). Where N is the number of configured users.", + "group": "server", + "since": "6.0.0", + "arity": 2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-log.json b/tools/codis2pika/scripts/commands/acl-log.json new file mode 100644 index 0000000000..c599685ddc --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-log.json @@ -0,0 +1,36 @@ +{ + "LOG": { + "summary": "List latest events denied because of ACLs in place", + "complexity": "O(N) with N being the number of entries shown.", + "group": "server", + "since": "6.0.0", + "arity": -2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "operation", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "reset", + "type": "pure-token", + "token": "RESET" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-save.json b/tools/codis2pika/scripts/commands/acl-save.json new file mode 100644 index 0000000000..0c6ee8a1d4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-save.json @@ -0,0 +1,18 @@ +{ + "SAVE": { + "summary": "Save the current ACL rules in the configured ACL file", + 
"complexity": "O(N). Where N is the number of configured users.", + "group": "server", + "since": "6.0.0", + "arity": 2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-setuser.json b/tools/codis2pika/scripts/commands/acl-setuser.json new file mode 100644 index 0000000000..7f1f308dfd --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-setuser.json @@ -0,0 +1,40 @@ +{ + "SETUSER": { + "summary": "Modify or create the rules for a specific ACL user", + "complexity": "O(N). Where N is the number of rules provided.", + "group": "server", + "since": "6.0.0", + "arity": -3, + "container": "ACL", + "function": "aclCommand", + "history": [ + [ + "6.2.0", + "Added Pub/Sub channel patterns." + ], + [ + "7.0.0", + "Added selectors and key based permissions." + ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "rule", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-users.json b/tools/codis2pika/scripts/commands/acl-users.json new file mode 100644 index 0000000000..5d00edbbf9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-users.json @@ -0,0 +1,18 @@ +{ + "USERS": { + "summary": "List the username of all the configured ACL rules", + "complexity": "O(N). 
Where N is the number of configured users.", + "group": "server", + "since": "6.0.0", + "arity": 2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl-whoami.json b/tools/codis2pika/scripts/commands/acl-whoami.json new file mode 100644 index 0000000000..7c3cc9ace9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl-whoami.json @@ -0,0 +1,17 @@ +{ + "WHOAMI": { + "summary": "Return the name of the user associated to the current connection", + "complexity": "O(1)", + "group": "server", + "since": "6.0.0", + "arity": 2, + "container": "ACL", + "function": "aclCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/acl.json b/tools/codis2pika/scripts/commands/acl.json new file mode 100644 index 0000000000..3c9141abc6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/acl.json @@ -0,0 +1,12 @@ +{ + "ACL": { + "summary": "A container for Access List Control commands ", + "complexity": "Depends on subcommand.", + "group": "server", + "since": "6.0.0", + "arity": -2, + "command_flags": [ + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/append.json b/tools/codis2pika/scripts/commands/append.json new file mode 100644 index 0000000000..77d170541e --- /dev/null +++ b/tools/codis2pika/scripts/commands/append.json @@ -0,0 +1,49 @@ +{ + "APPEND": { + "summary": "Append a value to a key", + "complexity": "O(1). 
The amortized time complexity is O(1) assuming the appended value is small and the already present value is of any size, since the dynamic string library used by Redis will double the free space available on every reallocation.", + "group": "string", + "since": "2.0.0", + "arity": 3, + "function": "appendCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/asking.json b/tools/codis2pika/scripts/commands/asking.json new file mode 100644 index 0000000000..a825804d1b --- /dev/null +++ b/tools/codis2pika/scripts/commands/asking.json @@ -0,0 +1,16 @@ +{ + "ASKING": { + "summary": "Sent by cluster clients after an -ASK redirect", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 1, + "function": "askingCommand", + "command_flags": [ + "FAST" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/auth.json b/tools/codis2pika/scripts/commands/auth.json new file mode 100644 index 0000000000..ff5e4b2851 --- /dev/null +++ b/tools/codis2pika/scripts/commands/auth.json @@ -0,0 +1,40 @@ +{ + "AUTH": { + "summary": "Authenticate to the server", + "complexity": "O(N) where N is the number of passwords defined for the user", + "group": "connection", + "since": "1.0.0", + "arity": -2, + "function": "authCommand", + "history": [ + [ + "6.0.0", + "Added ACL style (username and password)." 
+ ] + ], + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "NO_AUTH", + "SENTINEL", + "ALLOW_BUSY" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "username", + "type": "string", + "optional": true, + "since": "6.0.0" + }, + { + "name": "password", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bgrewriteaof.json b/tools/codis2pika/scripts/commands/bgrewriteaof.json new file mode 100644 index 0000000000..27d64dd932 --- /dev/null +++ b/tools/codis2pika/scripts/commands/bgrewriteaof.json @@ -0,0 +1,15 @@ +{ + "BGREWRITEAOF": { + "summary": "Asynchronously rewrite the append-only file", + "complexity": "O(1)", + "group": "server", + "since": "1.0.0", + "arity": 1, + "function": "bgrewriteaofCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bgsave.json b/tools/codis2pika/scripts/commands/bgsave.json new file mode 100644 index 0000000000..4b645db4b0 --- /dev/null +++ b/tools/codis2pika/scripts/commands/bgsave.json @@ -0,0 +1,30 @@ +{ + "BGSAVE": { + "summary": "Asynchronously save the dataset to disk", + "complexity": "O(1)", + "group": "server", + "since": "1.0.0", + "arity": -1, + "function": "bgsaveCommand", + "history": [ + [ + "3.2.2", + "Added the `SCHEDULE` option." 
+ ] + ], + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT" + ], + "arguments": [ + { + "name": "schedule", + "token": "SCHEDULE", + "type": "pure-token", + "optional": true, + "since": "3.2.2" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bitcount.json b/tools/codis2pika/scripts/commands/bitcount.json new file mode 100644 index 0000000000..c852609182 --- /dev/null +++ b/tools/codis2pika/scripts/commands/bitcount.json @@ -0,0 +1,82 @@ +{ + "BITCOUNT": { + "summary": "Count set bits in a string", + "complexity": "O(N)", + "group": "bitmap", + "since": "2.6.0", + "arity": -2, + "function": "bitcountCommand", + "history": [ + [ + "7.0.0", + "Added the `BYTE|BIT` option." + ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "BITMAP" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "index", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "start", + "type": "integer" + }, + { + "name": "end", + "type": "integer" + }, + { + "name": "index_unit", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "byte", + "type": "pure-token", + "token": "BYTE" + }, + { + "name": "bit", + "type": "pure-token", + "token": "BIT" + } + ] + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bitfield.json b/tools/codis2pika/scripts/commands/bitfield.json new file mode 100644 index 0000000000..d1bec969e8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/bitfield.json @@ -0,0 +1,143 @@ +{ + "BITFIELD": { + "summary": "Perform arbitrary bitfield integer operations on strings", + "complexity": "O(1) for each subcommand specified", + "group": "bitmap", + "since": "3.2.0", + "arity": -2, + "function": "bitfieldCommand", + 
"get_keys_function": "bitfieldGetKeys", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "BITMAP" + ], + "key_specs": [ + { + "notes": "This command allows both access and modification of the key", + "flags": [ + "RW", + "UPDATE", + "ACCESS", + "VARIABLE_FLAGS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "operation", + "type": "oneof", + "multiple": true, + "arguments": [ + { + "token": "GET", + "name": "encoding_offset", + "type": "block", + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + } + ] + }, + { + "name": "write", + "type": "block", + "arguments": [ + { + "token": "OVERFLOW", + "name": "wrap_sat_fail", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "wrap", + "type": "pure-token", + "token": "WRAP" + }, + { + "name": "sat", + "type": "pure-token", + "token": "SAT" + }, + { + "name": "fail", + "type": "pure-token", + "token": "FAIL" + } + ] + }, + { + "name": "write_operation", + "type": "oneof", + "arguments": [ + { + "token": "SET", + "name": "encoding_offset_value", + "type": "block", + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "value", + "type": "integer" + } + ] + }, + { + "token": "INCRBY", + "name": "encoding_offset_increment", + "type": "block", + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "increment", + "type": "integer" + } + ] + } + ] + } + ] + } + ] + } + ] + } +} \ No newline at end of file diff --git a/tools/codis2pika/scripts/commands/bitfield_ro.json b/tools/codis2pika/scripts/commands/bitfield_ro.json new file mode 100644 index 0000000000..99e3c98607 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/bitfield_ro.json @@ -0,0 +1,61 @@ +{ + "BITFIELD_RO": { + "summary": "Perform arbitrary bitfield integer operations on strings. Read-only variant of BITFIELD", + "complexity": "O(1) for each subcommand specified", + "group": "bitmap", + "since": "6.0.0", + "arity": -2, + "function": "bitfieldroCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "BITMAP" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "token": "GET", + "name": "encoding_offset", + "type": "block", + "multiple": true, + "multiple_token": true, + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bitop.json b/tools/codis2pika/scripts/commands/bitop.json new file mode 100644 index 0000000000..7cca1f0b0a --- /dev/null +++ b/tools/codis2pika/scripts/commands/bitop.json @@ -0,0 +1,72 @@ +{ + "BITOP": { + "summary": "Perform bitwise operations between strings", + "complexity": "O(N)", + "group": "bitmap", + "since": "2.6.0", + "arity": -4, + "function": "bitopCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "BITMAP" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 3 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "operation", + "type": "string" + }, + { + "name": "destkey", + "type": "key", + "key_spec_index": 0 + }, + { + "name": 
"key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bitpos.json b/tools/codis2pika/scripts/commands/bitpos.json new file mode 100644 index 0000000000..b8d2bf03b1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/bitpos.json @@ -0,0 +1,93 @@ +{ + "BITPOS": { + "summary": "Find first bit set or clear in a string", + "complexity": "O(N)", + "group": "bitmap", + "since": "2.8.7", + "arity": -3, + "function": "bitposCommand", + "history": [ + [ + "7.0.0", + "Added the `BYTE|BIT` option." + ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "BITMAP" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "bit", + "type": "integer" + }, + { + "name": "index", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "start", + "type": "integer" + }, + { + "name": "end_index", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "end", + "type": "integer" + }, + { + "name": "index_unit", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "byte", + "type": "pure-token", + "token": "BYTE" + }, + { + "name": "bit", + "type": "pure-token", + "token": "BIT" + } + ] + } + ] + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/blmove.json b/tools/codis2pika/scripts/commands/blmove.json new file mode 100644 index 0000000000..62036147bc --- /dev/null +++ b/tools/codis2pika/scripts/commands/blmove.json @@ -0,0 +1,106 @@ +{ + "BLMOVE": { + "summary": "Pop an element from a list, push it to another list and return it; or block until one is available", + "complexity": "O(1)", + "group": "list", + "since": "6.2.0", + "arity": 6, + "function": "blmoveCommand", + 
"command_flags": [ + "WRITE", + "DENYOOM", + "NOSCRIPT", + "BLOCKING" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "wherefrom", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "whereto", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "timeout", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/blmpop.json b/tools/codis2pika/scripts/commands/blmpop.json new file mode 100644 index 0000000000..29d381ad86 --- /dev/null +++ b/tools/codis2pika/scripts/commands/blmpop.json @@ -0,0 +1,77 @@ +{ + "BLMPOP": { + "summary": "Pop elements from a list, or block until one is available", + "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", + "group": "list", + "since": "7.0.0", + "arity": -5, + "function": "blmpopCommand", + "get_keys_function": "blmpopGetKeys", + "command_flags": [ + "WRITE", + "BLOCKING" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + 
} + ], + "arguments": [ + { + "name": "timeout", + "type": "double" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/blpop.json b/tools/codis2pika/scripts/commands/blpop.json new file mode 100644 index 0000000000..6871654812 --- /dev/null +++ b/tools/codis2pika/scripts/commands/blpop.json @@ -0,0 +1,57 @@ +{ + "BLPOP": { + "summary": "Remove and get the first element in a list, or block until one is available", + "complexity": "O(N) where N is the number of provided keys.", + "group": "list", + "since": "2.0.0", + "arity": -3, + "function": "blpopCommand", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." 
+ ] + ], + "command_flags": [ + "WRITE", + "NOSCRIPT", + "BLOCKING" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -2, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/brpop.json b/tools/codis2pika/scripts/commands/brpop.json new file mode 100644 index 0000000000..8f65202dd3 --- /dev/null +++ b/tools/codis2pika/scripts/commands/brpop.json @@ -0,0 +1,57 @@ +{ + "BRPOP": { + "summary": "Remove and get the last element in a list, or block until one is available", + "complexity": "O(N) where N is the number of provided keys.", + "group": "list", + "since": "2.0.0", + "arity": -3, + "function": "brpopCommand", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." 
+ ] + ], + "command_flags": [ + "WRITE", + "NOSCRIPT", + "BLOCKING" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -2, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/brpoplpush.json b/tools/codis2pika/scripts/commands/brpoplpush.json new file mode 100644 index 0000000000..7f8d11aba9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/brpoplpush.json @@ -0,0 +1,85 @@ +{ + "BRPOPLPUSH": { + "summary": "Pop an element from a list, push it to another list and return it; or block until one is available", + "complexity": "O(1)", + "group": "list", + "since": "2.2.0", + "arity": 4, + "function": "brpoplpushCommand", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." 
+ ] + ], + "deprecated_since": "6.2.0", + "replaced_by": "`BLMOVE` with the `RIGHT` and `LEFT` arguments", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "NOSCRIPT", + "BLOCKING" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "timeout", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bzmpop.json b/tools/codis2pika/scripts/commands/bzmpop.json new file mode 100644 index 0000000000..d5ba57c586 --- /dev/null +++ b/tools/codis2pika/scripts/commands/bzmpop.json @@ -0,0 +1,77 @@ +{ + "BZMPOP": { + "summary": "Remove and return members with scores in a sorted set or block until one is available", + "complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", + "group": "sorted_set", + "since": "7.0.0", + "arity": -5, + "function": "bzmpopCommand", + "get_keys_function": "blmpopGetKeys", + "command_flags": [ + "WRITE", + "BLOCKING" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "timeout", + "type": "double" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + 
"key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bzpopmax.json b/tools/codis2pika/scripts/commands/bzpopmax.json new file mode 100644 index 0000000000..5ca53aa2a0 --- /dev/null +++ b/tools/codis2pika/scripts/commands/bzpopmax.json @@ -0,0 +1,58 @@ +{ + "BZPOPMAX": { + "summary": "Remove and return the member with the highest score from one or more sorted sets, or block until one is available", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "group": "sorted_set", + "since": "5.0.0", + "arity": -3, + "function": "bzpopmaxCommand", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." 
+ ] + ], + "command_flags": [ + "WRITE", + "NOSCRIPT", + "FAST", + "BLOCKING" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -2, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/bzpopmin.json b/tools/codis2pika/scripts/commands/bzpopmin.json new file mode 100644 index 0000000000..742a2310ce --- /dev/null +++ b/tools/codis2pika/scripts/commands/bzpopmin.json @@ -0,0 +1,58 @@ +{ + "BZPOPMIN": { + "summary": "Remove and return the member with the lowest score from one or more sorted sets, or block until one is available", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "group": "sorted_set", + "since": "5.0.0", + "arity": -3, + "function": "bzpopminCommand", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." 
+ ] + ], + "command_flags": [ + "WRITE", + "NOSCRIPT", + "FAST", + "BLOCKING" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -2, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-caching.json b/tools/codis2pika/scripts/commands/client-caching.json new file mode 100644 index 0000000000..de49cfc397 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-caching.json @@ -0,0 +1,37 @@ +{ + "CACHING": { + "summary": "Instruct the server about tracking or not keys in the next request", + "complexity": "O(1)", + "group": "connection", + "since": "6.0.0", + "arity": 3, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "mode", + "type": "oneof", + "arguments": [ + { + "name": "yes", + "type": "pure-token", + "token": "YES" + }, + { + "name": "no", + "type": "pure-token", + "token": "NO" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-getname.json b/tools/codis2pika/scripts/commands/client-getname.json new file mode 100644 index 0000000000..89a62e872a --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-getname.json @@ -0,0 +1,19 @@ +{ + "GETNAME": { + "summary": "Get the current connection name", + "complexity": "O(1)", + "group": "connection", + "since": "2.6.9", + "arity": 2, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-getredir.json 
b/tools/codis2pika/scripts/commands/client-getredir.json new file mode 100644 index 0000000000..2313d07131 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-getredir.json @@ -0,0 +1,19 @@ +{ + "GETREDIR": { + "summary": "Get tracking notifications redirection client ID if any", + "complexity": "O(1)", + "group": "connection", + "since": "6.0.0", + "arity": 2, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-help.json b/tools/codis2pika/scripts/commands/client-help.json new file mode 100644 index 0000000000..6f725bae6d --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-help.json @@ -0,0 +1,18 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "connection", + "since": "5.0.0", + "arity": 2, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-id.json b/tools/codis2pika/scripts/commands/client-id.json new file mode 100644 index 0000000000..e05c8c62a3 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-id.json @@ -0,0 +1,19 @@ +{ + "ID": { + "summary": "Returns the client ID for the current connection", + "complexity": "O(1)", + "group": "connection", + "since": "5.0.0", + "arity": 2, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-info.json b/tools/codis2pika/scripts/commands/client-info.json new file mode 100644 index 0000000000..2668eaf14a --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-info.json @@ -0,0 +1,22 @@ +{ + "INFO": { + "summary": "Returns information about 
the current client connection.", + "complexity": "O(1)", + "group": "connection", + "since": "6.2.0", + "arity": 2, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-kill.json b/tools/codis2pika/scripts/commands/client-kill.json new file mode 100644 index 0000000000..398738bb29 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-kill.json @@ -0,0 +1,117 @@ +{ + "KILL": { + "summary": "Kill the connection of a client", + "complexity": "O(N) where N is the number of client connections", + "group": "connection", + "since": "2.4.0", + "arity": -3, + "container": "CLIENT", + "function": "clientCommand", + "history": [ + [ + "2.8.12", + "Added new filter format." + ], + [ + "2.8.12", + "`ID` option." + ], + [ + "3.2.0", + "Added `master` type in for `TYPE` option." + ], + [ + "5.0.0", + "Replaced `slave` `TYPE` with `replica`. `slave` still supported for backward compatibility." + ], + [ + "6.2.0", + "`LADDR` option." 
+ ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "ip:port", + "type": "string", + "optional": true + }, + { + "token": "ID", + "name": "client-id", + "type": "integer", + "optional": true, + "since": "2.8.12" + }, + { + "token": "TYPE", + "name": "normal_master_slave_pubsub", + "type": "oneof", + "optional": true, + "since": "2.8.12", + "arguments": [ + { + "name": "normal", + "type": "pure-token", + "token": "normal" + }, + { + "name": "master", + "type": "pure-token", + "token": "master", + "since": "3.2.0" + }, + { + "name": "slave", + "type": "pure-token", + "token": "slave" + }, + { + "name": "replica", + "type": "pure-token", + "token": "replica", + "since": "5.0.0" + }, + { + "name": "pubsub", + "type": "pure-token", + "token": "pubsub" + } + ] + }, + { + "token": "USER", + "name": "username", + "type": "string", + "optional": true + }, + { + "token": "ADDR", + "name": "ip:port", + "type": "string", + "optional": true + }, + { + "token": "LADDR", + "name": "ip:port", + "type": "string", + "optional": true, + "since": "6.2.0" + }, + { + "token": "SKIPME", + "name": "yes/no", + "type": "string", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-list.json b/tools/codis2pika/scripts/commands/client-list.json new file mode 100644 index 0000000000..bcd1dcefb8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-list.json @@ -0,0 +1,82 @@ +{ + "LIST": { + "summary": "Get the list of client connections", + "complexity": "O(N) where N is the number of client connections", + "group": "connection", + "since": "2.4.0", + "arity": -2, + "container": "CLIENT", + "function": "clientCommand", + "history": [ + [ + "2.8.12", + "Added unique client `id` field." + ], + [ + "5.0.0", + "Added optional `TYPE` filter." + ], + [ + "6.2.0", + "Added `laddr` field and the optional `ID` filter." 
+ ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "token": "TYPE", + "name": "normal_master_replica_pubsub", + "type": "oneof", + "optional": true, + "since": "5.0.0", + "arguments": [ + { + "name": "normal", + "type": "pure-token", + "token": "normal" + }, + { + "name": "master", + "type": "pure-token", + "token": "master" + }, + { + "name": "replica", + "type": "pure-token", + "token": "replica" + }, + { + "name": "pubsub", + "type": "pure-token", + "token": "pubsub" + } + ] + }, + { + "name": "id", + "token": "ID", + "type": "block", + "optional": true, + "since": "6.2.0", + "arguments": [ + { + "name": "client-id", + "type": "integer", + "multiple": true + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-no-evict.json b/tools/codis2pika/scripts/commands/client-no-evict.json new file mode 100644 index 0000000000..af99348d28 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-no-evict.json @@ -0,0 +1,38 @@ +{ + "NO-EVICT": { + "summary": "Set client eviction mode for the current connection", + "complexity": "O(1)", + "group": "connection", + "since": "7.0.0", + "arity": 3, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "enabled", + "type": "oneof", + "arguments": [ + { + "name": "on", + "type": "pure-token", + "token": "ON" + }, + { + "name": "off", + "type": "pure-token", + "token": "OFF" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-pause.json b/tools/codis2pika/scripts/commands/client-pause.json new file mode 100644 index 0000000000..3a1d9be833 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-pause.json @@ -0,0 +1,50 @@ +{ + "PAUSE": { + "summary": "Stop processing commands from 
clients for some time", + "complexity": "O(1)", + "group": "connection", + "since": "2.9.50", + "arity": -3, + "container": "CLIENT", + "function": "clientCommand", + "history": [ + [ + "6.2.0", + "`CLIENT PAUSE WRITE` mode added along with the `mode` option." + ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "timeout", + "type": "integer" + }, + { + "name": "mode", + "type": "oneof", + "optional": true, + "since": "6.2.0", + "arguments": [ + { + "name": "write", + "type": "pure-token", + "token": "WRITE" + }, + { + "name": "all", + "type": "pure-token", + "token": "ALL" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-reply.json b/tools/codis2pika/scripts/commands/client-reply.json new file mode 100644 index 0000000000..59128ca3af --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-reply.json @@ -0,0 +1,42 @@ +{ + "REPLY": { + "summary": "Instruct the server whether to reply to commands", + "complexity": "O(1)", + "group": "connection", + "since": "3.2.0", + "arity": 3, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "on_off_skip", + "type": "oneof", + "arguments": [ + { + "name": "on", + "type": "pure-token", + "token": "ON" + }, + { + "name": "off", + "type": "pure-token", + "token": "OFF" + }, + { + "name": "skip", + "type": "pure-token", + "token": "SKIP" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-setname.json b/tools/codis2pika/scripts/commands/client-setname.json new file mode 100644 index 0000000000..58e5f446da --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-setname.json @@ -0,0 +1,25 @@ +{ + "SETNAME": { + "summary": "Set the current connection name", + "complexity": "O(1)", + "group": "connection", + "since": "2.6.9", + 
"arity": 3, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "connection-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-tracking.json b/tools/codis2pika/scripts/commands/client-tracking.json new file mode 100644 index 0000000000..06f1df9445 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-tracking.json @@ -0,0 +1,75 @@ +{ + "TRACKING": { + "summary": "Enable or disable server assisted client side caching support", + "complexity": "O(1). Some options may introduce additional complexity.", + "group": "connection", + "since": "6.0.0", + "arity": -3, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "status", + "type": "oneof", + "arguments": [ + { + "name": "on", + "type": "pure-token", + "token": "ON" + }, + { + "name": "off", + "type": "pure-token", + "token": "OFF" + } + ] + }, + { + "token": "REDIRECT", + "name": "client-id", + "type": "integer", + "optional": true + }, + { + "token": "PREFIX", + "name": "prefix", + "type": "string", + "optional": true, + "multiple": true, + "multiple_token": true + }, + { + "name": "BCAST", + "token": "BCAST", + "type": "pure-token", + "optional": true + }, + { + "name": "OPTIN", + "token": "OPTIN", + "type": "pure-token", + "optional": true + }, + { + "name": "OPTOUT", + "token": "OPTOUT", + "type": "pure-token", + "optional": true + }, + { + "name": "NOLOOP", + "token": "NOLOOP", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-trackinginfo.json b/tools/codis2pika/scripts/commands/client-trackinginfo.json new file mode 100644 index 0000000000..0669f9a274 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/client-trackinginfo.json @@ -0,0 +1,19 @@ +{ + "TRACKINGINFO": { + "summary": "Return information about server assisted client side caching for the current connection", + "complexity": "O(1)", + "group": "connection", + "since": "6.2.0", + "arity": 2, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-unblock.json b/tools/codis2pika/scripts/commands/client-unblock.json new file mode 100644 index 0000000000..c6690730b4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-unblock.json @@ -0,0 +1,43 @@ +{ + "UNBLOCK": { + "summary": "Unblock a client blocked in a blocking command from a different connection", + "complexity": "O(log N) where N is the number of client connections", + "group": "connection", + "since": "5.0.0", + "arity": -3, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "client-id", + "type": "integer" + }, + { + "name": "timeout_error", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "timeout", + "type": "pure-token", + "token": "TIMEOUT" + }, + { + "name": "error", + "type": "pure-token", + "token": "ERROR" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client-unpause.json b/tools/codis2pika/scripts/commands/client-unpause.json new file mode 100644 index 0000000000..c25fd0466b --- /dev/null +++ b/tools/codis2pika/scripts/commands/client-unpause.json @@ -0,0 +1,20 @@ +{ + "UNPAUSE": { + "summary": "Resume processing of clients that were paused", + "complexity": "O(N) Where N is the number of paused clients", + "group": "connection", + "since": "6.2.0", + "arity": 2, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + 
"ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/client.json b/tools/codis2pika/scripts/commands/client.json new file mode 100644 index 0000000000..dfe2a9bc72 --- /dev/null +++ b/tools/codis2pika/scripts/commands/client.json @@ -0,0 +1,12 @@ +{ + "CLIENT": { + "summary": "A container for client connection commands", + "complexity": "Depends on subcommand.", + "group": "connection", + "since": "2.4.0", + "arity": -2, + "command_flags": [ + "SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-addslots.json b/tools/codis2pika/scripts/commands/cluster-addslots.json new file mode 100644 index 0000000000..518d67e096 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-addslots.json @@ -0,0 +1,26 @@ +{ + "ADDSLOTS": { + "summary": "Assign new hash slots to receiving node", + "complexity": "O(N) where N is the total number of hash slot arguments", + "group": "cluster", + "since": "3.0.0", + "arity": -3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "slot", + "type": "integer", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-addslotsrange.json b/tools/codis2pika/scripts/commands/cluster-addslotsrange.json new file mode 100644 index 0000000000..ee58776d39 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-addslotsrange.json @@ -0,0 +1,36 @@ +{ + "ADDSLOTSRANGE": { + "summary": "Assign new hash slots to receiving node", + "complexity": "O(N) where N is the total number of the slots between the start slot and end slot arguments.", + "group": "cluster", + "since": "7.0.0", + "arity": -4, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + 
"NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "start-slot_end-slot", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "start-slot", + "type": "integer" + }, + { + "name": "end-slot", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-bumpepoch.json b/tools/codis2pika/scripts/commands/cluster-bumpepoch.json new file mode 100644 index 0000000000..66dc28f0a7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-bumpepoch.json @@ -0,0 +1,19 @@ +{ + "BUMPEPOCH": { + "summary": "Advance the cluster config epoch", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-count-failure-reports.json b/tools/codis2pika/scripts/commands/cluster-count-failure-reports.json new file mode 100644 index 0000000000..bf25bc2648 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-count-failure-reports.json @@ -0,0 +1,24 @@ +{ + "COUNT-FAILURE-REPORTS": { + "summary": "Return the number of failure reports active for a given node", + "complexity": "O(N) where N is the number of failure reports", + "group": "cluster", + "since": "3.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-countkeysinslot.json b/tools/codis2pika/scripts/commands/cluster-countkeysinslot.json new file mode 100644 index 0000000000..5a94646265 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-countkeysinslot.json @@ -0,0 +1,23 @@ +{ + "COUNTKEYSINSLOT": { + "summary": "Return the number of 
local keys in the specified hash slot", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "slot", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-delslots.json b/tools/codis2pika/scripts/commands/cluster-delslots.json new file mode 100644 index 0000000000..862299b7ee --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-delslots.json @@ -0,0 +1,26 @@ +{ + "DELSLOTS": { + "summary": "Set hash slots as unbound in receiving node", + "complexity": "O(N) where N is the total number of hash slot arguments", + "group": "cluster", + "since": "3.0.0", + "arity": -3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "slot", + "type": "integer", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-delslotsrange.json b/tools/codis2pika/scripts/commands/cluster-delslotsrange.json new file mode 100644 index 0000000000..d13bf0ab8d --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-delslotsrange.json @@ -0,0 +1,36 @@ +{ + "DELSLOTSRANGE": { + "summary": "Set hash slots as unbound in receiving node", + "complexity": "O(N) where N is the total number of the slots between the start slot and end slot arguments.", + "group": "cluster", + "since": "7.0.0", + "arity": -4, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "start-slot_end-slot", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "start-slot", + "type": "integer" + }, + { + "name": 
"end-slot", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-failover.json b/tools/codis2pika/scripts/commands/cluster-failover.json new file mode 100644 index 0000000000..c57a1861bd --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-failover.json @@ -0,0 +1,38 @@ +{ + "FAILOVER": { + "summary": "Forces a replica to perform a manual failover of its master.", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": -2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "options", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "force", + "type": "pure-token", + "token": "FORCE" + }, + { + "name": "takeover", + "type": "pure-token", + "token": "TAKEOVER" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-flushslots.json b/tools/codis2pika/scripts/commands/cluster-flushslots.json new file mode 100644 index 0000000000..2478713d8c --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-flushslots.json @@ -0,0 +1,19 @@ +{ + "FLUSHSLOTS": { + "summary": "Delete a node's own slots information", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-forget.json b/tools/codis2pika/scripts/commands/cluster-forget.json new file mode 100644 index 0000000000..8991f76667 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-forget.json @@ -0,0 +1,25 @@ +{ + "FORGET": { + "summary": "Remove a node from the nodes table", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 3, + "container": 
"CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-getkeysinslot.json b/tools/codis2pika/scripts/commands/cluster-getkeysinslot.json new file mode 100644 index 0000000000..97c3a3b8ee --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-getkeysinslot.json @@ -0,0 +1,27 @@ +{ + "GETKEYSINSLOT": { + "summary": "Return local key names in the specified hash slot", + "complexity": "O(log(N)) where N is the number of requested keys", + "group": "cluster", + "since": "3.0.0", + "arity": 4, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "slot", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-help.json b/tools/codis2pika/scripts/commands/cluster-help.json new file mode 100644 index 0000000000..d0ddf11f28 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-help.json @@ -0,0 +1,15 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "cluster", + "since": "5.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-info.json b/tools/codis2pika/scripts/commands/cluster-info.json new file mode 100644 index 0000000000..08250f15b8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-info.json @@ -0,0 +1,17 @@ +{ + "INFO": { + "summary": "Provides info about Redis Cluster node state", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 2, + "container": "CLUSTER", + "function": 
"clusterCommand", + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-keyslot.json b/tools/codis2pika/scripts/commands/cluster-keyslot.json new file mode 100644 index 0000000000..d078aa3c4a --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-keyslot.json @@ -0,0 +1,23 @@ +{ + "KEYSLOT": { + "summary": "Returns the hash slot of the specified key", + "complexity": "O(N) where N is the number of bytes in the key", + "group": "cluster", + "since": "3.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "key", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-links.json b/tools/codis2pika/scripts/commands/cluster-links.json new file mode 100644 index 0000000000..4d50247645 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-links.json @@ -0,0 +1,17 @@ +{ + "LINKS": { + "summary": "Returns a list of all TCP links to and from peer nodes in cluster", + "complexity": "O(N) where N is the total number of Cluster nodes", + "group": "cluster", + "since": "7.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-meet.json b/tools/codis2pika/scripts/commands/cluster-meet.json new file mode 100644 index 0000000000..c36b7007ef --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-meet.json @@ -0,0 +1,29 @@ +{ + "MEET": { + "summary": "Force a node cluster to handshake with another node", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": -4, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + 
"command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "ip", + "type": "string" + }, + { + "name": "port", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-myid.json b/tools/codis2pika/scripts/commands/cluster-myid.json new file mode 100644 index 0000000000..ccb94eb13b --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-myid.json @@ -0,0 +1,17 @@ +{ + "MYID": { + "summary": "Return the node id", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-nodes.json b/tools/codis2pika/scripts/commands/cluster-nodes.json new file mode 100644 index 0000000000..9452139853 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-nodes.json @@ -0,0 +1,17 @@ +{ + "NODES": { + "summary": "Get Cluster config for the node", + "complexity": "O(N) where N is the total number of Cluster nodes", + "group": "cluster", + "since": "3.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-replicas.json b/tools/codis2pika/scripts/commands/cluster-replicas.json new file mode 100644 index 0000000000..e86322bc16 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-replicas.json @@ -0,0 +1,24 @@ +{ + "REPLICAS": { + "summary": "List replica nodes of the specified master node", + "complexity": "O(1)", + "group": "cluster", + "since": "5.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ] + } +} diff --git 
a/tools/codis2pika/scripts/commands/cluster-replicate.json b/tools/codis2pika/scripts/commands/cluster-replicate.json new file mode 100644 index 0000000000..87c6cc9aa8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-replicate.json @@ -0,0 +1,25 @@ +{ + "REPLICATE": { + "summary": "Reconfigure a node as a replica of the specified master node", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-reset.json b/tools/codis2pika/scripts/commands/cluster-reset.json new file mode 100644 index 0000000000..b7d675cd80 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-reset.json @@ -0,0 +1,38 @@ +{ + "RESET": { + "summary": "Reset a Redis Cluster node", + "complexity": "O(N) where N is the number of known nodes. 
The command may execute a FLUSHALL as a side effect.", + "group": "cluster", + "since": "3.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "ADMIN", + "STALE", + "NOSCRIPT" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "hard_soft", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "hard", + "type": "pure-token", + "token": "HARD" + }, + { + "name": "soft", + "type": "pure-token", + "token": "SOFT" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-saveconfig.json b/tools/codis2pika/scripts/commands/cluster-saveconfig.json new file mode 100644 index 0000000000..1bf49e7f73 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-saveconfig.json @@ -0,0 +1,19 @@ +{ + "SAVECONFIG": { + "summary": "Forces the node to save cluster state on disk", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-set-config-epoch.json b/tools/codis2pika/scripts/commands/cluster-set-config-epoch.json new file mode 100644 index 0000000000..2f819052d7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-set-config-epoch.json @@ -0,0 +1,25 @@ +{ + "SET-CONFIG-EPOCH": { + "summary": "Set the configuration epoch in a new node", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "config-epoch", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-setslot.json 
b/tools/codis2pika/scripts/commands/cluster-setslot.json new file mode 100644 index 0000000000..5d1aa45fc5 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-setslot.json @@ -0,0 +1,51 @@ +{ + "SETSLOT": { + "summary": "Bind a hash slot to a specific node", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": -4, + "container": "CLUSTER", + "function": "clusterCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "slot", + "type": "integer" + }, + { + "name": "subcommand", + "type": "oneof", + "arguments": [ + { + "name": "node-id", + "type": "string", + "token": "IMPORTING" + }, + { + "name": "node-id", + "type": "string", + "token": "MIGRATING" + }, + { + "name": "node-id", + "type": "string", + "token": "NODE" + }, + { + "name": "stable", + "type": "pure-token", + "token": "STABLE" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-shards.json b/tools/codis2pika/scripts/commands/cluster-shards.json new file mode 100644 index 0000000000..925ce8bf30 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-shards.json @@ -0,0 +1,18 @@ +{ + "SHARDS": { + "summary": "Get array of cluster slots to node mappings", + "complexity": "O(N) where N is the total number of cluster nodes", + "group": "cluster", + "since": "7.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "history": [], + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-slaves.json b/tools/codis2pika/scripts/commands/cluster-slaves.json new file mode 100644 index 0000000000..0ea77a8765 --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-slaves.json @@ -0,0 +1,29 @@ +{ + "SLAVES": { + "summary": "List replica nodes of the specified master node", + "complexity": "O(1)", + "group": "cluster", + 
"since": "3.0.0", + "arity": 3, + "container": "CLUSTER", + "function": "clusterCommand", + "deprecated_since": "5.0.0", + "replaced_by": "`CLUSTER REPLICAS`", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "ADMIN", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster-slots.json b/tools/codis2pika/scripts/commands/cluster-slots.json new file mode 100644 index 0000000000..e8782420ec --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster-slots.json @@ -0,0 +1,32 @@ +{ + "SLOTS": { + "summary": "Get array of Cluster slot to node mappings", + "complexity": "O(N) where N is the total number of Cluster nodes", + "group": "cluster", + "since": "3.0.0", + "arity": 2, + "container": "CLUSTER", + "function": "clusterCommand", + "deprecated_since": "7.0.0", + "replaced_by": "`CLUSTER SHARDS`", + "doc_flags": [ + "DEPRECATED" + ], + "history": [ + [ + "4.0.0", + "Added node IDs." + ], + [ + "7.0.0", + "Added additional networking metadata field." 
+ ] + ], + "command_flags": [ + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/cluster.json b/tools/codis2pika/scripts/commands/cluster.json new file mode 100644 index 0000000000..732a82954b --- /dev/null +++ b/tools/codis2pika/scripts/commands/cluster.json @@ -0,0 +1,9 @@ +{ + "CLUSTER": { + "summary": "A container for cluster commands", + "complexity": "Depends on subcommand.", + "group": "cluster", + "since": "3.0.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/command-count.json b/tools/codis2pika/scripts/commands/command-count.json new file mode 100644 index 0000000000..88f7873bd3 --- /dev/null +++ b/tools/codis2pika/scripts/commands/command-count.json @@ -0,0 +1,18 @@ +{ + "COUNT": { + "summary": "Get total number of Redis commands", + "complexity": "O(1)", + "group": "server", + "since": "2.8.13", + "arity": 2, + "container": "COMMAND", + "function": "commandCountCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/command-docs.json b/tools/codis2pika/scripts/commands/command-docs.json new file mode 100644 index 0000000000..68a32f8a1e --- /dev/null +++ b/tools/codis2pika/scripts/commands/command-docs.json @@ -0,0 +1,29 @@ +{ + "DOCS": { + "summary": "Get array of specific Redis command documentation", + "complexity": "O(N) where N is the number of commands to look up", + "group": "server", + "since": "7.0.0", + "arity": -2, + "container": "COMMAND", + "function": "commandDocsCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "arguments": [ + { + "name": "command-name", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/command-getkeys.json 
b/tools/codis2pika/scripts/commands/command-getkeys.json new file mode 100644 index 0000000000..ca655ac325 --- /dev/null +++ b/tools/codis2pika/scripts/commands/command-getkeys.json @@ -0,0 +1,18 @@ +{ + "GETKEYS": { + "summary": "Extract keys given a full Redis command", + "complexity": "O(N) where N is the number of arguments to the command", + "group": "server", + "since": "2.8.13", + "arity": -4, + "container": "COMMAND", + "function": "commandGetKeysCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/command-getkeysandflags.json b/tools/codis2pika/scripts/commands/command-getkeysandflags.json new file mode 100644 index 0000000000..44b3ddcb12 --- /dev/null +++ b/tools/codis2pika/scripts/commands/command-getkeysandflags.json @@ -0,0 +1,18 @@ +{ + "GETKEYSANDFLAGS": { + "summary": "Extract keys and access flags given a full Redis command", + "complexity": "O(N) where N is the number of arguments to the command", + "group": "server", + "since": "7.0.0", + "arity": -4, + "container": "COMMAND", + "function": "commandGetKeysAndFlagsCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/command-help.json b/tools/codis2pika/scripts/commands/command-help.json new file mode 100644 index 0000000000..ed759083f1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/command-help.json @@ -0,0 +1,18 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "server", + "since": "5.0.0", + "arity": 2, + "container": "COMMAND", + "function": "commandHelpCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/command-info.json b/tools/codis2pika/scripts/commands/command-info.json new file mode 100644 index 
0000000000..40d60a3ece --- /dev/null +++ b/tools/codis2pika/scripts/commands/command-info.json @@ -0,0 +1,35 @@ +{ + "INFO": { + "summary": "Get array of specific Redis command details, or all when no argument is given.", + "complexity": "O(N) where N is the number of commands to look up", + "group": "server", + "since": "2.8.13", + "arity": -2, + "container": "COMMAND", + "function": "commandInfoCommand", + "history": [ + [ + "7.0.0", + "Allowed to be called with no argument to get info on all commands." + ] + ], + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "arguments": [ + { + "name": "command-name", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/command-list.json b/tools/codis2pika/scripts/commands/command-list.json new file mode 100644 index 0000000000..49e9558ca9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/command-list.json @@ -0,0 +1,46 @@ +{ + "LIST": { + "summary": "Get an array of Redis command names", + "complexity": "O(N) where N is the total number of Redis commands", + "group": "server", + "since": "7.0.0", + "arity": -2, + "container": "COMMAND", + "function": "commandListCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "arguments": [ + { + "name": "filterby", + "token": "FILTERBY", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "module-name", + "type": "string", + "token": "MODULE" + }, + { + "name": "category", + "type": "string", + "token": "ACLCAT" + }, + { + "name": "pattern", + "type": "pattern", + "token": "PATTERN" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/command.json b/tools/codis2pika/scripts/commands/command.json new file mode 100644 index 0000000000..0bed04040d --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/command.json @@ -0,0 +1,21 @@ +{ + "COMMAND": { + "summary": "Get array of Redis command details", + "complexity": "O(N) where N is the total number of Redis commands", + "group": "server", + "since": "2.8.13", + "arity": -1, + "function": "commandCommand", + "command_flags": [ + "LOADING", + "STALE", + "SENTINEL" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/config-get.json b/tools/codis2pika/scripts/commands/config-get.json new file mode 100644 index 0000000000..26c060941c --- /dev/null +++ b/tools/codis2pika/scripts/commands/config-get.json @@ -0,0 +1,36 @@ +{ + "GET": { + "summary": "Get the values of configuration parameters", + "complexity": "O(N) when N is the number of configuration parameters provided", + "group": "server", + "since": "2.0.0", + "arity": -3, + "container": "CONFIG", + "function": "configGetCommand", + "history": [ + [ + "7.0.0", + "Added the ability to pass multiple pattern parameters in one call" + ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "arguments": [ + { + "name": "parameter", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "parameter", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/config-help.json b/tools/codis2pika/scripts/commands/config-help.json new file mode 100644 index 0000000000..537dd6bba6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/config-help.json @@ -0,0 +1,15 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "server", + "since": "5.0.0", + "arity": 2, + "container": "CONFIG", + "function": "configHelpCommand", + "command_flags": [ + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/config-resetstat.json b/tools/codis2pika/scripts/commands/config-resetstat.json new 
file mode 100644 index 0000000000..0180402abe --- /dev/null +++ b/tools/codis2pika/scripts/commands/config-resetstat.json @@ -0,0 +1,17 @@ +{ + "RESETSTAT": { + "summary": "Reset the stats returned by INFO", + "complexity": "O(1)", + "group": "server", + "since": "2.0.0", + "arity": 2, + "container": "CONFIG", + "function": "configResetStatCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/config-rewrite.json b/tools/codis2pika/scripts/commands/config-rewrite.json new file mode 100644 index 0000000000..4e31dd82db --- /dev/null +++ b/tools/codis2pika/scripts/commands/config-rewrite.json @@ -0,0 +1,17 @@ +{ + "REWRITE": { + "summary": "Rewrite the configuration file with the in memory configuration", + "complexity": "O(1)", + "group": "server", + "since": "2.8.0", + "arity": 2, + "container": "CONFIG", + "function": "configRewriteCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/config-set.json b/tools/codis2pika/scripts/commands/config-set.json new file mode 100644 index 0000000000..656e4bca30 --- /dev/null +++ b/tools/codis2pika/scripts/commands/config-set.json @@ -0,0 +1,44 @@ +{ + "SET": { + "summary": "Set configuration parameters to the given values", + "complexity": "O(N) when N is the number of configuration parameters provided", + "group": "server", + "since": "2.0.0", + "arity": -4, + "container": "CONFIG", + "function": "configSetCommand", + "history": [ + [ + "7.0.0", + "Added the ability to set multiple parameters in one call." 
+ ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "parameter_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "parameter", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/config.json b/tools/codis2pika/scripts/commands/config.json new file mode 100644 index 0000000000..a12271dcc8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/config.json @@ -0,0 +1,9 @@ +{ + "CONFIG": { + "summary": "A container for server configuration commands", + "complexity": "Depends on subcommand.", + "group": "server", + "since": "2.0.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/copy.json b/tools/codis2pika/scripts/commands/copy.json new file mode 100644 index 0000000000..0ffb949976 --- /dev/null +++ b/tools/codis2pika/scripts/commands/copy.json @@ -0,0 +1,79 @@ +{ + "COPY": { + "summary": "Copy a key", + "complexity": "O(N) worst case for collections, where N is the number of nested items. 
O(1) for string values.", + "group": "generic", + "since": "6.2.0", + "arity": -3, + "function": "copyCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "token": "DB", + "name": "destination-db", + "type": "integer", + "optional": true + }, + { + "name": "replace", + "token": "REPLACE", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/dbsize.json b/tools/codis2pika/scripts/commands/dbsize.json new file mode 100644 index 0000000000..4d65574dba --- /dev/null +++ b/tools/codis2pika/scripts/commands/dbsize.json @@ -0,0 +1,21 @@ +{ + "DBSIZE": { + "summary": "Return the number of keys in the selected database", + "complexity": "O(1)", + "group": "server", + "since": "1.0.0", + "arity": 1, + "function": "dbsizeCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:AGG_SUM" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/debug.json b/tools/codis2pika/scripts/commands/debug.json new file mode 100644 index 0000000000..092d2e9ab9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/debug.json @@ -0,0 +1,20 @@ +{ + "DEBUG": { + "summary": "A container for debugging commands", + "complexity": "Depends on subcommand.", + "group": "server", + "since": "1.0.0", + "arity": -2, + "function": "debugCommand", + "doc_flags": [ 
+ "SYSCMD" + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "PROTECTED" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/decr.json b/tools/codis2pika/scripts/commands/decr.json new file mode 100644 index 0000000000..4a5128d278 --- /dev/null +++ b/tools/codis2pika/scripts/commands/decr.json @@ -0,0 +1,46 @@ +{ + "DECR": { + "summary": "Decrement the integer value of a key by one", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": 2, + "function": "decrCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/decrby.json b/tools/codis2pika/scripts/commands/decrby.json new file mode 100644 index 0000000000..19f376b8e7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/decrby.json @@ -0,0 +1,50 @@ +{ + "DECRBY": { + "summary": "Decrement the integer value of a key by the given number", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": 3, + "function": "decrbyCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "decrement", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/del.json b/tools/codis2pika/scripts/commands/del.json new file mode 100644 index 0000000000..bc500a9afa --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/del.json @@ -0,0 +1,48 @@ +{ + "DEL": { + "summary": "Delete a key", + "complexity": "O(N) where N is the number of keys that will be removed. When a key to remove holds a value other than a string, the individual complexity for this key is O(M) where M is the number of elements in the list, set, sorted set or hash. Removing a single key that holds a string value is O(1).", + "group": "generic", + "since": "1.0.0", + "arity": -2, + "function": "delCommand", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "REQUEST_POLICY:MULTI_SHARD", + "RESPONSE_POLICY:AGG_SUM" + ], + "key_specs": [ + { + "flags": [ + "RM", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/discard.json b/tools/codis2pika/scripts/commands/discard.json new file mode 100644 index 0000000000..56589a84f3 --- /dev/null +++ b/tools/codis2pika/scripts/commands/discard.json @@ -0,0 +1,20 @@ +{ + "DISCARD": { + "summary": "Discard all commands issued after MULTI", + "complexity": "O(N), when N is the number of queued commands", + "group": "transactions", + "since": "2.0.0", + "arity": 1, + "function": "discardCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "ALLOW_BUSY" + ], + "acl_categories": [ + "TRANSACTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/dump.json b/tools/codis2pika/scripts/commands/dump.json new file mode 100644 index 0000000000..2e9453cddd --- /dev/null +++ b/tools/codis2pika/scripts/commands/dump.json @@ -0,0 +1,46 @@ +{ + "DUMP": { + "summary": "Return a serialized version of the value stored at the specified key.", + "complexity": "O(1) to access the key and additional O(N*M) to serialize it, 
where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1).", + "group": "generic", + "since": "2.6.0", + "arity": 2, + "function": "dumpCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/echo.json b/tools/codis2pika/scripts/commands/echo.json new file mode 100644 index 0000000000..f38d10bc52 --- /dev/null +++ b/tools/codis2pika/scripts/commands/echo.json @@ -0,0 +1,24 @@ +{ + "ECHO": { + "summary": "Echo the given string", + "complexity": "O(1)", + "group": "connection", + "since": "1.0.0", + "arity": 2, + "function": "echoCommand", + "command_flags": [ + "LOADING", + "STALE", + "FAST" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "message", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/eval.json b/tools/codis2pika/scripts/commands/eval.json new file mode 100644 index 0000000000..50fc022a2a --- /dev/null +++ b/tools/codis2pika/scripts/commands/eval.json @@ -0,0 +1,66 @@ +{ + "EVAL": { + "summary": "Execute a Lua script server side", + "complexity": "Depends on the script that is executed.", + "group": "scripting", + "since": "2.6.0", + "arity": -3, + "function": "evalCommand", + "get_keys_function": "evalGetKeys", + "command_flags": [ + "NOSCRIPT", + "SKIP_MONITOR", + "MAY_REPLICATE", + "NO_MANDATORY_KEYS", + "STALE" + ], + "acl_categories": [ + "SCRIPTING" + ], + "key_specs": [ + { + "notes": "We cannot tell how the keys will be used so we assume the 
worst, RW and UPDATE", + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "script", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "optional": true, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/eval_ro.json b/tools/codis2pika/scripts/commands/eval_ro.json new file mode 100644 index 0000000000..65fd866a06 --- /dev/null +++ b/tools/codis2pika/scripts/commands/eval_ro.json @@ -0,0 +1,63 @@ +{ + "EVAL_RO": { + "summary": "Execute a read-only Lua script server side", + "complexity": "Depends on the script that is executed.", + "group": "scripting", + "since": "7.0.0", + "arity": -3, + "function": "evalRoCommand", + "get_keys_function": "evalGetKeys", + "command_flags": [ + "NOSCRIPT", + "SKIP_MONITOR", + "NO_MANDATORY_KEYS", + "STALE", + "READONLY" + ], + "acl_categories": [ + "SCRIPTING" + ], + "key_specs": [ + { + "notes": "We cannot tell how the keys will be used so we assume the worst, RO and ACCESS", + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "script", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/evalsha.json b/tools/codis2pika/scripts/commands/evalsha.json new file mode 100644 index 0000000000..9b68b87f1a --- /dev/null +++ b/tools/codis2pika/scripts/commands/evalsha.json @@ -0,0 
+1,65 @@ +{ + "EVALSHA": { + "summary": "Execute a Lua script server side", + "complexity": "Depends on the script that is executed.", + "group": "scripting", + "since": "2.6.0", + "arity": -3, + "function": "evalShaCommand", + "get_keys_function": "evalGetKeys", + "command_flags": [ + "NOSCRIPT", + "SKIP_MONITOR", + "MAY_REPLICATE", + "NO_MANDATORY_KEYS", + "STALE" + ], + "acl_categories": [ + "SCRIPTING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "sha1", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "optional": true, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/evalsha_ro.json b/tools/codis2pika/scripts/commands/evalsha_ro.json new file mode 100644 index 0000000000..d76313540f --- /dev/null +++ b/tools/codis2pika/scripts/commands/evalsha_ro.json @@ -0,0 +1,62 @@ +{ + "EVALSHA_RO": { + "summary": "Execute a read-only Lua script server side", + "complexity": "Depends on the script that is executed.", + "group": "scripting", + "since": "7.0.0", + "arity": -3, + "function": "evalShaRoCommand", + "get_keys_function": "evalGetKeys", + "command_flags": [ + "NOSCRIPT", + "SKIP_MONITOR", + "NO_MANDATORY_KEYS", + "STALE", + "READONLY" + ], + "acl_categories": [ + "SCRIPTING" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "sha1", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + 
"multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/exec.json b/tools/codis2pika/scripts/commands/exec.json new file mode 100644 index 0000000000..80856ef99f --- /dev/null +++ b/tools/codis2pika/scripts/commands/exec.json @@ -0,0 +1,19 @@ +{ + "EXEC": { + "summary": "Execute all commands issued after MULTI", + "complexity": "Depends on commands in the transaction", + "group": "transactions", + "since": "1.2.0", + "arity": 1, + "function": "execCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "SKIP_SLOWLOG" + ], + "acl_categories": [ + "TRANSACTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/exists.json b/tools/codis2pika/scripts/commands/exists.json new file mode 100644 index 0000000000..b313633524 --- /dev/null +++ b/tools/codis2pika/scripts/commands/exists.json @@ -0,0 +1,54 @@ +{ + "EXISTS": { + "summary": "Determine if a key exists", + "complexity": "O(N) where N is the number of keys to check.", + "group": "generic", + "since": "1.0.0", + "arity": -2, + "function": "existsCommand", + "history": [ + [ + "3.0.3", + "Accepts multiple `key` arguments." 
+ ] + ], + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "REQUEST_POLICY:MULTI_SHARD", + "RESPONSE_POLICY:AGG_SUM" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/expire.json b/tools/codis2pika/scripts/commands/expire.json new file mode 100644 index 0000000000..712830d3e9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/expire.json @@ -0,0 +1,82 @@ +{ + "EXPIRE": { + "summary": "Set a key's time to live in seconds", + "complexity": "O(1)", + "group": "generic", + "since": "1.0.0", + "arity": -3, + "function": "expireCommand", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "seconds", + "type": "integer" + }, + { + "name": "condition", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/expireat.json b/tools/codis2pika/scripts/commands/expireat.json new file mode 100644 index 0000000000..43d9b748e3 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/expireat.json @@ -0,0 +1,82 @@ +{ + "EXPIREAT": { + "summary": "Set the expiration for a key as a UNIX timestamp", + "complexity": "O(1)", + "group": "generic", + "since": "1.2.0", + "arity": -3, + "function": "expireatCommand", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "unix-time-seconds", + "type": "unix-time" + }, + { + "name": "condition", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/expiretime.json b/tools/codis2pika/scripts/commands/expiretime.json new file mode 100644 index 0000000000..9393c1226e --- /dev/null +++ b/tools/codis2pika/scripts/commands/expiretime.json @@ -0,0 +1,44 @@ +{ + "EXPIRETIME": { + "summary": "Get the expiration Unix timestamp for a key", + "complexity": "O(1)", + "group": "generic", + "since": "7.0.0", + "arity": 2, + "function": "expiretimeCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff 
--git a/tools/codis2pika/scripts/commands/failover.json b/tools/codis2pika/scripts/commands/failover.json new file mode 100644 index 0000000000..dd2e2951ff --- /dev/null +++ b/tools/codis2pika/scripts/commands/failover.json @@ -0,0 +1,51 @@ +{ + "FAILOVER": { + "summary": "Start a coordinated failover between this server and one of its replicas.", + "complexity": "O(1)", + "group": "server", + "since": "6.2.0", + "arity": -1, + "function": "failoverCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "STALE" + ], + "arguments": [ + { + "name": "target", + "token": "TO", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "token": "FORCE", + "name": "force", + "type": "pure-token", + "optional": true + } + ] + }, + { + "token": "ABORT", + "name": "abort", + "type": "pure-token", + "optional": true + }, + { + "token": "TIMEOUT", + "name": "milliseconds", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/fcall.json b/tools/codis2pika/scripts/commands/fcall.json new file mode 100644 index 0000000000..27b7b4e353 --- /dev/null +++ b/tools/codis2pika/scripts/commands/fcall.json @@ -0,0 +1,64 @@ +{ + "FCALL": { + "summary": "Invoke a function", + "complexity": "Depends on the function that is executed.", + "group": "scripting", + "since": "7.0.0", + "arity": -3, + "function": "fcallCommand", + "get_keys_function": "functionGetKeys", + "command_flags": [ + "NOSCRIPT", + "SKIP_MONITOR", + "MAY_REPLICATE", + "NO_MANDATORY_KEYS", + "STALE" + ], + "acl_categories": [ + "SCRIPTING" + ], + "key_specs": [ + { + "notes": "We cannot tell how the keys will be used so we assume the worst, RW and UPDATE", + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": 
"function", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/fcall_ro.json b/tools/codis2pika/scripts/commands/fcall_ro.json new file mode 100644 index 0000000000..46085ebb02 --- /dev/null +++ b/tools/codis2pika/scripts/commands/fcall_ro.json @@ -0,0 +1,63 @@ +{ + "FCALL_RO": { + "summary": "Invoke a read-only function", + "complexity": "Depends on the function that is executed.", + "group": "scripting", + "since": "7.0.0", + "arity": -3, + "function": "fcallroCommand", + "get_keys_function": "functionGetKeys", + "command_flags": [ + "NOSCRIPT", + "SKIP_MONITOR", + "NO_MANDATORY_KEYS", + "STALE", + "READONLY" + ], + "acl_categories": [ + "SCRIPTING" + ], + "key_specs": [ + { + "notes": "We cannot tell how the keys will be used so we assume the worst, RO and ACCESS", + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "function", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/flushall.json b/tools/codis2pika/scripts/commands/flushall.json new file mode 100644 index 0000000000..ef6a1f883c --- /dev/null +++ b/tools/codis2pika/scripts/commands/flushall.json @@ -0,0 +1,52 @@ +{ + "FLUSHALL": { + "summary": "Remove all keys from all databases", + "complexity": "O(N) where N is the total number of keys in all databases", + "group": "server", + "since": "1.0.0", + "arity": -1, + "function": "flushallCommand", + "history": [ + [ + "4.0.0", + "Added the 
`ASYNC` flushing mode modifier." + ], + [ + "6.2.0", + "Added the `SYNC` flushing mode modifier." + ] + ], + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "KEYSPACE", + "DANGEROUS" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "async", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC", + "since": "4.0.0" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC", + "since": "6.2.0" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/flushdb.json b/tools/codis2pika/scripts/commands/flushdb.json new file mode 100644 index 0000000000..408ab326b5 --- /dev/null +++ b/tools/codis2pika/scripts/commands/flushdb.json @@ -0,0 +1,52 @@ +{ + "FLUSHDB": { + "summary": "Remove all keys from the current database", + "complexity": "O(N) where N is the number of keys in the selected database", + "group": "server", + "since": "1.0.0", + "arity": -1, + "function": "flushdbCommand", + "history": [ + [ + "4.0.0", + "Added the `ASYNC` flushing mode modifier." + ], + [ + "6.2.0", + "Added the `SYNC` flushing mode modifier." 
+ ] + ], + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "KEYSPACE", + "DANGEROUS" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "async", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC", + "since": "4.0.0" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC", + "since": "6.2.0" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-delete.json b/tools/codis2pika/scripts/commands/function-delete.json new file mode 100644 index 0000000000..01dc78ba4a --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-delete.json @@ -0,0 +1,28 @@ +{ + "DELETE": { + "summary": "Delete a function by name", + "complexity": "O(1)", + "group": "scripting", + "since": "7.0.0", + "arity": 3, + "container": "FUNCTION", + "function": "functionDeleteCommand", + "command_flags": [ + "NOSCRIPT", + "WRITE" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "library-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-dump.json b/tools/codis2pika/scripts/commands/function-dump.json new file mode 100644 index 0000000000..de402f5898 --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-dump.json @@ -0,0 +1,17 @@ +{ + "DUMP": { + "summary": "Dump all functions into a serialized binary payload", + "complexity": "O(N) where N is the number of functions", + "group": "scripting", + "since": "7.0.0", + "arity": 2, + "container": "FUNCTION", + "function": "functionDumpCommand", + "command_flags": [ + "NOSCRIPT" + ], + "acl_categories": [ + "SCRIPTING" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-flush.json b/tools/codis2pika/scripts/commands/function-flush.json new file mode 100644 index 
0000000000..a5ab2db75b --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-flush.json @@ -0,0 +1,41 @@ +{ + "FLUSH": { + "summary": "Deleting all functions", + "complexity": "O(N) where N is the number of functions deleted", + "group": "scripting", + "since": "7.0.0", + "arity": -2, + "container": "FUNCTION", + "function": "functionFlushCommand", + "command_flags": [ + "NOSCRIPT", + "WRITE" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "async", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-help.json b/tools/codis2pika/scripts/commands/function-help.json new file mode 100644 index 0000000000..b8213cb610 --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-help.json @@ -0,0 +1,18 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "scripting", + "since": "7.0.0", + "arity": 2, + "container": "FUNCTION", + "function": "functionHelpCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "SCRIPTING" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-kill.json b/tools/codis2pika/scripts/commands/function-kill.json new file mode 100644 index 0000000000..87432f996c --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-kill.json @@ -0,0 +1,22 @@ +{ + "KILL": { + "summary": "Kill the function currently in execution.", + "complexity": "O(1)", + "group": "scripting", + "since": "7.0.0", + "arity": 2, + "container": "FUNCTION", + "function": "functionKillCommand", + "command_flags": [ + "NOSCRIPT", + "ALLOW_BUSY" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + 
"REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ONE_SUCCEEDED" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-list.json b/tools/codis2pika/scripts/commands/function-list.json new file mode 100644 index 0000000000..6513b80cb4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-list.json @@ -0,0 +1,34 @@ +{ + "LIST": { + "summary": "List information about all the functions", + "complexity": "O(N) where N is the number of functions", + "group": "scripting", + "since": "7.0.0", + "arity": -2, + "container": "FUNCTION", + "function": "functionListCommand", + "command_flags": [ + "NOSCRIPT" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "acl_categories": [ + "SCRIPTING" + ], + "arguments": [ + { + "name": "library-name-pattern", + "type": "string", + "token": "LIBRARYNAME", + "optional": true + }, + { + "name": "withcode", + "type": "pure-token", + "token": "WITHCODE", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-load.json b/tools/codis2pika/scripts/commands/function-load.json new file mode 100644 index 0000000000..d047212793 --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-load.json @@ -0,0 +1,35 @@ +{ + "LOAD": { + "summary": "Create a function with the given arguments (name, code, description)", + "complexity": "O(1) (considering compilation time is redundant)", + "group": "scripting", + "since": "7.0.0", + "arity": -3, + "container": "FUNCTION", + "function": "functionLoadCommand", + "command_flags": [ + "NOSCRIPT", + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE", + "optional": true + }, + { + "name": "function-code", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-restore.json 
b/tools/codis2pika/scripts/commands/function-restore.json new file mode 100644 index 0000000000..ede016895f --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-restore.json @@ -0,0 +1,51 @@ +{ + "RESTORE": { + "summary": "Restore all the functions on the given payload", + "complexity": "O(N) where N is the number of functions on the payload", + "group": "scripting", + "since": "7.0.0", + "arity": -3, + "container": "FUNCTION", + "function": "functionRestoreCommand", + "command_flags": [ + "NOSCRIPT", + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "serialized-value", + "type": "string" + }, + { + "name": "policy", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "flush", + "type": "pure-token", + "token": "FLUSH" + }, + { + "name": "append", + "type": "pure-token", + "token": "APPEND" + }, + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function-stats.json b/tools/codis2pika/scripts/commands/function-stats.json new file mode 100644 index 0000000000..0d055b65b1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/function-stats.json @@ -0,0 +1,23 @@ +{ + "STATS": { + "summary": "Return information about the function currently running (name, description, duration)", + "complexity": "O(1)", + "group": "scripting", + "since": "7.0.0", + "arity": 2, + "container": "FUNCTION", + "function": "functionStatsCommand", + "command_flags": [ + "NOSCRIPT", + "ALLOW_BUSY" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:SPECIAL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/function.json b/tools/codis2pika/scripts/commands/function.json new file mode 100644 index 0000000000..3d33345cfb --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/function.json @@ -0,0 +1,9 @@ +{ + "FUNCTION": { + "summary": "A container for function commands", + "complexity": "Depends on subcommand.", + "group": "scripting", + "since": "7.0.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/geoadd.json b/tools/codis2pika/scripts/commands/geoadd.json new file mode 100644 index 0000000000..acdc575e43 --- /dev/null +++ b/tools/codis2pika/scripts/commands/geoadd.json @@ -0,0 +1,94 @@ +{ + "GEOADD": { + "summary": "Add one or more geospatial items in the geospatial index represented using a sorted set", + "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", + "group": "geo", + "since": "3.2.0", + "arity": -5, + "function": "geoaddCommand", + "history": [ + [ + "6.2.0", + "Added the `CH`, `NX` and `XX` options." + ] + ], + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "condition", + "type": "oneof", + "optional": true, + "since": "6.2.0", + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + } + ] + }, + { + "name": "change", + "token": "CH", + "type": "pure-token", + "optional": true, + "since": "6.2.0" + }, + { + "name": "longitude_latitude_member", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "member", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/geodist.json b/tools/codis2pika/scripts/commands/geodist.json new file mode 100644 index 
0000000000..87782b243a --- /dev/null +++ b/tools/codis2pika/scripts/commands/geodist.json @@ -0,0 +1,78 @@ +{ + "GEODIST": { + "summary": "Returns the distance between two members of a geospatial index", + "complexity": "O(log(N))", + "group": "geo", + "since": "3.2.0", + "arity": -4, + "function": "geodistCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member1", + "type": "string" + }, + { + "name": "member2", + "type": "string" + }, + { + "name": "unit", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/geohash.json b/tools/codis2pika/scripts/commands/geohash.json new file mode 100644 index 0000000000..4cb383ca32 --- /dev/null +++ b/tools/codis2pika/scripts/commands/geohash.json @@ -0,0 +1,48 @@ +{ + "GEOHASH": { + "summary": "Returns members of a geospatial index as standard geohash strings", + "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", + "group": "geo", + "since": "3.2.0", + "arity": -2, + "function": "geohashCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + 
"key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/geopos.json b/tools/codis2pika/scripts/commands/geopos.json new file mode 100644 index 0000000000..a41d162b2f --- /dev/null +++ b/tools/codis2pika/scripts/commands/geopos.json @@ -0,0 +1,48 @@ +{ + "GEOPOS": { + "summary": "Returns longitude and latitude of members of a geospatial index", + "complexity": "O(N) where N is the number of members requested.", + "group": "geo", + "since": "3.2.0", + "arity": -2, + "function": "geoposCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/georadius.json b/tools/codis2pika/scripts/commands/georadius.json new file mode 100644 index 0000000000..ba58db562c --- /dev/null +++ b/tools/codis2pika/scripts/commands/georadius.json @@ -0,0 +1,200 @@ +{ + "GEORADIUS": { + "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "group": "geo", + "since": "3.2.0", + "arity": -6, + "function": "georadiusCommand", + "get_keys_function": "georadiusGetKeys", + "history": [ + [ + "6.2.0", + "Added the `ANY` option for `COUNT`." 
+ ] + ], + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` argument", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "keyword": { + "keyword": "STORE", + "startfrom": 6 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "keyword": { + "keyword": "STOREDIST", + "startfrom": 6 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + }, + { + "name": "withcoord", + "token": "WITHCOORD", + "type": "pure-token", + "optional": true + }, + { + "name": "withdist", + "token": "WITHDIST", + "type": "pure-token", + "optional": true + }, + { + "name": "withhash", + "token": "WITHHASH", + "type": "pure-token", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "COUNT", + "name": "count", + "type": "integer" + }, + { + "name": "any", + "token": "ANY", + "type": "pure-token", + "optional": true, + "since": "6.2.0" + } + ] + }, + { + "name": "order", + 
"type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "token": "STORE", + "name": "key", + "type": "key", + "key_spec_index": 1, + "optional": true + }, + { + "token": "STOREDIST", + "name": "key", + "type": "key", + "key_spec_index": 2, + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/georadius_ro.json b/tools/codis2pika/scripts/commands/georadius_ro.json new file mode 100644 index 0000000000..9859d3067a --- /dev/null +++ b/tools/codis2pika/scripts/commands/georadius_ro.json @@ -0,0 +1,146 @@ +{ + "GEORADIUS_RO": { + "summary": "A read-only variant for GEORADIUS", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "group": "geo", + "since": "3.2.10", + "arity": -6, + "function": "georadiusroCommand", + "history": [ + [ + "6.2.0", + "Added the `ANY` option for `COUNT`." 
+ ] + ], + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` with the `BYRADIUS` argument", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + }, + { + "name": "withcoord", + "token": "WITHCOORD", + "type": "pure-token", + "optional": true + }, + { + "name": "withdist", + "token": "WITHDIST", + "type": "pure-token", + "optional": true + }, + { + "name": "withhash", + "token": "WITHHASH", + "type": "pure-token", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "COUNT", + "name": "count", + "type": "integer" + }, + { + "name": "any", + "token": "ANY", + "type": "pure-token", + "optional": true, + "since": "6.2.0" + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/georadiusbymember.json b/tools/codis2pika/scripts/commands/georadiusbymember.json new file mode 100644 index 0000000000..c7a399fc36 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/georadiusbymember.json @@ -0,0 +1,189 @@ +{ + "GEORADIUSBYMEMBER": { + "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "group": "geo", + "since": "3.2.0", + "arity": -5, + "function": "georadiusbymemberCommand", + "get_keys_function": "georadiusGetKeys", + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` and `FROMMEMBER` arguments", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "keyword": { + "keyword": "STORE", + "startfrom": 5 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "keyword": { + "keyword": "STOREDIST", + "startfrom": 5 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + }, + { + "name": "withcoord", + "token": "WITHCOORD", + "type": 
"pure-token", + "optional": true + }, + { + "name": "withdist", + "token": "WITHDIST", + "type": "pure-token", + "optional": true + }, + { + "name": "withhash", + "token": "WITHHASH", + "type": "pure-token", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "COUNT", + "name": "count", + "type": "integer" + }, + { + "name": "any", + "token": "ANY", + "type": "pure-token", + "optional": true + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "token": "STORE", + "name": "key", + "type": "key", + "key_spec_index": 1, + "optional": true + }, + { + "token": "STOREDIST", + "name": "key", + "type": "key", + "key_spec_index": 2, + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/georadiusbymember_ro.json b/tools/codis2pika/scripts/commands/georadiusbymember_ro.json new file mode 100644 index 0000000000..2a76838971 --- /dev/null +++ b/tools/codis2pika/scripts/commands/georadiusbymember_ro.json @@ -0,0 +1,135 @@ +{ + "GEORADIUSBYMEMBER_RO": { + "summary": "A read-only variant for GEORADIUSBYMEMBER", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "group": "geo", + "since": "3.2.10", + "arity": -5, + "function": "georadiusbymemberroCommand", + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` with the `BYRADIUS` and `FROMMEMBER` arguments", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], 
+ "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + }, + { + "name": "withcoord", + "token": "WITHCOORD", + "type": "pure-token", + "optional": true + }, + { + "name": "withdist", + "token": "WITHDIST", + "type": "pure-token", + "optional": true + }, + { + "name": "withhash", + "token": "WITHHASH", + "type": "pure-token", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "COUNT", + "name": "count", + "type": "integer" + }, + { + "name": "any", + "token": "ANY", + "type": "pure-token", + "optional": true + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/geosearch.json b/tools/codis2pika/scripts/commands/geosearch.json new file mode 100644 index 0000000000..a83dcaadb4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/geosearch.json @@ -0,0 +1,206 @@ +{ + "GEOSEARCH": { + "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle.", + "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", + "group": "geo", + "since": "6.2.0", + "arity": -7, + "function": "geosearchCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + 
"GEO" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "from", + "type": "oneof", + "arguments": [ + { + "token": "FROMMEMBER", + "name": "member", + "type": "string" + }, + { + "token": "FROMLONLAT", + "name": "longitude_latitude", + "type": "block", + "arguments": [ + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + } + ] + } + ] + }, + { + "name": "by", + "type": "oneof", + "arguments": [ + { + "name": "circle", + "type": "block", + "arguments": [ + { + "token": "BYRADIUS", + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + } + ] + }, + { + "name": "box", + "type": "block", + "arguments": [ + { + "token": "BYBOX", + "name": "width", + "type": "double" + }, + { + "name": "height", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + } + ] + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "COUNT", + 
"name": "count", + "type": "integer" + }, + { + "name": "any", + "token": "ANY", + "type": "pure-token", + "optional": true + } + ] + }, + { + "name": "withcoord", + "token": "WITHCOORD", + "type": "pure-token", + "optional": true + }, + { + "name": "withdist", + "token": "WITHDIST", + "type": "pure-token", + "optional": true + }, + { + "name": "withhash", + "token": "WITHHASH", + "type": "pure-token", + "optional": true + } + ] + } +} \ No newline at end of file diff --git a/tools/codis2pika/scripts/commands/geosearchstore.json b/tools/codis2pika/scripts/commands/geosearchstore.json new file mode 100644 index 0000000000..16db5d37e7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/geosearchstore.json @@ -0,0 +1,218 @@ +{ + "GEOSEARCHSTORE": { + "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle, and store the result in another key.", + "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", + "group": "geo", + "since": "6.2.0", + "arity": -8, + "function": "geosearchstoreCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "GEO" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "source", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "from", + "type": "oneof", + "arguments": [ + { + "token": "FROMMEMBER", + "name": "member", + "type": "string" + }, + { + "token": "FROMLONLAT", + "name": 
"longitude_latitude", + "type": "block", + "arguments": [ + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + } + ] + } + ] + }, + { + "name": "by", + "type": "oneof", + "arguments": [ + { + "name": "circle", + "type": "block", + "arguments": [ + { + "token": "BYRADIUS", + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + } + ] + }, + { + "name": "box", + "type": "block", + "arguments": [ + { + "token": "BYBOX", + "name": "width", + "type": "double" + }, + { + "name": "height", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "m" + }, + { + "name": "km", + "type": "pure-token", + "token": "km" + }, + { + "name": "ft", + "type": "pure-token", + "token": "ft" + }, + { + "name": "mi", + "type": "pure-token", + "token": "mi" + } + ] + } + ] + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "COUNT", + "name": "count", + "type": "integer" + }, + { + "name": "any", + "token": "ANY", + "type": "pure-token", + "optional": true + } + ] + }, + { + "name": "storedist", + "token": "STOREDIST", + "type": "pure-token", + "optional": true + } + ] + } +} \ No newline at end of file diff --git a/tools/codis2pika/scripts/commands/get.json b/tools/codis2pika/scripts/commands/get.json new file mode 100644 index 0000000000..342e100e96 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/get.json @@ -0,0 +1,44 @@ +{ + "GET": { + "summary": "Get the value of a key", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": 2, + "function": "getCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/getbit.json b/tools/codis2pika/scripts/commands/getbit.json new file mode 100644 index 0000000000..759784e794 --- /dev/null +++ b/tools/codis2pika/scripts/commands/getbit.json @@ -0,0 +1,48 @@ +{ + "GETBIT": { + "summary": "Returns the bit value at offset in the string value stored at key", + "complexity": "O(1)", + "group": "bitmap", + "since": "2.2.0", + "arity": 3, + "function": "getbitCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "BITMAP" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "offset", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/getdel.json b/tools/codis2pika/scripts/commands/getdel.json new file mode 100644 index 0000000000..1d2f56e301 --- /dev/null +++ b/tools/codis2pika/scripts/commands/getdel.json @@ -0,0 +1,45 @@ +{ + "GETDEL": { + "summary": "Get the value of a key and delete the key", + "complexity": "O(1)", + "group": "string", + "since": "6.2.0", + "arity": 2, + "function": "getdelCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + 
"key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/getex.json b/tools/codis2pika/scripts/commands/getex.json new file mode 100644 index 0000000000..8557bcdd9a --- /dev/null +++ b/tools/codis2pika/scripts/commands/getex.json @@ -0,0 +1,78 @@ +{ + "GETEX": { + "summary": "Get the value of a key and optionally set its expiration", + "complexity": "O(1)", + "group": "string", + "since": "6.2.0", + "arity": -2, + "function": "getexCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "notes": "RW and UPDATE because it changes the TTL", + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "expiration", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "seconds", + "type": "integer", + "token": "EX" + }, + { + "name": "milliseconds", + "type": "integer", + "token": "PX" + }, + { + "name": "unix-time-seconds", + "type": "unix-time", + "token": "EXAT" + }, + { + "name": "unix-time-milliseconds", + "type": "unix-time", + "token": "PXAT" + }, + { + "name": "persist", + "type": "pure-token", + "token": "PERSIST" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/getrange.json b/tools/codis2pika/scripts/commands/getrange.json new file mode 100644 index 0000000000..03eb58e381 --- /dev/null +++ b/tools/codis2pika/scripts/commands/getrange.json @@ -0,0 +1,51 @@ +{ + "GETRANGE": { + "summary": "Get a substring of the string stored at a key", + "complexity": 
"O(N) where N is the length of the returned string. The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings.", + "group": "string", + "since": "2.4.0", + "arity": 4, + "function": "getrangeCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "end", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/getset.json b/tools/codis2pika/scripts/commands/getset.json new file mode 100644 index 0000000000..2f6b8911cd --- /dev/null +++ b/tools/codis2pika/scripts/commands/getset.json @@ -0,0 +1,55 @@ +{ + "GETSET": { + "summary": "Set the string value of a key and return its old value", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": 3, + "function": "getsetCommand", + "deprecated_since": "6.2.0", + "replaced_by": "`SET` with the `!GET` argument", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hdel.json b/tools/codis2pika/scripts/commands/hdel.json new file mode 100644 index 0000000000..df70430d87 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/hdel.json @@ -0,0 +1,55 @@ +{ + "HDEL": { + "summary": "Delete one or more hash fields", + "complexity": "O(N) where N is the number of fields to be removed.", + "group": "hash", + "since": "2.0.0", + "arity": -3, + "function": "hdelCommand", + "history": [ + [ + "2.4.0", + "Accepts multiple `field` arguments." + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hello.json b/tools/codis2pika/scripts/commands/hello.json new file mode 100644 index 0000000000..8e80a81cfa --- /dev/null +++ b/tools/codis2pika/scripts/commands/hello.json @@ -0,0 +1,63 @@ +{ + "HELLO": { + "summary": "Handshake with Redis", + "complexity": "O(1)", + "group": "connection", + "since": "6.0.0", + "arity": -1, + "function": "helloCommand", + "history": [ + [ + "6.2.0", + "`protover` made optional; when called without arguments the command reports the current connection's context." 
+ ] + ], + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "NO_AUTH", + "SENTINEL", + "ALLOW_BUSY" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "arguments", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "protover", + "type": "integer" + }, + { + "token": "AUTH", + "name": "username_password", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "password", + "type": "string" + } + ] + }, + { + "token": "SETNAME", + "name": "clientname", + "type": "string", + "optional": true + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hexists.json b/tools/codis2pika/scripts/commands/hexists.json new file mode 100644 index 0000000000..0518e62092 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hexists.json @@ -0,0 +1,47 @@ +{ + "HEXISTS": { + "summary": "Determine if a hash field exists", + "complexity": "O(1)", + "group": "hash", + "since": "2.0.0", + "arity": 3, + "function": "hexistsCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hget.json b/tools/codis2pika/scripts/commands/hget.json new file mode 100644 index 0000000000..12e1fbfbc8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hget.json @@ -0,0 +1,48 @@ +{ + "HGET": { + "summary": "Get the value of a hash field", + "complexity": "O(1)", + "group": "hash", + "since": "2.0.0", + "arity": 3, + "function": "hgetCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ 
+ "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hgetall.json b/tools/codis2pika/scripts/commands/hgetall.json new file mode 100644 index 0000000000..9fef006468 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hgetall.json @@ -0,0 +1,46 @@ +{ + "HGETALL": { + "summary": "Get all the fields and values in a hash", + "complexity": "O(N) where N is the size of the hash.", + "group": "hash", + "since": "2.0.0", + "arity": 2, + "function": "hgetallCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "HASH" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hincrby.json b/tools/codis2pika/scripts/commands/hincrby.json new file mode 100644 index 0000000000..124e365f12 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hincrby.json @@ -0,0 +1,54 @@ +{ + "HINCRBY": { + "summary": "Increment the integer value of a hash field by the given number", + "complexity": "O(1)", + "group": "hash", + "since": "2.0.0", + "arity": 4, + "function": "hincrbyCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": 
"key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + }, + { + "name": "increment", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hincrbyfloat.json b/tools/codis2pika/scripts/commands/hincrbyfloat.json new file mode 100644 index 0000000000..b4c81d1811 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hincrbyfloat.json @@ -0,0 +1,54 @@ +{ + "HINCRBYFLOAT": { + "summary": "Increment the float value of a hash field by the given amount", + "complexity": "O(1)", + "group": "hash", + "since": "2.6.0", + "arity": 4, + "function": "hincrbyfloatCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + }, + { + "name": "increment", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hkeys.json b/tools/codis2pika/scripts/commands/hkeys.json new file mode 100644 index 0000000000..243566aa96 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hkeys.json @@ -0,0 +1,46 @@ +{ + "HKEYS": { + "summary": "Get all the fields in a hash", + "complexity": "O(N) where N is the size of the hash.", + "group": "hash", + "since": "2.0.0", + "arity": 2, + "function": "hkeysCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "HASH" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git 
a/tools/codis2pika/scripts/commands/hlen.json b/tools/codis2pika/scripts/commands/hlen.json new file mode 100644 index 0000000000..8320651aea --- /dev/null +++ b/tools/codis2pika/scripts/commands/hlen.json @@ -0,0 +1,43 @@ +{ + "HLEN": { + "summary": "Get the number of fields in a hash", + "complexity": "O(1)", + "group": "hash", + "since": "2.0.0", + "arity": 2, + "function": "hlenCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hmget.json b/tools/codis2pika/scripts/commands/hmget.json new file mode 100644 index 0000000000..d7e7c8e45b --- /dev/null +++ b/tools/codis2pika/scripts/commands/hmget.json @@ -0,0 +1,49 @@ +{ + "HMGET": { + "summary": "Get the values of all the given hash fields", + "complexity": "O(N) where N is the number of fields being requested.", + "group": "hash", + "since": "2.0.0", + "arity": -3, + "function": "hmgetCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hmset.json b/tools/codis2pika/scripts/commands/hmset.json new file mode 100644 index 0000000000..2e962d946f --- /dev/null +++ b/tools/codis2pika/scripts/commands/hmset.json @@ -0,0 +1,65 @@ +{ + "HMSET": { + "summary": "Set multiple hash fields to multiple values", + 
"complexity": "O(N) where N is the number of fields being set.", + "group": "hash", + "since": "2.0.0", + "arity": -4, + "function": "hsetCommand", + "deprecated_since": "4.0.0", + "replaced_by": "`HSET` with multiple field-value pairs", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hrandfield.json b/tools/codis2pika/scripts/commands/hrandfield.json new file mode 100644 index 0000000000..ef2ff41996 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hrandfield.json @@ -0,0 +1,63 @@ +{ + "HRANDFIELD": { + "summary": "Get one or multiple random fields from a hash", + "complexity": "O(N) where N is the number of fields returned", + "group": "hash", + "since": "6.2.0", + "arity": -2, + "function": "hrandfieldCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "HASH" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "options", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "withvalues", + "token": "WITHVALUES", + "type": "pure-token", + "optional": true + } + ] + } + ] + } +} 
diff --git a/tools/codis2pika/scripts/commands/hscan.json b/tools/codis2pika/scripts/commands/hscan.json new file mode 100644 index 0000000000..7234526151 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hscan.json @@ -0,0 +1,62 @@ +{ + "HSCAN": { + "summary": "Incrementally iterate hash fields and associated values", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", + "group": "hash", + "since": "2.8.0", + "arity": -3, + "function": "hscanCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "HASH" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "cursor", + "type": "integer" + }, + { + "token": "MATCH", + "name": "pattern", + "type": "pattern", + "optional": true + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hset.json b/tools/codis2pika/scripts/commands/hset.json new file mode 100644 index 0000000000..ee5efa78be --- /dev/null +++ b/tools/codis2pika/scripts/commands/hset.json @@ -0,0 +1,66 @@ +{ + "HSET": { + "summary": "Set the string value of a hash field", + "complexity": "O(1) for each field/value pair added, so O(N) to add N field/value pairs when the command is called with multiple field/value pairs.", + "group": "hash", + "since": "2.0.0", + "arity": -4, + "function": "hsetCommand", + "history": [ + [ + "4.0.0", + "Accepts multiple `field` and `value` arguments." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hsetnx.json b/tools/codis2pika/scripts/commands/hsetnx.json new file mode 100644 index 0000000000..abd0ccafb5 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hsetnx.json @@ -0,0 +1,53 @@ +{ + "HSETNX": { + "summary": "Set the value of a hash field, only if the field does not exist", + "complexity": "O(1)", + "group": "hash", + "since": "2.0.0", + "arity": 4, + "function": "hsetnxCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hstrlen.json b/tools/codis2pika/scripts/commands/hstrlen.json new file mode 100644 index 0000000000..4ba4df7ad8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hstrlen.json @@ -0,0 +1,47 @@ +{ + "HSTRLEN": { + "summary": "Get the length of the value of a hash field", + "complexity": "O(1)", + "group": "hash", + "since": "3.2.0", + "arity": 3, + "function": "hstrlenCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + 
"acl_categories": [ + "HASH" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/hvals.json b/tools/codis2pika/scripts/commands/hvals.json new file mode 100644 index 0000000000..829f63dea6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/hvals.json @@ -0,0 +1,46 @@ +{ + "HVALS": { + "summary": "Get all the values in a hash", + "complexity": "O(N) where N is the size of the hash.", + "group": "hash", + "since": "2.0.0", + "arity": 2, + "function": "hvalsCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "HASH" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/incr.json b/tools/codis2pika/scripts/commands/incr.json new file mode 100644 index 0000000000..09a4960560 --- /dev/null +++ b/tools/codis2pika/scripts/commands/incr.json @@ -0,0 +1,46 @@ +{ + "INCR": { + "summary": "Increment the integer value of a key by one", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": 2, + "function": "incrCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": 
"key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/incrby.json b/tools/codis2pika/scripts/commands/incrby.json new file mode 100644 index 0000000000..27418114ad --- /dev/null +++ b/tools/codis2pika/scripts/commands/incrby.json @@ -0,0 +1,50 @@ +{ + "INCRBY": { + "summary": "Increment the integer value of a key by the given amount", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": 3, + "function": "incrbyCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "increment", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/incrbyfloat.json b/tools/codis2pika/scripts/commands/incrbyfloat.json new file mode 100644 index 0000000000..f28b91be67 --- /dev/null +++ b/tools/codis2pika/scripts/commands/incrbyfloat.json @@ -0,0 +1,50 @@ +{ + "INCRBYFLOAT": { + "summary": "Increment the float value of a key by the given amount", + "complexity": "O(1)", + "group": "string", + "since": "2.6.0", + "arity": 3, + "function": "incrbyfloatCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "increment", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/info.json b/tools/codis2pika/scripts/commands/info.json new file mode 100644 index 
0000000000..612294d34b --- /dev/null +++ b/tools/codis2pika/scripts/commands/info.json @@ -0,0 +1,37 @@ +{ + "INFO": { + "summary": "Get information and statistics about the server", + "complexity": "O(1)", + "group": "server", + "since": "1.0.0", + "arity": -1, + "function": "infoCommand", + "history": [ + [ + "7.0.0", + "Added support for taking multiple section arguments." + ] + ], + "command_flags": [ + "LOADING", + "STALE", + "SENTINEL" + ], + "acl_categories": [ + "DANGEROUS" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:SPECIAL" + ], + "arguments": [ + { + "name": "section", + "type": "string", + "multiple": true, + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/keys.json b/tools/codis2pika/scripts/commands/keys.json new file mode 100644 index 0000000000..546241f95c --- /dev/null +++ b/tools/codis2pika/scripts/commands/keys.json @@ -0,0 +1,27 @@ +{ + "KEYS": { + "summary": "Find all keys matching the given pattern", + "complexity": "O(N) with N being the number of keys in the database, under the assumption that the key names in the database and the given pattern have limited length.", + "group": "generic", + "since": "1.0.0", + "arity": 2, + "function": "keysCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE", + "DANGEROUS" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "arguments": [ + { + "name": "pattern", + "type": "pattern" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lastsave.json b/tools/codis2pika/scripts/commands/lastsave.json new file mode 100644 index 0000000000..9290584587 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lastsave.json @@ -0,0 +1,22 @@ +{ + "LASTSAVE": { + "summary": "Get the UNIX time stamp of the last successful save to disk", + "complexity": "O(1)", + "group": "server", + "since": "1.0.0", + "arity": 1, + "function": "lastsaveCommand", + 
"command_flags": [ + "LOADING", + "STALE", + "FAST" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "acl_categories": [ + "ADMIN", + "DANGEROUS" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency-doctor.json b/tools/codis2pika/scripts/commands/latency-doctor.json new file mode 100644 index 0000000000..129b32358e --- /dev/null +++ b/tools/codis2pika/scripts/commands/latency-doctor.json @@ -0,0 +1,22 @@ +{ + "DOCTOR": { + "summary": "Return a human readable latency analysis report.", + "complexity": "O(1)", + "group": "server", + "since": "2.8.13", + "arity": 2, + "container": "LATENCY", + "function": "latencyCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:SPECIAL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency-graph.json b/tools/codis2pika/scripts/commands/latency-graph.json new file mode 100644 index 0000000000..0644c1cb00 --- /dev/null +++ b/tools/codis2pika/scripts/commands/latency-graph.json @@ -0,0 +1,28 @@ +{ + "GRAPH": { + "summary": "Return a latency graph for the event.", + "complexity": "O(1)", + "group": "server", + "since": "2.8.13", + "arity": 3, + "container": "LATENCY", + "function": "latencyCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:SPECIAL" + ], + "arguments": [ + { + "name": "event", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency-help.json b/tools/codis2pika/scripts/commands/latency-help.json new file mode 100644 index 0000000000..682beee2ad --- /dev/null +++ b/tools/codis2pika/scripts/commands/latency-help.json @@ -0,0 +1,15 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands.", + "complexity": "O(1)", + "group": "server", + "since": "2.8.13", + "arity": 2, + 
"container": "LATENCY", + "function": "latencyCommand", + "command_flags": [ + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency-histogram.json b/tools/codis2pika/scripts/commands/latency-histogram.json new file mode 100644 index 0000000000..dc14d47f83 --- /dev/null +++ b/tools/codis2pika/scripts/commands/latency-histogram.json @@ -0,0 +1,30 @@ +{ + "HISTOGRAM": { + "summary": "Return the cumulative distribution of latencies of a subset of commands or all.", + "complexity": "O(N) where N is the number of commands with latency information being retrieved.", + "group": "server", + "since": "7.0.0", + "arity": -2, + "container": "LATENCY", + "function": "latencyCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:SPECIAL" + ], + "arguments": [ + { + "name": "COMMAND", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency-history.json b/tools/codis2pika/scripts/commands/latency-history.json new file mode 100644 index 0000000000..6d9267064c --- /dev/null +++ b/tools/codis2pika/scripts/commands/latency-history.json @@ -0,0 +1,28 @@ +{ + "HISTORY": { + "summary": "Return timestamp-latency samples for the event.", + "complexity": "O(1)", + "group": "server", + "since": "2.8.13", + "arity": 3, + "container": "LATENCY", + "function": "latencyCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:SPECIAL" + ], + "arguments": [ + { + "name": "event", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency-latest.json b/tools/codis2pika/scripts/commands/latency-latest.json new file mode 100644 index 0000000000..f513689c56 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/latency-latest.json @@ -0,0 +1,22 @@ +{ + "LATEST": { + "summary": "Return the latest latency samples for all events.", + "complexity": "O(1)", + "group": "server", + "since": "2.8.13", + "arity": 2, + "container": "LATENCY", + "function": "latencyCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:SPECIAL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency-reset.json b/tools/codis2pika/scripts/commands/latency-reset.json new file mode 100644 index 0000000000..30295cc057 --- /dev/null +++ b/tools/codis2pika/scripts/commands/latency-reset.json @@ -0,0 +1,29 @@ +{ + "RESET": { + "summary": "Reset latency data for one or more events.", + "complexity": "O(1)", + "group": "server", + "since": "2.8.13", + "arity": -2, + "container": "LATENCY", + "function": "latencyCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "event", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/latency.json b/tools/codis2pika/scripts/commands/latency.json new file mode 100644 index 0000000000..1845fd4ff0 --- /dev/null +++ b/tools/codis2pika/scripts/commands/latency.json @@ -0,0 +1,9 @@ +{ + "LATENCY": { + "summary": "A container for latency diagnostics commands", + "complexity": "Depends on subcommand.", + "group": "server", + "since": "2.8.13", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/lcs.json b/tools/codis2pika/scripts/commands/lcs.json new file mode 100644 index 0000000000..193e6a8770 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lcs.json @@ -0,0 +1,72 @@ +{ + "LCS": { + "summary": "Find longest common substring", + "complexity": "O(N*M) where N and M are 
the lengths of s1 and s2, respectively", + "group": "string", + "since": "7.0.0", + "arity": -3, + "function": "lcsCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key1", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key2", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "len", + "token": "LEN", + "type": "pure-token", + "optional": true + }, + { + "name": "idx", + "token": "IDX", + "type": "pure-token", + "optional": true + }, + { + "token": "MINMATCHLEN", + "name": "len", + "type": "integer", + "optional": true + }, + { + "name": "withmatchlen", + "token": "WITHMATCHLEN", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lindex.json b/tools/codis2pika/scripts/commands/lindex.json new file mode 100644 index 0000000000..b2397241f1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lindex.json @@ -0,0 +1,47 @@ +{ + "LINDEX": { + "summary": "Get an element from a list by its index", + "complexity": "O(N) where N is the number of elements to traverse to get to the element at index. 
This makes asking for the first or the last element of the list O(1).", + "group": "list", + "since": "1.0.0", + "arity": 3, + "function": "lindexCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "index", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/linsert.json b/tools/codis2pika/scripts/commands/linsert.json new file mode 100644 index 0000000000..71046fa583 --- /dev/null +++ b/tools/codis2pika/scripts/commands/linsert.json @@ -0,0 +1,68 @@ +{ + "LINSERT": { + "summary": "Insert an element before or after another element in a list", + "complexity": "O(N) where N is the number of elements to traverse before seeing the value pivot. 
This means that inserting somewhere on the left end on the list (head) can be considered O(1) and inserting somewhere on the right end (tail) is O(N).", + "group": "list", + "since": "2.2.0", + "arity": 5, + "function": "linsertCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "before", + "type": "pure-token", + "token": "BEFORE" + }, + { + "name": "after", + "type": "pure-token", + "token": "AFTER" + } + ] + }, + { + "name": "pivot", + "type": "string" + }, + { + "name": "element", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/llen.json b/tools/codis2pika/scripts/commands/llen.json new file mode 100644 index 0000000000..720b23778c --- /dev/null +++ b/tools/codis2pika/scripts/commands/llen.json @@ -0,0 +1,43 @@ +{ + "LLEN": { + "summary": "Get the length of a list", + "complexity": "O(1)", + "group": "list", + "since": "1.0.0", + "arity": 2, + "function": "llenCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lmove.json b/tools/codis2pika/scripts/commands/lmove.json new file mode 100644 index 0000000000..060cc7a321 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lmove.json @@ -0,0 +1,100 @@ +{ + "LMOVE": { + "summary": "Pop an element from a list, push it to another 
list and return it", + "complexity": "O(1)", + "group": "list", + "since": "6.2.0", + "arity": 5, + "function": "lmoveCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "wherefrom", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "whereto", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lmpop.json b/tools/codis2pika/scripts/commands/lmpop.json new file mode 100644 index 0000000000..0821e4c689 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lmpop.json @@ -0,0 +1,72 @@ +{ + "LMPOP": { + "summary": "Pop elements from a list", + "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", + "group": "list", + "since": "7.0.0", + "arity": -4, + "function": "lmpopCommand", + "get_keys_function": "lmpopGetKeys", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + 
} + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lolwut.json b/tools/codis2pika/scripts/commands/lolwut.json new file mode 100644 index 0000000000..cf0a98504e --- /dev/null +++ b/tools/codis2pika/scripts/commands/lolwut.json @@ -0,0 +1,21 @@ +{ + "LOLWUT": { + "summary": "Display some computer art and the Redis version", + "group": "server", + "since": "5.0.0", + "arity": -1, + "function": "lolwutCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "arguments": [ + { + "token": "VERSION", + "name": "version", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lpop.json b/tools/codis2pika/scripts/commands/lpop.json new file mode 100644 index 0000000000..cd3de0c7ad --- /dev/null +++ b/tools/codis2pika/scripts/commands/lpop.json @@ -0,0 +1,57 @@ +{ + "LPOP": { + "summary": "Remove and get the first elements in a list", + "complexity": "O(N) where N is the number of elements returned", + "group": "list", + "since": "1.0.0", + "arity": -2, + "function": "lpopCommand", + "history": [ + [ + "6.2.0", + "Added the `count` argument." 
+ ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true, + "since": "6.2.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lpos.json b/tools/codis2pika/scripts/commands/lpos.json new file mode 100644 index 0000000000..3aea3191fc --- /dev/null +++ b/tools/codis2pika/scripts/commands/lpos.json @@ -0,0 +1,65 @@ +{ + "LPOS": { + "summary": "Return the index of matching elements on a list", + "complexity": "O(N) where N is the number of elements in the list, for the average case. When searching for elements near the head or the tail of the list, or when the MAXLEN option is provided, the command may run in constant time.", + "group": "list", + "since": "6.0.6", + "arity": -3, + "function": "lposCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string" + }, + { + "token": "RANK", + "name": "rank", + "type": "integer", + "optional": true + }, + { + "token": "COUNT", + "name": "num-matches", + "type": "integer", + "optional": true + }, + { + "token": "MAXLEN", + "name": "len", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lpush.json b/tools/codis2pika/scripts/commands/lpush.json new file mode 100644 index 0000000000..23a9c36b43 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/lpush.json @@ -0,0 +1,56 @@ +{ + "LPUSH": { + "summary": "Prepend one or multiple elements to a list", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "group": "list", + "since": "1.0.0", + "arity": -3, + "function": "lpushCommand", + "history": [ + [ + "2.4.0", + "Accepts multiple `element` arguments." + ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lpushx.json b/tools/codis2pika/scripts/commands/lpushx.json new file mode 100644 index 0000000000..b1629b6298 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lpushx.json @@ -0,0 +1,56 @@ +{ + "LPUSHX": { + "summary": "Prepend an element to a list, only if the list exists", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "group": "list", + "since": "2.2.0", + "arity": -3, + "function": "lpushxCommand", + "history": [ + [ + "4.0.0", + "Accepts multiple `element` arguments." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lrange.json b/tools/codis2pika/scripts/commands/lrange.json new file mode 100644 index 0000000000..8fa9352bb1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lrange.json @@ -0,0 +1,51 @@ +{ + "LRANGE": { + "summary": "Get a range of elements from a list", + "complexity": "O(S+N) where S is the distance of start offset from HEAD for small lists, from nearest end (HEAD or TAIL) for large lists; and N is the number of elements in the specified range.", + "group": "list", + "since": "1.0.0", + "arity": 4, + "function": "lrangeCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lrem.json b/tools/codis2pika/scripts/commands/lrem.json new file mode 100644 index 0000000000..c845cc04a6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lrem.json @@ -0,0 +1,51 @@ +{ + "LREM": { + "summary": "Remove elements from a list", + "complexity": "O(N+M) where N is the length of the list and M is the number of elements removed.", + "group": "list", + "since": "1.0.0", + "arity": 4, + "function": "lremCommand", + 
"command_flags": [ + "WRITE" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer" + }, + { + "name": "element", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/lset.json b/tools/codis2pika/scripts/commands/lset.json new file mode 100644 index 0000000000..9a9e4fd5e9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/lset.json @@ -0,0 +1,52 @@ +{ + "LSET": { + "summary": "Set the value of an element in a list by its index", + "complexity": "O(N) where N is the length of the list. Setting either the first or the last element of the list is O(1).", + "group": "list", + "since": "1.0.0", + "arity": 4, + "function": "lsetCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "index", + "type": "integer" + }, + { + "name": "element", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/ltrim.json b/tools/codis2pika/scripts/commands/ltrim.json new file mode 100644 index 0000000000..f177d8f66c --- /dev/null +++ b/tools/codis2pika/scripts/commands/ltrim.json @@ -0,0 +1,51 @@ +{ + "LTRIM": { + "summary": "Trim a list to the specified range", + "complexity": "O(N) where N is the number of elements to be removed by the operation.", + "group": "list", + "since": "1.0.0", + "arity": 4, + "function": "ltrimCommand", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "LIST" 
+ ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/memory-doctor.json b/tools/codis2pika/scripts/commands/memory-doctor.json new file mode 100644 index 0000000000..b6691dfa0e --- /dev/null +++ b/tools/codis2pika/scripts/commands/memory-doctor.json @@ -0,0 +1,16 @@ +{ + "DOCTOR": { + "summary": "Outputs memory problems report", + "complexity": "O(1)", + "group": "server", + "since": "4.0.0", + "arity": 2, + "container": "MEMORY", + "function": "memoryCommand", + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:SPECIAL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/memory-help.json b/tools/codis2pika/scripts/commands/memory-help.json new file mode 100644 index 0000000000..a1cda71f84 --- /dev/null +++ b/tools/codis2pika/scripts/commands/memory-help.json @@ -0,0 +1,15 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "server", + "since": "4.0.0", + "arity": 2, + "container": "MEMORY", + "function": "memoryCommand", + "command_flags": [ + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/memory-malloc-stats.json b/tools/codis2pika/scripts/commands/memory-malloc-stats.json new file mode 100644 index 0000000000..5106781fe8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/memory-malloc-stats.json @@ -0,0 +1,16 @@ +{ + "MALLOC-STATS": { + "summary": "Show allocator internal stats", + "complexity": "Depends on how much memory is allocated, could be slow", + "group": "server", + "since": "4.0.0", + "arity": 2, + "container": "MEMORY", 
+ "function": "memoryCommand", + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:SPECIAL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/memory-purge.json b/tools/codis2pika/scripts/commands/memory-purge.json new file mode 100644 index 0000000000..b862534d16 --- /dev/null +++ b/tools/codis2pika/scripts/commands/memory-purge.json @@ -0,0 +1,15 @@ +{ + "PURGE": { + "summary": "Ask the allocator to release memory", + "complexity": "Depends on how much memory is allocated, could be slow", + "group": "server", + "since": "4.0.0", + "arity": 2, + "container": "MEMORY", + "function": "memoryCommand", + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/memory-stats.json b/tools/codis2pika/scripts/commands/memory-stats.json new file mode 100644 index 0000000000..76e6baa3e2 --- /dev/null +++ b/tools/codis2pika/scripts/commands/memory-stats.json @@ -0,0 +1,16 @@ +{ + "STATS": { + "summary": "Show memory usage details", + "complexity": "O(1)", + "group": "server", + "since": "4.0.0", + "arity": 2, + "container": "MEMORY", + "function": "memoryCommand", + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:SPECIAL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/memory-usage.json b/tools/codis2pika/scripts/commands/memory-usage.json new file mode 100644 index 0000000000..fa6b7c7e84 --- /dev/null +++ b/tools/codis2pika/scripts/commands/memory-usage.json @@ -0,0 +1,46 @@ +{ + "USAGE": { + "summary": "Estimate the memory usage of a key", + "complexity": "O(N) where N is the number of samples.", + "group": "server", + "since": "4.0.0", + "arity": -3, + "container": "MEMORY", + "function": "memoryCommand", + "command_flags": [ + "READONLY" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + 
"lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "token": "SAMPLES", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/memory.json b/tools/codis2pika/scripts/commands/memory.json new file mode 100644 index 0000000000..d3fa02f5a1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/memory.json @@ -0,0 +1,9 @@ +{ + "MEMORY": { + "summary": "A container for memory diagnostics commands", + "complexity": "Depends on subcommand.", + "group": "server", + "since": "4.0.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/mget.json b/tools/codis2pika/scripts/commands/mget.json new file mode 100644 index 0000000000..fdff809980 --- /dev/null +++ b/tools/codis2pika/scripts/commands/mget.json @@ -0,0 +1,48 @@ +{ + "MGET": { + "summary": "Get the values of all the given keys", + "complexity": "O(N) where N is the number of keys to retrieve.", + "group": "string", + "since": "1.0.0", + "arity": -2, + "function": "mgetCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "command_tips": [ + "REQUEST_POLICY:MULTI_SHARD" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/migrate.json b/tools/codis2pika/scripts/commands/migrate.json new file mode 100644 index 0000000000..b9a52aa69d --- /dev/null +++ b/tools/codis2pika/scripts/commands/migrate.json @@ -0,0 +1,169 @@ +{ + "MIGRATE": { + "summary": "Atomically transfer a key from a Redis instance to another one.", + "complexity": "This command actually executes a DUMP+DEL in the source instance, and a 
RESTORE in the target instance. See the pages of these commands for time complexity. Also an O(N) data transfer between the two instances is performed.", + "group": "generic", + "since": "2.6.0", + "arity": -6, + "function": "migrateCommand", + "get_keys_function": "migrateGetKeys", + "history": [ + [ + "3.0.0", + "Added the `COPY` and `REPLACE` options." + ], + [ + "3.0.6", + "Added the `KEYS` option." + ], + [ + "4.0.7", + "Added the `AUTH` option." + ], + [ + "6.0.0", + "Added the `AUTH2` option." + ] + ], + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "KEYSPACE", + "DANGEROUS" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 3 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RW", + "ACCESS", + "DELETE", + "INCOMPLETE" + ], + "begin_search": { + "keyword": { + "keyword": "KEYS", + "startfrom": -2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "name": "key_or_empty_string", + "type": "oneof", + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "empty_string", + "type": "pure-token", + "token": "\"\"" + } + ] + }, + { + "name": "destination-db", + "type": "integer" + }, + { + "name": "timeout", + "type": "integer" + }, + { + "name": "copy", + "token": "COPY", + "type": "pure-token", + "optional": true, + "since": "3.0.0" + }, + { + "name": "replace", + "token": "REPLACE", + "type": "pure-token", + "optional": true, + "since": "3.0.0" + }, + { + "name": "authentication", + "type": "oneof", + "optional": true, + "arguments": [ + { + "token": "AUTH", + "name": "password", + "type": "string", + "optional": true, + "since": "4.0.7" + }, + { + "token": "AUTH2", + 
"name": "username_password", + "type": "block", + "optional": true, + "since": "6.0.0", + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "password", + "type": "string" + } + ] + } + ] + }, + { + "token": "KEYS", + "name": "key", + "type": "key", + "key_spec_index": 1, + "optional": true, + "multiple": true, + "since": "3.0.6" + } + ] + } +} \ No newline at end of file diff --git a/tools/codis2pika/scripts/commands/module-help.json b/tools/codis2pika/scripts/commands/module-help.json new file mode 100644 index 0000000000..b8db8aee3b --- /dev/null +++ b/tools/codis2pika/scripts/commands/module-help.json @@ -0,0 +1,15 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "server", + "since": "5.0.0", + "arity": 2, + "container": "MODULE", + "function": "moduleCommand", + "command_flags": [ + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/module-list.json b/tools/codis2pika/scripts/commands/module-list.json new file mode 100644 index 0000000000..ed6e7d19be --- /dev/null +++ b/tools/codis2pika/scripts/commands/module-list.json @@ -0,0 +1,18 @@ +{ + "LIST": { + "summary": "List all modules loaded by the server", + "complexity": "O(N) where N is the number of loaded modules.", + "group": "server", + "since": "4.0.0", + "arity": 2, + "container": "MODULE", + "function": "moduleCommand", + "command_flags": [ + "ADMIN", + "NOSCRIPT" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/module-load.json b/tools/codis2pika/scripts/commands/module-load.json new file mode 100644 index 0000000000..84e6d35967 --- /dev/null +++ b/tools/codis2pika/scripts/commands/module-load.json @@ -0,0 +1,29 @@ +{ + "LOAD": { + "summary": "Load a module", + "complexity": "O(1)", + "group": "server", + "since": "4.0.0", + "arity": -3, + "container": "MODULE", + "function": "moduleCommand", + "command_flags": 
[ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT", + "PROTECTED" + ], + "arguments": [ + { + "name": "path", + "type": "string" + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/module-loadex.json b/tools/codis2pika/scripts/commands/module-loadex.json new file mode 100644 index 0000000000..97e8f2b582 --- /dev/null +++ b/tools/codis2pika/scripts/commands/module-loadex.json @@ -0,0 +1,54 @@ +{ + "LOADEX": { + "summary": "Load a module with extended parameters", + "complexity": "O(1)", + "group": "server", + "since": "7.0.0", + "arity": -3, + "container": "MODULE", + "function": "moduleCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT", + "PROTECTED" + ], + "arguments": [ + { + "name": "path", + "type": "string" + }, + { + "name": "configs", + "token": "CONFIG", + "type": "block", + "multiple": true, + "multiple_token": true, + "optional": true, + "arguments": [ + { + "name": "name", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + }, + { + "name": "args", + "token": "ARGS", + "type": "block", + "multiple": true, + "optional": true, + "arguments": [ + { + "name": "arg", + "type": "string" + } + ] + } + ] + } +} \ No newline at end of file diff --git a/tools/codis2pika/scripts/commands/module-unload.json b/tools/codis2pika/scripts/commands/module-unload.json new file mode 100644 index 0000000000..8820ba3aa7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/module-unload.json @@ -0,0 +1,23 @@ +{ + "UNLOAD": { + "summary": "Unload a module", + "complexity": "O(1)", + "group": "server", + "since": "4.0.0", + "arity": 3, + "container": "MODULE", + "function": "moduleCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT", + "PROTECTED" + ], + "arguments": [ + { + "name": "name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/module.json 
b/tools/codis2pika/scripts/commands/module.json new file mode 100644 index 0000000000..f04d5daf67 --- /dev/null +++ b/tools/codis2pika/scripts/commands/module.json @@ -0,0 +1,9 @@ +{ + "MODULE": { + "summary": "A container for module commands", + "complexity": "Depends on subcommand.", + "group": "server", + "since": "4.0.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/monitor.json b/tools/codis2pika/scripts/commands/monitor.json new file mode 100644 index 0000000000..a305c4fcef --- /dev/null +++ b/tools/codis2pika/scripts/commands/monitor.json @@ -0,0 +1,16 @@ +{ + "MONITOR": { + "summary": "Listen for all requests received by the server in real time", + "group": "server", + "since": "1.0.0", + "arity": 1, + "function": "monitorCommand", + "history": [], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/move.json b/tools/codis2pika/scripts/commands/move.json new file mode 100644 index 0000000000..0c7c71e058 --- /dev/null +++ b/tools/codis2pika/scripts/commands/move.json @@ -0,0 +1,49 @@ +{ + "MOVE": { + "summary": "Move a key to another database", + "complexity": "O(1)", + "group": "generic", + "since": "1.0.0", + "arity": 3, + "function": "moveCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "db", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/mset.json b/tools/codis2pika/scripts/commands/mset.json new file mode 100644 index 0000000000..09b9d6d265 --- /dev/null +++ b/tools/codis2pika/scripts/commands/mset.json @@ -0,0 +1,59 @@ +{ + "MSET": { + "summary": "Set multiple keys to 
multiple values", + "complexity": "O(N) where N is the number of keys to set.", + "group": "string", + "since": "1.0.1", + "arity": -3, + "function": "msetCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STRING" + ], + "command_tips": [ + "REQUEST_POLICY:MULTI_SHARD", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 2, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/msetnx.json b/tools/codis2pika/scripts/commands/msetnx.json new file mode 100644 index 0000000000..544ac64505 --- /dev/null +++ b/tools/codis2pika/scripts/commands/msetnx.json @@ -0,0 +1,59 @@ +{ + "MSETNX": { + "summary": "Set multiple keys to multiple values, only if none of the keys exist", + "complexity": "O(N) where N is the number of keys to set.", + "group": "string", + "since": "1.0.1", + "arity": -3, + "function": "msetnxCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STRING" + ], + "command_tips": [ + "REQUEST_POLICY:MULTI_SHARD", + "RESPONSE_POLICY:AGG_MIN" + ], + "key_specs": [ + { + "flags": [ + "OW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 2, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/multi.json b/tools/codis2pika/scripts/commands/multi.json new file mode 100644 index 
0000000000..f1299a6f4d --- /dev/null +++ b/tools/codis2pika/scripts/commands/multi.json @@ -0,0 +1,20 @@ +{ + "MULTI": { + "summary": "Mark the start of a transaction block", + "complexity": "O(1)", + "group": "transactions", + "since": "1.2.0", + "arity": 1, + "function": "multiCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "ALLOW_BUSY" + ], + "acl_categories": [ + "TRANSACTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/object-encoding.json b/tools/codis2pika/scripts/commands/object-encoding.json new file mode 100644 index 0000000000..2d39a07ef0 --- /dev/null +++ b/tools/codis2pika/scripts/commands/object-encoding.json @@ -0,0 +1,46 @@ +{ + "ENCODING": { + "summary": "Inspect the internal encoding of a Redis object", + "complexity": "O(1)", + "group": "generic", + "since": "2.2.3", + "arity": 3, + "container": "OBJECT", + "function": "objectCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/object-freq.json b/tools/codis2pika/scripts/commands/object-freq.json new file mode 100644 index 0000000000..d184f2e7e8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/object-freq.json @@ -0,0 +1,46 @@ +{ + "FREQ": { + "summary": "Get the logarithmic access frequency counter of a Redis object", + "complexity": "O(1)", + "group": "generic", + "since": "4.0.0", + "arity": 3, + "container": "OBJECT", + "function": "objectCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + 
"begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/object-help.json b/tools/codis2pika/scripts/commands/object-help.json new file mode 100644 index 0000000000..22864bafa6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/object-help.json @@ -0,0 +1,18 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "generic", + "since": "6.2.0", + "arity": 2, + "container": "OBJECT", + "function": "objectCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "KEYSPACE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/object-idletime.json b/tools/codis2pika/scripts/commands/object-idletime.json new file mode 100644 index 0000000000..162d6f514e --- /dev/null +++ b/tools/codis2pika/scripts/commands/object-idletime.json @@ -0,0 +1,46 @@ +{ + "IDLETIME": { + "summary": "Get the time since a Redis object was last accessed", + "complexity": "O(1)", + "group": "generic", + "since": "2.2.3", + "arity": 3, + "container": "OBJECT", + "function": "objectCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/object-refcount.json b/tools/codis2pika/scripts/commands/object-refcount.json new file mode 100644 index 0000000000..0f36f50924 --- /dev/null +++ b/tools/codis2pika/scripts/commands/object-refcount.json @@ -0,0 +1,46 @@ +{ + "REFCOUNT": { + "summary": 
"Get the number of references to the value of the key", + "complexity": "O(1)", + "group": "generic", + "since": "2.2.3", + "arity": 3, + "container": "OBJECT", + "function": "objectCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/object.json b/tools/codis2pika/scripts/commands/object.json new file mode 100644 index 0000000000..f51988cd72 --- /dev/null +++ b/tools/codis2pika/scripts/commands/object.json @@ -0,0 +1,9 @@ +{ + "OBJECT": { + "summary": "A container for object introspection commands", + "complexity": "Depends on subcommand.", + "group": "generic", + "since": "2.2.3", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/persist.json b/tools/codis2pika/scripts/commands/persist.json new file mode 100644 index 0000000000..f08df4c1a9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/persist.json @@ -0,0 +1,44 @@ +{ + "PERSIST": { + "summary": "Remove the expiration from a key", + "complexity": "O(1)", + "group": "generic", + "since": "2.2.0", + "arity": 2, + "function": "persistCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pexpire.json b/tools/codis2pika/scripts/commands/pexpire.json new file mode 100644 index 0000000000..9638b8eaa8 --- /dev/null 
+++ b/tools/codis2pika/scripts/commands/pexpire.json @@ -0,0 +1,82 @@ +{ + "PEXPIRE": { + "summary": "Set a key's time to live in milliseconds", + "complexity": "O(1)", + "group": "generic", + "since": "2.6.0", + "arity": -3, + "function": "pexpireCommand", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "milliseconds", + "type": "integer" + }, + { + "name": "condition", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pexpireat.json b/tools/codis2pika/scripts/commands/pexpireat.json new file mode 100644 index 0000000000..c08782c261 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pexpireat.json @@ -0,0 +1,82 @@ +{ + "PEXPIREAT": { + "summary": "Set the expiration for a key as a UNIX timestamp specified in milliseconds", + "complexity": "O(1)", + "group": "generic", + "since": "2.6.0", + "arity": -3, + "function": "pexpireatCommand", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." 
+ ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "unix-time-milliseconds", + "type": "unix-time" + }, + { + "name": "condition", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pexpiretime.json b/tools/codis2pika/scripts/commands/pexpiretime.json new file mode 100644 index 0000000000..6ba9212786 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pexpiretime.json @@ -0,0 +1,44 @@ +{ + "PEXPIRETIME": { + "summary": "Get the expiration Unix timestamp for a key in milliseconds", + "complexity": "O(1)", + "group": "generic", + "since": "7.0.0", + "arity": 2, + "function": "pexpiretimeCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pfadd.json b/tools/codis2pika/scripts/commands/pfadd.json new file mode 100644 index 0000000000..8076a7631e --- /dev/null +++ b/tools/codis2pika/scripts/commands/pfadd.json @@ -0,0 +1,51 @@ +{ + "PFADD": { + "summary": "Adds the specified elements to the specified 
HyperLogLog.", + "complexity": "O(1) to add every element.", + "group": "hyperloglog", + "since": "2.8.9", + "arity": -2, + "function": "pfaddCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "HYPERLOGLOG" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pfcount.json b/tools/codis2pika/scripts/commands/pfcount.json new file mode 100644 index 0000000000..4d89e1d380 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pfcount.json @@ -0,0 +1,46 @@ +{ + "PFCOUNT": { + "summary": "Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).", + "complexity": "O(1) with a very small average constant time when called with a single key. 
O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys.", + "group": "hyperloglog", + "since": "2.8.9", + "arity": -2, + "function": "pfcountCommand", + "command_flags": [ + "READONLY", + "MAY_REPLICATE" + ], + "acl_categories": [ + "HYPERLOGLOG" + ], + "key_specs": [ + { + "notes": "RW because it may change the internal representation of the key, and propagate to replicas", + "flags": [ + "RW", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pfdebug.json b/tools/codis2pika/scripts/commands/pfdebug.json new file mode 100644 index 0000000000..b07e725bc3 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pfdebug.json @@ -0,0 +1,52 @@ +{ + "PFDEBUG": { + "summary": "Internal commands for debugging HyperLogLog values", + "complexity": "N/A", + "group": "hyperloglog", + "since": "2.8.9", + "arity": 3, + "function": "pfdebugCommand", + "doc_flags": [ + "SYSCMD" + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "ADMIN" + ], + "acl_categories": [ + "HYPERLOGLOG" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "subcommand", + "type": "string" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pfmerge.json b/tools/codis2pika/scripts/commands/pfmerge.json new file mode 100644 index 0000000000..ae18dad1e6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pfmerge.json @@ -0,0 +1,69 @@ +{ + "PFMERGE": { + "summary": "Merge N different HyperLogLogs into a single one.", + "complexity": "O(N) to merge N 
HyperLogLogs, but with high constant times.", + "group": "hyperloglog", + "since": "2.8.9", + "arity": -2, + "function": "pfmergeCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "HYPERLOGLOG" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "destkey", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "sourcekey", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pfselftest.json b/tools/codis2pika/scripts/commands/pfselftest.json new file mode 100644 index 0000000000..b75ce03699 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pfselftest.json @@ -0,0 +1,19 @@ +{ + "PFSELFTEST": { + "summary": "An internal command for testing HyperLogLog values", + "complexity": "N/A", + "group": "hyperloglog", + "since": "2.8.9", + "arity": 1, + "function": "pfselftestCommand", + "doc_flags": [ + "SYSCMD" + ], + "command_flags": [ + "ADMIN" + ], + "acl_categories": [ + "HYPERLOGLOG" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/ping.json b/tools/codis2pika/scripts/commands/ping.json new file mode 100644 index 0000000000..e7db2c3324 --- /dev/null +++ b/tools/codis2pika/scripts/commands/ping.json @@ -0,0 +1,28 @@ +{ + "PING": { + "summary": "Ping the server", + "complexity": "O(1)", + "group": "connection", + "since": "1.0.0", + "arity": -1, + "function": "pingCommand", + "command_flags": [ + "FAST", + "SENTINEL" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": 
"message", + "type": "string", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/psetex.json b/tools/codis2pika/scripts/commands/psetex.json new file mode 100644 index 0000000000..b746a7b147 --- /dev/null +++ b/tools/codis2pika/scripts/commands/psetex.json @@ -0,0 +1,52 @@ +{ + "PSETEX": { + "summary": "Set the value and expiration in milliseconds of a key", + "complexity": "O(1)", + "group": "string", + "since": "2.6.0", + "arity": 4, + "function": "psetexCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "milliseconds", + "type": "integer" + }, + { + "name": "value", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/psubscribe.json b/tools/codis2pika/scripts/commands/psubscribe.json new file mode 100644 index 0000000000..707fdf45cf --- /dev/null +++ b/tools/codis2pika/scripts/commands/psubscribe.json @@ -0,0 +1,30 @@ +{ + "PSUBSCRIBE": { + "summary": "Listen for messages published to channels matching the given patterns", + "complexity": "O(N) where N is the number of patterns the client is already subscribed to.", + "group": "pubsub", + "since": "2.0.0", + "arity": -2, + "function": "psubscribeCommand", + "command_flags": [ + "PUBSUB", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "pattern", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "pattern", + "type": "pattern" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/psync.json b/tools/codis2pika/scripts/commands/psync.json new file mode 100644 index 0000000000..91175a1986 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/psync.json @@ -0,0 +1,25 @@ +{ + "PSYNC": { + "summary": "Internal command used for replication", + "group": "server", + "since": "2.8.0", + "arity": -3, + "function": "syncCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NO_MULTI", + "NOSCRIPT" + ], + "arguments": [ + { + "name": "replicationid", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pttl.json b/tools/codis2pika/scripts/commands/pttl.json new file mode 100644 index 0000000000..1d37b9a497 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pttl.json @@ -0,0 +1,53 @@ +{ + "PTTL": { + "summary": "Get the time to live for a key in milliseconds", + "complexity": "O(1)", + "group": "generic", + "since": "2.6.0", + "arity": 2, + "function": "pttlCommand", + "history": [ + [ + "2.8.0", + "Added the -2 reply." + ] + ], + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/publish.json b/tools/codis2pika/scripts/commands/publish.json new file mode 100644 index 0000000000..3c9b12f4d5 --- /dev/null +++ b/tools/codis2pika/scripts/commands/publish.json @@ -0,0 +1,28 @@ +{ + "PUBLISH": { + "summary": "Post a message to a channel", + "complexity": "O(N+M) where N is the number of clients subscribed to the receiving channel and M is the total number of subscribed patterns (by any client).", + "group": "pubsub", + "since": "2.0.0", + "arity": 3, + "function": "publishCommand", + "command_flags": [ + "PUBSUB", + "LOADING", + "STALE", + "FAST", + "MAY_REPLICATE", + 
"SENTINEL" + ], + "arguments": [ + { + "name": "channel", + "type": "string" + }, + { + "name": "message", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pubsub-channels.json b/tools/codis2pika/scripts/commands/pubsub-channels.json new file mode 100644 index 0000000000..0522504b1c --- /dev/null +++ b/tools/codis2pika/scripts/commands/pubsub-channels.json @@ -0,0 +1,23 @@ +{ + "CHANNELS": { + "summary": "List active channels", + "complexity": "O(N) where N is the number of active channels, and assuming constant time pattern matching (relatively short channels and patterns)", + "group": "pubsub", + "since": "2.8.0", + "arity": -2, + "container": "PUBSUB", + "function": "pubsubCommand", + "command_flags": [ + "PUBSUB", + "LOADING", + "STALE" + ], + "arguments": [ + { + "name": "pattern", + "type": "pattern", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pubsub-help.json b/tools/codis2pika/scripts/commands/pubsub-help.json new file mode 100644 index 0000000000..e0c2a61234 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pubsub-help.json @@ -0,0 +1,15 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "pubsub", + "since": "6.2.0", + "arity": 2, + "container": "PUBSUB", + "function": "pubsubCommand", + "command_flags": [ + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pubsub-numpat.json b/tools/codis2pika/scripts/commands/pubsub-numpat.json new file mode 100644 index 0000000000..382ec1b70d --- /dev/null +++ b/tools/codis2pika/scripts/commands/pubsub-numpat.json @@ -0,0 +1,16 @@ +{ + "NUMPAT": { + "summary": "Get the count of unique patterns pattern subscriptions", + "complexity": "O(1)", + "group": "pubsub", + "since": "2.8.0", + "arity": 2, + "container": "PUBSUB", + "function": "pubsubCommand", + "command_flags": [ + "PUBSUB", + "LOADING", + "STALE" + ] + } +} diff --git 
a/tools/codis2pika/scripts/commands/pubsub-numsub.json b/tools/codis2pika/scripts/commands/pubsub-numsub.json new file mode 100644 index 0000000000..fae05c8fd2 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pubsub-numsub.json @@ -0,0 +1,24 @@ +{ + "NUMSUB": { + "summary": "Get the count of subscribers for channels", + "complexity": "O(N) for the NUMSUB subcommand, where N is the number of requested channels", + "group": "pubsub", + "since": "2.8.0", + "arity": -2, + "container": "PUBSUB", + "function": "pubsubCommand", + "command_flags": [ + "PUBSUB", + "LOADING", + "STALE" + ], + "arguments": [ + { + "name": "channel", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pubsub-shardchannels.json b/tools/codis2pika/scripts/commands/pubsub-shardchannels.json new file mode 100644 index 0000000000..90b907d302 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pubsub-shardchannels.json @@ -0,0 +1,23 @@ +{ + "SHARDCHANNELS": { + "summary": "List active shard channels", + "complexity": "O(N) where N is the number of active shard channels, and assuming constant time pattern matching (relatively short shard channels).", + "group": "pubsub", + "since": "7.0.0", + "arity": -2, + "container": "PUBSUB", + "function": "pubsubCommand", + "command_flags": [ + "PUBSUB", + "LOADING", + "STALE" + ], + "arguments": [ + { + "name": "pattern", + "type": "pattern", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pubsub-shardnumsub.json b/tools/codis2pika/scripts/commands/pubsub-shardnumsub.json new file mode 100644 index 0000000000..89187696a4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/pubsub-shardnumsub.json @@ -0,0 +1,24 @@ +{ + "SHARDNUMSUB": { + "summary": "Get the count of subscribers for shard channels", + "complexity": "O(N) for the SHARDNUMSUB subcommand, where N is the number of requested shard channels", + "group": "pubsub", + "since": "7.0.0", + "arity": 
-2, + "container": "PUBSUB", + "function": "pubsubCommand", + "command_flags": [ + "PUBSUB", + "LOADING", + "STALE" + ], + "arguments": [ + { + "name": "shardchannel", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/pubsub.json b/tools/codis2pika/scripts/commands/pubsub.json new file mode 100644 index 0000000000..e31bd4043f --- /dev/null +++ b/tools/codis2pika/scripts/commands/pubsub.json @@ -0,0 +1,9 @@ +{ + "PUBSUB": { + "summary": "A container for Pub/Sub commands", + "complexity": "Depends on subcommand.", + "group": "pubsub", + "since": "2.8.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/punsubscribe.json b/tools/codis2pika/scripts/commands/punsubscribe.json new file mode 100644 index 0000000000..30136b3166 --- /dev/null +++ b/tools/codis2pika/scripts/commands/punsubscribe.json @@ -0,0 +1,25 @@ +{ + "PUNSUBSCRIBE": { + "summary": "Stop listening for messages posted to channels matching the given patterns", + "complexity": "O(N+M) where N is the number of patterns the client is already subscribed and M is the number of total patterns subscribed in the system (by any client).", + "group": "pubsub", + "since": "2.0.0", + "arity": -1, + "function": "punsubscribeCommand", + "command_flags": [ + "PUBSUB", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "pattern", + "type": "pattern", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/quit.json b/tools/codis2pika/scripts/commands/quit.json new file mode 100644 index 0000000000..cf13f1ee91 --- /dev/null +++ b/tools/codis2pika/scripts/commands/quit.json @@ -0,0 +1,21 @@ +{ + "QUIT": { + "summary": "Close the connection", + "complexity": "O(1)", + "group": "connection", + "since": "1.0.0", + "arity": -1, + "function": "quitCommand", + "command_flags": [ + "ALLOW_BUSY", + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "NO_AUTH" + ], + 
"acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/randomkey.json b/tools/codis2pika/scripts/commands/randomkey.json new file mode 100644 index 0000000000..93473968c4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/randomkey.json @@ -0,0 +1,20 @@ +{ + "RANDOMKEY": { + "summary": "Return a random key from the keyspace", + "complexity": "O(1)", + "group": "generic", + "since": "1.0.0", + "arity": 1, + "function": "randomkeyCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/readonly.json b/tools/codis2pika/scripts/commands/readonly.json new file mode 100644 index 0000000000..1bbc220eda --- /dev/null +++ b/tools/codis2pika/scripts/commands/readonly.json @@ -0,0 +1,18 @@ +{ + "READONLY": { + "summary": "Enables read queries for a connection to a cluster replica node", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 1, + "function": "readonlyCommand", + "command_flags": [ + "FAST", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/readwrite.json b/tools/codis2pika/scripts/commands/readwrite.json new file mode 100644 index 0000000000..81e505ffe8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/readwrite.json @@ -0,0 +1,18 @@ +{ + "READWRITE": { + "summary": "Disables read queries for a connection to a cluster replica node", + "complexity": "O(1)", + "group": "cluster", + "since": "3.0.0", + "arity": 1, + "function": "readwriteCommand", + "command_flags": [ + "FAST", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/rename.json b/tools/codis2pika/scripts/commands/rename.json new file mode 100644 index 0000000000..561bf22b64 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/rename.json @@ -0,0 +1,69 @@ +{ + "RENAME": { + "summary": "Rename a key", + "complexity": "O(1)", + "group": "generic", + "since": "1.0.0", + "arity": 3, + "function": "renameCommand", + "history": [], + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "OW", + "UPDATE" + + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "newkey", + "type": "key", + "key_spec_index": 1 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/renamenx.json b/tools/codis2pika/scripts/commands/renamenx.json new file mode 100644 index 0000000000..afa4f658b1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/renamenx.json @@ -0,0 +1,74 @@ +{ + "RENAMENX": { + "summary": "Rename a key, only if the new key does not exist", + "complexity": "O(1)", + "group": "generic", + "since": "1.0.0", + "arity": 3, + "function": "renamenxCommand", + "history": [ + [ + "3.2.0", + "The command no longer returns an error when source and destination names are the same." 
+ ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "OW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "newkey", + "type": "key", + "key_spec_index": 1 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/replconf.json b/tools/codis2pika/scripts/commands/replconf.json new file mode 100644 index 0000000000..630b62136e --- /dev/null +++ b/tools/codis2pika/scripts/commands/replconf.json @@ -0,0 +1,20 @@ +{ + "REPLCONF": { + "summary": "An internal command for configuring the replication stream", + "complexity": "O(1)", + "group": "server", + "since": "3.0.0", + "arity": -1, + "function": "replconfCommand", + "doc_flags": [ + "SYSCMD" + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "ALLOW_BUSY" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/replicaof.json b/tools/codis2pika/scripts/commands/replicaof.json new file mode 100644 index 0000000000..6299ea3ff5 --- /dev/null +++ b/tools/codis2pika/scripts/commands/replicaof.json @@ -0,0 +1,26 @@ +{ + "REPLICAOF": { + "summary": "Make the server a replica of another instance, or promote it as master.", + "complexity": "O(1)", + "group": "server", + "since": "5.0.0", + "arity": 3, + "function": "replicaofCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT", + "STALE" + ], + "arguments": [ + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/reset.json 
b/tools/codis2pika/scripts/commands/reset.json new file mode 100644 index 0000000000..40041cd8c7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/reset.json @@ -0,0 +1,21 @@ +{ + "RESET": { + "summary": "Reset the connection", + "complexity": "O(1)", + "group": "connection", + "since": "6.2.0", + "arity": 1, + "function": "resetCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "NO_AUTH", + "ALLOW_BUSY" + ], + "acl_categories": [ + "CONNECTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/restore-asking.json b/tools/codis2pika/scripts/commands/restore-asking.json new file mode 100644 index 0000000000..f4602f9715 --- /dev/null +++ b/tools/codis2pika/scripts/commands/restore-asking.json @@ -0,0 +1,99 @@ +{ + "RESTORE-ASKING": { + "summary": "An internal command for migrating keys in a cluster", + "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", + "group": "server", + "since": "3.0.0", + "arity": -4, + "function": "restoreCommand", + "history": [ + [ + "3.0.0", + "Added the `REPLACE` modifier." + ], + [ + "5.0.0", + "Added the `ABSTTL` modifier." + ], + [ + "5.0.0", + "Added the `IDLETIME` and `FREQ` options." 
+ ] + ], + "doc_flags": [ + "SYSCMD" + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "ASKING" + ], + "acl_categories": [ + "KEYSPACE", + "DANGEROUS" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "ttl", + "type": "integer" + }, + { + "name": "serialized-value", + "type": "string" + }, + { + "name": "replace", + "token": "REPLACE", + "type": "pure-token", + "optional": true, + "since": "3.0.0" + }, + { + "name": "absttl", + "token": "ABSTTL", + "type": "pure-token", + "optional": true, + "since": "5.0.0" + }, + { + "token": "IDLETIME", + "name": "seconds", + "type": "integer", + "optional": true, + "since": "5.0.0" + }, + { + "token": "FREQ", + "name": "frequency", + "type": "integer", + "optional": true, + "since": "5.0.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/restore.json b/tools/codis2pika/scripts/commands/restore.json new file mode 100644 index 0000000000..d6cebf578e --- /dev/null +++ b/tools/codis2pika/scripts/commands/restore.json @@ -0,0 +1,95 @@ +{ + "RESTORE": { + "summary": "Create a key using the provided serialized value, previously obtained using DUMP.", + "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", + "group": "generic", + "since": "2.6.0", + "arity": -4, + "function": "restoreCommand", + "history": [ + [ + "3.0.0", + "Added the `REPLACE` modifier." + ], + [ + "5.0.0", + "Added the `ABSTTL` modifier." 
+ ], + [ + "5.0.0", + "Added the `IDLETIME` and `FREQ` options." + ] + ], + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "KEYSPACE", + "DANGEROUS" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "ttl", + "type": "integer" + }, + { + "name": "serialized-value", + "type": "string" + }, + { + "name": "replace", + "token": "REPLACE", + "type": "pure-token", + "optional": true, + "since": "3.0.0" + }, + { + "name": "absttl", + "token": "ABSTTL", + "type": "pure-token", + "optional": true, + "since": "5.0.0" + }, + { + "token": "IDLETIME", + "name": "seconds", + "type": "integer", + "optional": true, + "since": "5.0.0" + }, + { + "token": "FREQ", + "name": "frequency", + "type": "integer", + "optional": true, + "since": "5.0.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/role.json b/tools/codis2pika/scripts/commands/role.json new file mode 100644 index 0000000000..4d470e3508 --- /dev/null +++ b/tools/codis2pika/scripts/commands/role.json @@ -0,0 +1,21 @@ +{ + "ROLE": { + "summary": "Return the role of the instance in the context of replication", + "complexity": "O(1)", + "group": "server", + "since": "2.8.12", + "arity": 1, + "function": "roleCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "SENTINEL" + ], + "acl_categories": [ + "ADMIN", + "DANGEROUS" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/rpop.json b/tools/codis2pika/scripts/commands/rpop.json new file mode 100644 index 0000000000..10518a8271 --- /dev/null +++ b/tools/codis2pika/scripts/commands/rpop.json @@ -0,0 +1,57 @@ +{ + "RPOP": { + "summary": "Remove and get the last elements in a list", + "complexity": "O(N) where N is the number of elements returned", + "group": 
"list", + "since": "1.0.0", + "arity": -2, + "function": "rpopCommand", + "history": [ + [ + "6.2.0", + "Added the `count` argument." + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true, + "since": "6.2.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/rpoplpush.json b/tools/codis2pika/scripts/commands/rpoplpush.json new file mode 100644 index 0000000000..ea3c7749da --- /dev/null +++ b/tools/codis2pika/scripts/commands/rpoplpush.json @@ -0,0 +1,73 @@ +{ + "RPOPLPUSH": { + "summary": "Remove the last element in a list, prepend it to another list and return it", + "complexity": "O(1)", + "group": "list", + "since": "1.2.0", + "arity": 3, + "function": "rpoplpushCommand", + "deprecated_since": "6.2.0", + "replaced_by": "`LMOVE` with the `RIGHT` and `LEFT` arguments", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/rpush.json b/tools/codis2pika/scripts/commands/rpush.json new file mode 100644 index 
0000000000..03c1f862a5 --- /dev/null +++ b/tools/codis2pika/scripts/commands/rpush.json @@ -0,0 +1,56 @@ +{ + "RPUSH": { + "summary": "Append one or multiple elements to a list", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "group": "list", + "since": "1.0.0", + "arity": -3, + "function": "rpushCommand", + "history": [ + [ + "2.4.0", + "Accepts multiple `element` arguments." + ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/rpushx.json b/tools/codis2pika/scripts/commands/rpushx.json new file mode 100644 index 0000000000..9d8c14eedd --- /dev/null +++ b/tools/codis2pika/scripts/commands/rpushx.json @@ -0,0 +1,56 @@ +{ + "RPUSHX": { + "summary": "Append an element to a list, only if the list exists", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "group": "list", + "since": "2.2.0", + "arity": -3, + "function": "rpushxCommand", + "history": [ + [ + "4.0.0", + "Accepts multiple `element` arguments." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "LIST" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sadd.json b/tools/codis2pika/scripts/commands/sadd.json new file mode 100644 index 0000000000..841eb1ffa7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sadd.json @@ -0,0 +1,56 @@ +{ + "SADD": { + "summary": "Add one or more members to a set", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "group": "set", + "since": "1.0.0", + "arity": -3, + "function": "saddCommand", + "history": [ + [ + "2.4.0", + "Accepts multiple `member` arguments." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/save.json b/tools/codis2pika/scripts/commands/save.json new file mode 100644 index 0000000000..7cf5cd2462 --- /dev/null +++ b/tools/codis2pika/scripts/commands/save.json @@ -0,0 +1,16 @@ +{ + "SAVE": { + "summary": "Synchronously save the dataset to disk", + "complexity": "O(N) where N is the total number of keys in all databases", + "group": "server", + "since": "1.0.0", + "arity": 1, + "function": "saveCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT", + "NO_MULTI" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/scan.json b/tools/codis2pika/scripts/commands/scan.json new file mode 100644 index 0000000000..a689bacca5 --- /dev/null +++ b/tools/codis2pika/scripts/commands/scan.json @@ -0,0 +1,51 @@ +{ + "SCAN": { + "summary": "Incrementally iterate the keys space", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.", + "group": "generic", + "since": "2.8.0", + "arity": -2, + "function": "scanCommand", + "history": [ + [ + "6.0.0", + "Added the `TYPE` subcommand." 
+ ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT", + "REQUEST_POLICY:SPECIAL" + ], + "arguments": [ + { + "name": "cursor", + "type": "integer" + }, + { + "token": "MATCH", + "name": "pattern", + "type": "pattern", + "optional": true + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + }, + { + "token": "TYPE", + "name": "type", + "type": "string", + "optional": true, + "since": "6.0.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/scard.json b/tools/codis2pika/scripts/commands/scard.json new file mode 100644 index 0000000000..a1f1f8ef2f --- /dev/null +++ b/tools/codis2pika/scripts/commands/scard.json @@ -0,0 +1,43 @@ +{ + "SCARD": { + "summary": "Get the number of members in a set", + "complexity": "O(1)", + "group": "set", + "since": "1.0.0", + "arity": 2, + "function": "scardCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/script-debug.json b/tools/codis2pika/scripts/commands/script-debug.json new file mode 100644 index 0000000000..a69ddcac1b --- /dev/null +++ b/tools/codis2pika/scripts/commands/script-debug.json @@ -0,0 +1,40 @@ +{ + "DEBUG": { + "summary": "Set the debug mode for executed scripts.", + "complexity": "O(1)", + "group": "scripting", + "since": "3.2.0", + "arity": 3, + "container": "SCRIPT", + "function": "scriptCommand", + "command_flags": [ + "NOSCRIPT" + ], + "acl_categories": [ + "SCRIPTING" + ], + "arguments": [ + { + "name": "mode", + "type": "oneof", + "arguments": [ + { + "name": "yes", + "type": "pure-token", + "token": "YES" + }, + { + 
"name": "sync", + "type": "pure-token", + "token": "SYNC" + }, + { + "name": "no", + "type": "pure-token", + "token": "NO" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/script-exists.json b/tools/codis2pika/scripts/commands/script-exists.json new file mode 100644 index 0000000000..e4070f2391 --- /dev/null +++ b/tools/codis2pika/scripts/commands/script-exists.json @@ -0,0 +1,28 @@ +{ + "EXISTS": { + "summary": "Check existence of scripts in the script cache.", + "complexity": "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation).", + "group": "scripting", + "since": "2.6.0", + "arity": -3, + "container": "SCRIPT", + "function": "scriptCommand", + "command_flags": [ + "NOSCRIPT" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:AGG_LOGICAL_AND" + ], + "arguments": [ + { + "name": "sha1", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/script-flush.json b/tools/codis2pika/scripts/commands/script-flush.json new file mode 100644 index 0000000000..b246bdef5c --- /dev/null +++ b/tools/codis2pika/scripts/commands/script-flush.json @@ -0,0 +1,47 @@ +{ + "FLUSH": { + "summary": "Remove all the scripts from the script cache.", + "complexity": "O(N) with N being the number of scripts in cache", + "group": "scripting", + "since": "2.6.0", + "arity": -2, + "container": "SCRIPT", + "function": "scriptCommand", + "history": [ + [ + "6.2.0", + "Added the `ASYNC` and `SYNC` flushing mode modifiers." 
+ ] + ], + "command_flags": [ + "NOSCRIPT" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "async", + "type": "oneof", + "optional": true, + "since": "6.2.0", + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/script-help.json b/tools/codis2pika/scripts/commands/script-help.json new file mode 100644 index 0000000000..7b3bc89ecb --- /dev/null +++ b/tools/codis2pika/scripts/commands/script-help.json @@ -0,0 +1,18 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "scripting", + "since": "5.0.0", + "arity": 2, + "container": "SCRIPT", + "function": "scriptCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "SCRIPTING" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/script-kill.json b/tools/codis2pika/scripts/commands/script-kill.json new file mode 100644 index 0000000000..970ccd4075 --- /dev/null +++ b/tools/codis2pika/scripts/commands/script-kill.json @@ -0,0 +1,22 @@ +{ + "KILL": { + "summary": "Kill the script currently in execution.", + "complexity": "O(1)", + "group": "scripting", + "since": "2.6.0", + "arity": 2, + "container": "SCRIPT", + "function": "scriptCommand", + "command_flags": [ + "NOSCRIPT", + "ALLOW_BUSY" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:ONE_SUCCEEDED" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/script-load.json b/tools/codis2pika/scripts/commands/script-load.json new file mode 100644 index 0000000000..b0b4e67e56 --- /dev/null +++ b/tools/codis2pika/scripts/commands/script-load.json @@ -0,0 +1,28 @@ +{ + "LOAD": { + "summary": "Load the specified Lua script into 
the script cache.", + "complexity": "O(N) with N being the length in bytes of the script body.", + "group": "scripting", + "since": "2.6.0", + "arity": 3, + "container": "SCRIPT", + "function": "scriptCommand", + "command_flags": [ + "NOSCRIPT", + "STALE" + ], + "acl_categories": [ + "SCRIPTING" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ], + "arguments": [ + { + "name": "script", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/script.json b/tools/codis2pika/scripts/commands/script.json new file mode 100644 index 0000000000..16e307a3cf --- /dev/null +++ b/tools/codis2pika/scripts/commands/script.json @@ -0,0 +1,9 @@ +{ + "SCRIPT": { + "summary": "A container for Lua scripts management commands", + "complexity": "Depends on subcommand.", + "group": "scripting", + "since": "2.6.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/sdiff.json b/tools/codis2pika/scripts/commands/sdiff.json new file mode 100644 index 0000000000..6f5fd0a81c --- /dev/null +++ b/tools/codis2pika/scripts/commands/sdiff.json @@ -0,0 +1,47 @@ +{ + "SDIFF": { + "summary": "Subtract multiple sets", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "group": "set", + "since": "1.0.0", + "arity": -2, + "function": "sdiffCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sdiffstore.json b/tools/codis2pika/scripts/commands/sdiffstore.json new file mode 100644 index 0000000000..c78cd89992 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/sdiffstore.json @@ -0,0 +1,68 @@ +{ + "SDIFFSTORE": { + "summary": "Subtract multiple sets and store the resulting set in a key", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "group": "set", + "since": "1.0.0", + "arity": -3, + "function": "sdiffstoreCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/select.json b/tools/codis2pika/scripts/commands/select.json new file mode 100644 index 0000000000..4375cac646 --- /dev/null +++ b/tools/codis2pika/scripts/commands/select.json @@ -0,0 +1,24 @@ +{ + "SELECT": { + "summary": "Change the selected database for the current connection", + "complexity": "O(1)", + "group": "connection", + "since": "1.0.0", + "arity": 2, + "function": "selectCommand", + "command_flags": [ + "LOADING", + "STALE", + "FAST" + ], + "acl_categories": [ + "CONNECTION" + ], + "arguments": [ + { + "name": "index", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-ckquorum.json b/tools/codis2pika/scripts/commands/sentinel-ckquorum.json new file mode 100644 index 0000000000..6180614cc4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-ckquorum.json @@ -0,0 +1,21 @@ +{ + "CKQUORUM": { + "summary": "Check for a Sentinel quorum", + "group": "sentinel", + "since": "2.8.4", + "arity": 3, + 
"container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-config.json b/tools/codis2pika/scripts/commands/sentinel-config.json new file mode 100644 index 0000000000..74bcdbd503 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-config.json @@ -0,0 +1,46 @@ +{ + "CONFIG": { + "summary": "Configure Sentinel", + "complexity": "O(1)", + "group": "sentinel", + "since": "6.2.0", + "arity": -3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name":"set_or_get", + "type":"oneof", + "arguments":[ + { + "name":"set_param_value", + "token":"SET", + "type":"block", + "multiple":true, + "arguments":[ + { + "name":"parameter", + "type":"string" + }, + { + "name":"value", + "type":"string" + } + ] + }, + { + "token":"GET", + "multiple":true, + "name":"parameter", + "type":"string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-debug.json b/tools/codis2pika/scripts/commands/sentinel-debug.json new file mode 100644 index 0000000000..b3335409da --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-debug.json @@ -0,0 +1,33 @@ +{ + "DEBUG": { + "summary": "List or update the current configurable parameters", + "complexity": "O(N) where N is the number of configurable parameters", + "group": "sentinel", + "since": "7.0.0", + "arity": -2, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "parameter_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "parameter", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git 
a/tools/codis2pika/scripts/commands/sentinel-failover.json b/tools/codis2pika/scripts/commands/sentinel-failover.json new file mode 100644 index 0000000000..f6640168a6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-failover.json @@ -0,0 +1,21 @@ +{ + "FAILOVER": { + "summary": "Force a failover", + "group": "sentinel", + "since": "2.8.4", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-flushconfig.json b/tools/codis2pika/scripts/commands/sentinel-flushconfig.json new file mode 100644 index 0000000000..7d48cd482d --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-flushconfig.json @@ -0,0 +1,16 @@ +{ + "FLUSHCONFIG": { + "summary": "Rewrite configuration file", + "complexity": "O(1)", + "group": "sentinel", + "since": "2.8.4", + "arity": 2, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-get-master-addr-by-name.json b/tools/codis2pika/scripts/commands/sentinel-get-master-addr-by-name.json new file mode 100644 index 0000000000..e0fde851cd --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-get-master-addr-by-name.json @@ -0,0 +1,22 @@ +{ + "GET-MASTER-ADDR-BY-NAME": { + "summary": "Get port and address of a master", + "complexity": "O(1)", + "group": "sentinel", + "since": "2.8.4", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-help.json b/tools/codis2pika/scripts/commands/sentinel-help.json new file mode 100644 index 
0000000000..4c20313eb9 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-help.json @@ -0,0 +1,17 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "sentinel", + "since": "6.2.0", + "arity": 2, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "LOADING", + "STALE", + "SENTINEL", + "ONLY_SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-info-cache.json b/tools/codis2pika/scripts/commands/sentinel-info-cache.json new file mode 100644 index 0000000000..5c7855663e --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-info-cache.json @@ -0,0 +1,23 @@ +{ + "INFO-CACHE": { + "summary": "Get cached INFO from the instances in the deployment", + "complexity": "O(N) where N is the number of instances", + "group": "sentinel", + "since": "3.2.0", + "arity": -3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "nodename", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-is-master-down-by-addr.json b/tools/codis2pika/scripts/commands/sentinel-is-master-down-by-addr.json new file mode 100644 index 0000000000..456ad183ae --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-is-master-down-by-addr.json @@ -0,0 +1,34 @@ +{ + "IS-MASTER-DOWN-BY-ADDR": { + "summary": "Check if a master is down", + "complexity": "O(1)", + "group": "sentinel", + "since": "2.8.4", + "arity": 6, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "ip", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "name": "current-epoch", + "type": "integer" + }, + { + "name": "runid", + "type": "string" + } + ] + } +} diff --git 
a/tools/codis2pika/scripts/commands/sentinel-master.json b/tools/codis2pika/scripts/commands/sentinel-master.json new file mode 100644 index 0000000000..ec10f43fda --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-master.json @@ -0,0 +1,22 @@ +{ + "MASTER": { + "summary": "Shows the state of a master", + "complexity": "O(1)", + "group": "sentinel", + "since": "2.8.4", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-masters.json b/tools/codis2pika/scripts/commands/sentinel-masters.json new file mode 100644 index 0000000000..abfb0b5bff --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-masters.json @@ -0,0 +1,16 @@ +{ + "MASTERS": { + "summary": "List the monitored masters", + "complexity": "O(N) where N is the number of masters", + "group": "sentinel", + "since": "2.8.4", + "arity": 2, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-monitor.json b/tools/codis2pika/scripts/commands/sentinel-monitor.json new file mode 100644 index 0000000000..2c01df9009 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-monitor.json @@ -0,0 +1,34 @@ +{ + "MONITOR": { + "summary": "Start monitoring", + "complexity": "O(1)", + "group": "sentinel", + "since": "2.8.4", + "arity": 6, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "name", + "type": "string" + }, + { + "name": "ip", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "name": "quorum", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-myid.json 
b/tools/codis2pika/scripts/commands/sentinel-myid.json new file mode 100644 index 0000000000..1f85859427 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-myid.json @@ -0,0 +1,16 @@ +{ + "MYID": { + "summary": "Get the Sentinel instance ID", + "complexity": "O(1)", + "group": "sentinel", + "since": "6.2.0", + "arity": 2, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-pending-scripts.json b/tools/codis2pika/scripts/commands/sentinel-pending-scripts.json new file mode 100644 index 0000000000..6ef33ec69b --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-pending-scripts.json @@ -0,0 +1,15 @@ +{ + "PENDING-SCRIPTS": { + "summary": "Get information about pending scripts", + "group": "sentinel", + "since": "2.8.4", + "arity": 2, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-remove.json b/tools/codis2pika/scripts/commands/sentinel-remove.json new file mode 100644 index 0000000000..2e655e7f44 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-remove.json @@ -0,0 +1,22 @@ +{ + "REMOVE": { + "summary": "Stop monitoring", + "complexity": "O(1)", + "group": "sentinel", + "since": "2.8.4", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-replicas.json b/tools/codis2pika/scripts/commands/sentinel-replicas.json new file mode 100644 index 0000000000..dc175a7ecd --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-replicas.json @@ -0,0 +1,22 @@ +{ + "REPLICAS": { + "summary": "List the monitored replicas", + "complexity": 
"O(N) where N is the number of replicas", + "group": "sentinel", + "since": "5.0.0", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-reset.json b/tools/codis2pika/scripts/commands/sentinel-reset.json new file mode 100644 index 0000000000..9c60c6be7e --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-reset.json @@ -0,0 +1,22 @@ +{ + "RESET": { + "summary": "Reset masters by name pattern", + "complexity": "O(N) where N is the number of monitored masters", + "group": "sentinel", + "since": "2.8.4", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "pattern", + "type": "pattern" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-sentinels.json b/tools/codis2pika/scripts/commands/sentinel-sentinels.json new file mode 100644 index 0000000000..01319ce83a --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-sentinels.json @@ -0,0 +1,22 @@ +{ + "SENTINELS": { + "summary": "List the Sentinel instances", + "complexity": "O(N) where N is the number of Sentinels", + "group": "sentinel", + "since": "2.8.4", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-set.json b/tools/codis2pika/scripts/commands/sentinel-set.json new file mode 100644 index 0000000000..afe0360653 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-set.json @@ -0,0 +1,37 @@ +{ + "SET": { + "summary": "Change the configuration of a monitored master", + "complexity": "O(1)", + "group": 
"sentinel", + "since": "2.8.4", + "arity": -5, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "master-name", + "type": "string" + }, + { + "name": "option_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "option", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-simulate-failure.json b/tools/codis2pika/scripts/commands/sentinel-simulate-failure.json new file mode 100644 index 0000000000..4912a8b709 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-simulate-failure.json @@ -0,0 +1,37 @@ +{ + "SIMULATE-FAILURE": { + "summary": "Simulate failover scenarios", + "group": "sentinel", + "since": "3.2.0", + "arity": -3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "arguments": [ + { + "name": "mode", + "type": "oneof", + "optional":true, + "multiple":true, + "arguments": [ + { + "name": "crash-after-election", + "type": "pure-token" + }, + { + "name": "crash-after-promotion", + "type": "pure-token" + }, + { + "name": "help", + "type": "pure-token" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel-slaves.json b/tools/codis2pika/scripts/commands/sentinel-slaves.json new file mode 100644 index 0000000000..bce5c7692a --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel-slaves.json @@ -0,0 +1,27 @@ +{ + "SLAVES": { + "summary": "List the monitored slaves", + "complexity": "O(N) where N is the number of slaves", + "group": "sentinel", + "since": "2.8.0", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "deprecated_since": "5.0.0", + "replaced_by": "`SENTINEL REPLICAS`", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + 
"arguments": [ + { + "name": "master-name", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sentinel.json b/tools/codis2pika/scripts/commands/sentinel.json new file mode 100644 index 0000000000..81466e2ddc --- /dev/null +++ b/tools/codis2pika/scripts/commands/sentinel.json @@ -0,0 +1,14 @@ +{ + "SENTINEL": { + "summary": "A container for Sentinel commands", + "complexity": "Depends on subcommand.", + "group": "sentinel", + "since": "2.8.4", + "arity": -2, + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/set.json b/tools/codis2pika/scripts/commands/set.json new file mode 100644 index 0000000000..688d534d71 --- /dev/null +++ b/tools/codis2pika/scripts/commands/set.json @@ -0,0 +1,132 @@ +{ + "SET": { + "summary": "Set the string value of a key", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": -3, + "function": "setCommand", + "get_keys_function": "setGetKeys", + "history": [ + [ + "2.6.12", + "Added the `EX`, `PX`, `NX` and `XX` options." + ], + [ + "6.0.0", + "Added the `KEEPTTL` option." + ], + [ + "6.2.0", + "Added the `GET`, `EXAT` and `PXAT` option." + ], + [ + "7.0.0", + "Allowed the `NX` and `GET` options to be used together." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "notes": "RW and ACCESS due to the optional `GET` argument", + "flags": [ + "RW", + "ACCESS", + "UPDATE", + "VARIABLE_FLAGS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + }, + { + "name": "condition", + "type": "oneof", + "optional": true, + "since": "2.6.12", + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + } + ] + }, + { + "name": "get", + "token": "GET", + "type": "pure-token", + "optional": true, + "since": "6.2.0" + }, + { + "name": "expiration", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "seconds", + "type": "integer", + "token": "EX", + "since": "2.6.12" + }, + { + "name": "milliseconds", + "type": "integer", + "token": "PX", + "since": "2.6.12" + }, + { + "name": "unix-time-seconds", + "type": "unix-time", + "token": "EXAT", + "since": "6.2.0" + }, + { + "name": "unix-time-milliseconds", + "type": "unix-time", + "token": "PXAT", + "since": "6.2.0" + }, + { + "name": "keepttl", + "type": "pure-token", + "token": "KEEPTTL", + "since": "6.0.0" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/setbit.json b/tools/codis2pika/scripts/commands/setbit.json new file mode 100644 index 0000000000..723dfd8f4d --- /dev/null +++ b/tools/codis2pika/scripts/commands/setbit.json @@ -0,0 +1,53 @@ +{ + "SETBIT": { + "summary": "Sets or clears the bit at offset in the string value stored at key", + "complexity": "O(1)", + "group": "bitmap", + "since": "2.2.0", + "arity": 4, + "function": "setbitCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "BITMAP" + ], + 
"key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "value", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/setex.json b/tools/codis2pika/scripts/commands/setex.json new file mode 100644 index 0000000000..b7c43db0ab --- /dev/null +++ b/tools/codis2pika/scripts/commands/setex.json @@ -0,0 +1,52 @@ +{ + "SETEX": { + "summary": "Set the value and expiration of a key", + "complexity": "O(1)", + "group": "string", + "since": "2.0.0", + "arity": 4, + "function": "setexCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "seconds", + "type": "integer" + }, + { + "name": "value", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/setnx.json b/tools/codis2pika/scripts/commands/setnx.json new file mode 100644 index 0000000000..5332a134ae --- /dev/null +++ b/tools/codis2pika/scripts/commands/setnx.json @@ -0,0 +1,49 @@ +{ + "SETNX": { + "summary": "Set the value of a key, only if the key does not exist", + "complexity": "O(1)", + "group": "string", + "since": "1.0.0", + "arity": 3, + "function": "setnxCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "OW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + 
"limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/setrange.json b/tools/codis2pika/scripts/commands/setrange.json new file mode 100644 index 0000000000..d1336719af --- /dev/null +++ b/tools/codis2pika/scripts/commands/setrange.json @@ -0,0 +1,52 @@ +{ + "SETRANGE": { + "summary": "Overwrite part of a string at key starting at the specified offset", + "complexity": "O(1), not counting the time taken to copy the new string in place. Usually, this string is very small so the amortized complexity is O(1). Otherwise, complexity is O(M) with M being the length of the value argument.", + "group": "string", + "since": "2.2.0", + "arity": 4, + "function": "setrangeCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "value", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/shutdown.json b/tools/codis2pika/scripts/commands/shutdown.json new file mode 100644 index 0000000000..63da3ca03f --- /dev/null +++ b/tools/codis2pika/scripts/commands/shutdown.json @@ -0,0 +1,65 @@ +{ + "SHUTDOWN": { + "summary": "Synchronously save the dataset to disk and then shut down the server", + "complexity": "O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)", + "group": "server", + "since": "1.0.0", + "arity": -1, + "function": "shutdownCommand", + "history": [ + [ + "7.0.0", + "Added the `NOW`, `FORCE` and `ABORT` modifiers." 
+ ] + ], + "command_flags": [ + "ADMIN", + "NOSCRIPT", + "LOADING", + "STALE", + "NO_MULTI", + "SENTINEL", + "ALLOW_BUSY" + ], + "arguments": [ + { + "name": "nosave_save", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "nosave", + "type": "pure-token", + "token": "NOSAVE" + }, + { + "name": "save", + "type": "pure-token", + "token": "SAVE" + } + ] + }, + { + "name": "now", + "type": "pure-token", + "token": "NOW", + "optional": true, + "since": "7.0.0" + }, + { + "name": "force", + "type": "pure-token", + "token": "FORCE", + "optional": true, + "since": "7.0.0" + }, + { + "name": "abort", + "type": "pure-token", + "token": "ABORT", + "optional": true, + "since": "7.0.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sinter.json b/tools/codis2pika/scripts/commands/sinter.json new file mode 100644 index 0000000000..63f9e8676b --- /dev/null +++ b/tools/codis2pika/scripts/commands/sinter.json @@ -0,0 +1,47 @@ +{ + "SINTER": { + "summary": "Intersect multiple sets", + "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", + "group": "set", + "since": "1.0.0", + "arity": -2, + "function": "sinterCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sintercard.json b/tools/codis2pika/scripts/commands/sintercard.json new file mode 100644 index 0000000000..8047f7a69d --- /dev/null +++ b/tools/codis2pika/scripts/commands/sintercard.json @@ -0,0 +1,55 @@ +{ + "SINTERCARD": { + "summary": "Intersect multiple sets and return the cardinality of 
the result", + "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", + "group": "set", + "since": "7.0.0", + "arity": -3, + "function": "sinterCardCommand", + "get_keys_function": "sintercardGetKeys", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "token": "LIMIT", + "name": "limit", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sinterstore.json b/tools/codis2pika/scripts/commands/sinterstore.json new file mode 100644 index 0000000000..85e462e3e1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sinterstore.json @@ -0,0 +1,68 @@ +{ + "SINTERSTORE": { + "summary": "Intersect multiple sets and store the resulting set in a key", + "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", + "group": "set", + "since": "1.0.0", + "arity": -3, + "function": "sinterstoreCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ] + } +} diff 
--git a/tools/codis2pika/scripts/commands/sismember.json b/tools/codis2pika/scripts/commands/sismember.json new file mode 100644 index 0000000000..7a814b82b8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sismember.json @@ -0,0 +1,47 @@ +{ + "SISMEMBER": { + "summary": "Determine if a given value is a member of a set", + "complexity": "O(1)", + "group": "set", + "since": "1.0.0", + "arity": 3, + "function": "sismemberCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/slaveof.json b/tools/codis2pika/scripts/commands/slaveof.json new file mode 100644 index 0000000000..70560f1b66 --- /dev/null +++ b/tools/codis2pika/scripts/commands/slaveof.json @@ -0,0 +1,31 @@ +{ + "SLAVEOF": { + "summary": "Make the server a replica of another instance, or promote it as master.", + "complexity": "O(1)", + "group": "server", + "since": "1.0.0", + "arity": 3, + "function": "replicaofCommand", + "deprecated_since": "5.0.0", + "replaced_by": "`REPLICAOF`", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NOSCRIPT", + "STALE" + ], + "arguments": [ + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/slowlog-get.json b/tools/codis2pika/scripts/commands/slowlog-get.json new file mode 100644 index 0000000000..11212643ef --- /dev/null +++ b/tools/codis2pika/scripts/commands/slowlog-get.json @@ -0,0 +1,33 @@ +{ + "GET": { + "summary": "Get the slow log's entries", + "complexity": "O(N) where N is the number of entries returned", + 
"group": "server", + "since": "2.2.12", + "arity": -2, + "container": "SLOWLOG", + "function": "slowlogCommand", + "history": [ + [ + "4.0.0", + "Added client IP address, port and name to the reply." + ] + ], + "command_flags": [ + "ADMIN", + "LOADING", + "STALE" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_NODES", + "NONDETERMINISTIC_OUTPUT" + ], + "arguments": [ + { + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/slowlog-help.json b/tools/codis2pika/scripts/commands/slowlog-help.json new file mode 100644 index 0000000000..cf2707d38b --- /dev/null +++ b/tools/codis2pika/scripts/commands/slowlog-help.json @@ -0,0 +1,15 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "server", + "since": "6.2.0", + "arity": 2, + "container": "SLOWLOG", + "function": "slowlogCommand", + "command_flags": [ + "LOADING", + "STALE" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/slowlog-len.json b/tools/codis2pika/scripts/commands/slowlog-len.json new file mode 100644 index 0000000000..9a8969b0dd --- /dev/null +++ b/tools/codis2pika/scripts/commands/slowlog-len.json @@ -0,0 +1,21 @@ +{ + "LEN": { + "summary": "Get the slow log's length", + "complexity": "O(1)", + "group": "server", + "since": "2.2.12", + "arity": 2, + "container": "SLOWLOG", + "function": "slowlogCommand", + "command_flags": [ + "ADMIN", + "LOADING", + "STALE" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:AGG_SUM", + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/slowlog-reset.json b/tools/codis2pika/scripts/commands/slowlog-reset.json new file mode 100644 index 0000000000..36c024156c --- /dev/null +++ b/tools/codis2pika/scripts/commands/slowlog-reset.json @@ -0,0 +1,20 @@ +{ + "RESET": { + "summary": "Clear all entries from the slow log", + "complexity": "O(N) where N is the number of entries in the 
slowlog", + "group": "server", + "since": "2.2.12", + "arity": 2, + "container": "SLOWLOG", + "function": "slowlogCommand", + "command_flags": [ + "ADMIN", + "LOADING", + "STALE" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_NODES", + "RESPONSE_POLICY:ALL_SUCCEEDED" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/slowlog.json b/tools/codis2pika/scripts/commands/slowlog.json new file mode 100644 index 0000000000..fab266c1de --- /dev/null +++ b/tools/codis2pika/scripts/commands/slowlog.json @@ -0,0 +1,9 @@ +{ + "SLOWLOG": { + "summary": "A container for slow log commands", + "complexity": "Depends on subcommand.", + "group": "server", + "since": "2.2.12", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/smembers.json b/tools/codis2pika/scripts/commands/smembers.json new file mode 100644 index 0000000000..b5d4ff29d4 --- /dev/null +++ b/tools/codis2pika/scripts/commands/smembers.json @@ -0,0 +1,46 @@ +{ + "SMEMBERS": { + "summary": "Get all the members in a set", + "complexity": "O(N) where N is the set cardinality.", + "group": "set", + "since": "1.0.0", + "arity": 2, + "function": "sinterCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/smismember.json b/tools/codis2pika/scripts/commands/smismember.json new file mode 100644 index 0000000000..cb4dd2e672 --- /dev/null +++ b/tools/codis2pika/scripts/commands/smismember.json @@ -0,0 +1,49 @@ +{ + "SMISMEMBER": { + "summary": "Returns the membership associated with the given elements for a set", + "complexity": "O(N) where N is the number of elements being checked for 
membership", + "group": "set", + "since": "6.2.0", + "arity": -3, + "function": "smismemberCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/smove.json b/tools/codis2pika/scripts/commands/smove.json new file mode 100644 index 0000000000..de5756de91 --- /dev/null +++ b/tools/codis2pika/scripts/commands/smove.json @@ -0,0 +1,72 @@ +{ + "SMOVE": { + "summary": "Move a member from one set to another", + "complexity": "O(1)", + "group": "set", + "since": "1.0.0", + "arity": 4, + "function": "smoveCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "member", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sort.json b/tools/codis2pika/scripts/commands/sort.json new file mode 100644 index 0000000000..3f077e0e55 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sort.json @@ -0,0 +1,136 @@ +{ + "SORT": { + "summary": "Sort the elements in a list, set or sorted set", + "complexity": "O(N+M*log(M)) where N 
is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is O(N).", + "group": "generic", + "since": "1.0.0", + "arity": -2, + "function": "sortCommand", + "get_keys_function": "sortGetKeys", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SET", + "SORTEDSET", + "LIST", + "DANGEROUS" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "notes": "For the optional BY/GET keyword. It is marked 'unknown' because the key names derive from the content of the key we sort", + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "unknown": null + }, + "find_keys": { + "unknown": null + } + }, + { + "notes": "For the optional STORE keyword. It is marked 'unknown' because the keyword can appear anywhere in the argument array", + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "unknown": null + }, + "find_keys": { + "unknown": null + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "token": "BY", + "name": "pattern", + "type": "pattern", + "key_spec_index": 1, + "optional": true + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + }, + { + "token": "GET", + "name": "pattern", + "key_spec_index": 1, + "type": "pattern", + "optional": true, + "multiple": true, + "multiple_token": true + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "sorting", + "token": "ALPHA", + "type": "pure-token", + "optional": true + }, + { 
+ "token": "STORE", + "name": "destination", + "type": "key", + "key_spec_index": 2, + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sort_ro.json b/tools/codis2pika/scripts/commands/sort_ro.json new file mode 100644 index 0000000000..83b48d1b30 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sort_ro.json @@ -0,0 +1,115 @@ +{ + "SORT_RO": { + "summary": "Sort the elements in a list, set or sorted set. Read-only variant of SORT.", + "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is O(N).", + "group": "generic", + "since": "7.0.0", + "arity": -2, + "function": "sortroCommand", + "get_keys_function": "sortROGetKeys", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET", + "SORTEDSET", + "LIST", + "DANGEROUS" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "notes": "For the optional BY/GET keyword. 
It is marked 'unknown' because the key names derive from the content of the key we sort", + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "unknown": null + }, + "find_keys": { + "unknown": null + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "token": "BY", + "name": "pattern", + "type": "pattern", + "key_spec_index": 1, + "optional": true + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + }, + { + "token": "GET", + "name": "pattern", + "key_spec_index": 1, + "type": "pattern", + "optional": true, + "multiple": true, + "multiple_token": true + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "sorting", + "token": "ALPHA", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/spop.json b/tools/codis2pika/scripts/commands/spop.json new file mode 100644 index 0000000000..c93e426e1c --- /dev/null +++ b/tools/codis2pika/scripts/commands/spop.json @@ -0,0 +1,60 @@ +{ + "SPOP": { + "summary": "Remove and return one or multiple random members from a set", + "complexity": "Without the count argument O(1), otherwise O(N) where N is the value of the passed count.", + "group": "set", + "since": "1.0.0", + "arity": -2, + "function": "spopCommand", + "history": [ + [ + "3.2.0", + "Added the `count` argument." 
+ ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "SET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true, + "since": "3.2.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/spublish.json b/tools/codis2pika/scripts/commands/spublish.json new file mode 100644 index 0000000000..6ed748f959 --- /dev/null +++ b/tools/codis2pika/scripts/commands/spublish.json @@ -0,0 +1,46 @@ +{ + "SPUBLISH": { + "summary": "Post a message to a shard channel", + "complexity": "O(N) where N is the number of clients subscribed to the receiving shard channel.", + "group": "pubsub", + "since": "7.0.0", + "arity": 3, + "function": "spublishCommand", + "command_flags": [ + "PUBSUB", + "LOADING", + "STALE", + "FAST", + "MAY_REPLICATE" + ], + "arguments": [ + { + "name": "shardchannel", + "type": "string" + }, + { + "name": "message", + "type": "string" + } + ], + "key_specs": [ + { + "flags": [ + "NOT_KEY" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/srandmember.json b/tools/codis2pika/scripts/commands/srandmember.json new file mode 100644 index 0000000000..67efc87cad --- /dev/null +++ b/tools/codis2pika/scripts/commands/srandmember.json @@ -0,0 +1,58 @@ +{ + "SRANDMEMBER": { + "summary": "Get one or multiple random members from a set", + "complexity": "Without the count argument O(1), otherwise O(N) where N is the absolute value of the passed count.", + "group": "set", + "since": "1.0.0", + "arity": -2, + "function": "srandmemberCommand", + 
"history": [ + [ + "2.6.0", + "Added the optional `count` argument." + ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true, + "since": "2.6.0" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/srem.json b/tools/codis2pika/scripts/commands/srem.json new file mode 100644 index 0000000000..82433a4a89 --- /dev/null +++ b/tools/codis2pika/scripts/commands/srem.json @@ -0,0 +1,55 @@ +{ + "SREM": { + "summary": "Remove one or more members from a set", + "complexity": "O(N) where N is the number of members to be removed.", + "group": "set", + "since": "1.0.0", + "arity": -3, + "function": "sremCommand", + "history": [ + [ + "2.4.0", + "Accepts multiple `member` arguments." + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sscan.json b/tools/codis2pika/scripts/commands/sscan.json new file mode 100644 index 0000000000..f8f21ad0fd --- /dev/null +++ b/tools/codis2pika/scripts/commands/sscan.json @@ -0,0 +1,62 @@ +{ + "SSCAN": { + "summary": "Incrementally iterate Set elements", + "complexity": "O(1) for every call. 
O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", + "group": "set", + "since": "2.8.0", + "arity": -3, + "function": "sscanCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "cursor", + "type": "integer" + }, + { + "token": "MATCH", + "name": "pattern", + "type": "pattern", + "optional": true + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/ssubscribe.json b/tools/codis2pika/scripts/commands/ssubscribe.json new file mode 100644 index 0000000000..a63d520f18 --- /dev/null +++ b/tools/codis2pika/scripts/commands/ssubscribe.json @@ -0,0 +1,42 @@ +{ + "SSUBSCRIBE": { + "summary": "Listen for messages published to the given shard channels", + "complexity": "O(N) where N is the number of shard channels to subscribe to.", + "group": "pubsub", + "since": "7.0.0", + "arity": -2, + "function": "ssubscribeCommand", + "command_flags": [ + "PUBSUB", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "arguments": [ + { + "name": "shardchannel", + "type": "string", + "multiple": true + } + ], + "key_specs": [ + { + "flags": [ + "NOT_KEY" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/strlen.json b/tools/codis2pika/scripts/commands/strlen.json new file mode 100644 index 0000000000..a5e2d6ffb8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/strlen.json @@ 
-0,0 +1,43 @@ +{ + "STRLEN": { + "summary": "Get the length of the value stored in a key", + "complexity": "O(1)", + "group": "string", + "since": "2.2.0", + "arity": 2, + "function": "strlenCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/subscribe.json b/tools/codis2pika/scripts/commands/subscribe.json new file mode 100644 index 0000000000..fa6ac076a2 --- /dev/null +++ b/tools/codis2pika/scripts/commands/subscribe.json @@ -0,0 +1,25 @@ +{ + "SUBSCRIBE": { + "summary": "Listen for messages published to the given channels", + "complexity": "O(N) where N is the number of channels to subscribe to.", + "group": "pubsub", + "since": "2.0.0", + "arity": -2, + "function": "subscribeCommand", + "history": [], + "command_flags": [ + "PUBSUB", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "channel", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/substr.json b/tools/codis2pika/scripts/commands/substr.json new file mode 100644 index 0000000000..9f3f2bf03b --- /dev/null +++ b/tools/codis2pika/scripts/commands/substr.json @@ -0,0 +1,56 @@ +{ + "SUBSTR": { + "summary": "Get a substring of the string stored at a key", + "complexity": "O(N) where N is the length of the returned string. 
The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings.", + "group": "string", + "since": "1.0.0", + "arity": 4, + "function": "getrangeCommand", + "deprecated_since": "2.0.0", + "replaced_by": "`GETRANGE`", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STRING" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "end", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sunion.json b/tools/codis2pika/scripts/commands/sunion.json new file mode 100644 index 0000000000..9bdccacd45 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sunion.json @@ -0,0 +1,47 @@ +{ + "SUNION": { + "summary": "Add multiple sets", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "group": "set", + "since": "1.0.0", + "arity": -2, + "function": "sunionCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT_ORDER" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sunionstore.json b/tools/codis2pika/scripts/commands/sunionstore.json new file mode 100644 index 0000000000..f4ef0b3b24 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sunionstore.json @@ -0,0 +1,68 @@ +{ + 
"SUNIONSTORE": { + "summary": "Add multiple sets and store the resulting set in a key", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "group": "set", + "since": "1.0.0", + "arity": -3, + "function": "sunionstoreCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SET" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sunsubscribe.json b/tools/codis2pika/scripts/commands/sunsubscribe.json new file mode 100644 index 0000000000..df9ae9cacd --- /dev/null +++ b/tools/codis2pika/scripts/commands/sunsubscribe.json @@ -0,0 +1,43 @@ +{ + "SUNSUBSCRIBE": { + "summary": "Stop listening for messages posted to the given shard channels", + "complexity": "O(N) where N is the number of clients already subscribed to a shard channel.", + "group": "pubsub", + "since": "7.0.0", + "arity": -1, + "function": "sunsubscribeCommand", + "command_flags": [ + "PUBSUB", + "NOSCRIPT", + "LOADING", + "STALE" + ], + "arguments": [ + { + "name": "shardchannel", + "type": "string", + "optional": true, + "multiple": true + } + ], + "key_specs": [ + { + "flags": [ + "NOT_KEY" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/swapdb.json b/tools/codis2pika/scripts/commands/swapdb.json new file mode 100644 index 
0000000000..6ea2baeaac --- /dev/null +++ b/tools/codis2pika/scripts/commands/swapdb.json @@ -0,0 +1,28 @@ +{ + "SWAPDB": { + "summary": "Swaps two Redis databases", + "complexity": "O(N) where N is the count of clients watching or blocking on keys from both databases.", + "group": "server", + "since": "4.0.0", + "arity": 3, + "function": "swapdbCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE", + "DANGEROUS" + ], + "arguments": [ + { + "name": "index1", + "type": "integer" + }, + { + "name": "index2", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/sync.json b/tools/codis2pika/scripts/commands/sync.json new file mode 100644 index 0000000000..85672d6ed7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/sync.json @@ -0,0 +1,15 @@ +{ + "SYNC": { + "summary": "Internal command used for replication", + "group": "server", + "since": "1.0.0", + "arity": 1, + "function": "syncCommand", + "command_flags": [ + "NO_ASYNC_LOADING", + "ADMIN", + "NO_MULTI", + "NOSCRIPT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/time.json b/tools/codis2pika/scripts/commands/time.json new file mode 100644 index 0000000000..b5711a3ba2 --- /dev/null +++ b/tools/codis2pika/scripts/commands/time.json @@ -0,0 +1,18 @@ +{ + "TIME": { + "summary": "Return the current server time", + "complexity": "O(1)", + "group": "server", + "since": "2.6.0", + "arity": 1, + "function": "timeCommand", + "command_flags": [ + "LOADING", + "STALE", + "FAST" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/touch.json b/tools/codis2pika/scripts/commands/touch.json new file mode 100644 index 0000000000..ef4c1c9262 --- /dev/null +++ b/tools/codis2pika/scripts/commands/touch.json @@ -0,0 +1,48 @@ +{ + "TOUCH": { + "summary": "Alters the last access time of a key(s). 
Returns the number of existing keys specified.", + "complexity": "O(N) where N is the number of keys that will be touched.", + "group": "generic", + "since": "3.2.1", + "arity": -2, + "function": "touchCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "REQUEST_POLICY:MULTI_SHARD", + "RESPONSE_POLICY:AGG_SUM" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/ttl.json b/tools/codis2pika/scripts/commands/ttl.json new file mode 100644 index 0000000000..36297eeb90 --- /dev/null +++ b/tools/codis2pika/scripts/commands/ttl.json @@ -0,0 +1,53 @@ +{ + "TTL": { + "summary": "Get the time to live for a key in seconds", + "complexity": "O(1)", + "group": "generic", + "since": "1.0.0", + "arity": 2, + "function": "ttlCommand", + "history": [ + [ + "2.8.0", + "Added the -2 reply." 
+ ] + ], + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/type.json b/tools/codis2pika/scripts/commands/type.json new file mode 100644 index 0000000000..df8e453529 --- /dev/null +++ b/tools/codis2pika/scripts/commands/type.json @@ -0,0 +1,43 @@ +{ + "TYPE": { + "summary": "Determine the type stored at key", + "complexity": "O(1)", + "group": "generic", + "since": "1.0.0", + "arity": 2, + "function": "typeCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/unlink.json b/tools/codis2pika/scripts/commands/unlink.json new file mode 100644 index 0000000000..511e728a28 --- /dev/null +++ b/tools/codis2pika/scripts/commands/unlink.json @@ -0,0 +1,49 @@ +{ + "UNLINK": { + "summary": "Delete a key asynchronously in another thread. Otherwise it is just as DEL, but non blocking.", + "complexity": "O(1) for each key removed regardless of its size. 
Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of.", + "group": "generic", + "since": "4.0.0", + "arity": -2, + "function": "unlinkCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "KEYSPACE" + ], + "command_tips": [ + "REQUEST_POLICY:MULTI_SHARD", + "RESPONSE_POLICY:AGG_SUM" + ], + "key_specs": [ + { + "flags": [ + "RM", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/unsubscribe.json b/tools/codis2pika/scripts/commands/unsubscribe.json new file mode 100644 index 0000000000..d98aaa2786 --- /dev/null +++ b/tools/codis2pika/scripts/commands/unsubscribe.json @@ -0,0 +1,25 @@ +{ + "UNSUBSCRIBE": { + "summary": "Stop listening for messages posted to the given channels", + "complexity": "O(N) where N is the number of clients already subscribed to a channel.", + "group": "pubsub", + "since": "2.0.0", + "arity": -1, + "function": "unsubscribeCommand", + "command_flags": [ + "PUBSUB", + "NOSCRIPT", + "LOADING", + "STALE", + "SENTINEL" + ], + "arguments": [ + { + "name": "channel", + "type": "string", + "optional": true, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/unwatch.json b/tools/codis2pika/scripts/commands/unwatch.json new file mode 100644 index 0000000000..820ea5b93e --- /dev/null +++ b/tools/codis2pika/scripts/commands/unwatch.json @@ -0,0 +1,20 @@ +{ + "UNWATCH": { + "summary": "Forget about all watched keys", + "complexity": "O(1)", + "group": "transactions", + "since": "2.2.0", + "arity": 1, + "function": "unwatchCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "ALLOW_BUSY" + ], + 
"acl_categories": [ + "TRANSACTION" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/wait.json b/tools/codis2pika/scripts/commands/wait.json new file mode 100644 index 0000000000..4930932d93 --- /dev/null +++ b/tools/codis2pika/scripts/commands/wait.json @@ -0,0 +1,30 @@ +{ + "WAIT": { + "summary": "Wait for the synchronous replication of all the write commands sent in the context of the current connection", + "complexity": "O(1)", + "group": "generic", + "since": "3.0.0", + "arity": 3, + "function": "waitCommand", + "command_flags": [ + "NOSCRIPT" + ], + "acl_categories": [ + "CONNECTION" + ], + "command_tips": [ + "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:AGG_MIN" + ], + "arguments": [ + { + "name": "numreplicas", + "type": "integer" + }, + { + "name": "timeout", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/watch.json b/tools/codis2pika/scripts/commands/watch.json new file mode 100644 index 0000000000..0a9e3703e7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/watch.json @@ -0,0 +1,47 @@ +{ + "WATCH": { + "summary": "Watch the given keys to determine execution of the MULTI/EXEC block", + "complexity": "O(1) for every key.", + "group": "transactions", + "since": "2.2.0", + "arity": -2, + "function": "watchCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE", + "FAST", + "ALLOW_BUSY" + ], + "acl_categories": [ + "TRANSACTION" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xack.json b/tools/codis2pika/scripts/commands/xack.json new file mode 100644 index 0000000000..b9d0aa4ddb --- /dev/null +++ b/tools/codis2pika/scripts/commands/xack.json @@ -0,0 +1,53 @@ +{ + "XACK": { + "summary": "Marks a 
pending message as correctly processed, effectively removing it from the pending entries list of the consumer group. Return value of the command is the number of messages successfully acknowledged, that is, the IDs we were actually able to resolve in the PEL.", + "complexity": "O(1) for each message ID processed.", + "group": "stream", + "since": "5.0.0", + "arity": -4, + "function": "xackCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "ID", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xadd.json b/tools/codis2pika/scripts/commands/xadd.json new file mode 100644 index 0000000000..e8914ebb36 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xadd.json @@ -0,0 +1,148 @@ +{ + "XADD": { + "summary": "Appends a new entry to a stream", + "complexity": "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted.", + "group": "stream", + "since": "5.0.0", + "arity": -5, + "function": "xaddCommand", + "history": [ + [ + "6.2.0", + "Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option." + ], + [ + "7.0.0", + "Added support for the `-*` explicit ID form." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STREAM" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "notes": "UPDATE instead of INSERT because of the optional trimming feature", + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "token": "NOMKSTREAM", + "name": "nomkstream", + "type": "pure-token", + "optional": true, + "since": "6.2.0" + }, + { + "name": "trim", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "strategy", + "type": "oneof", + "arguments": [ + { + "name": "maxlen", + "type": "pure-token", + "token": "MAXLEN" + }, + { + "name": "minid", + "type": "pure-token", + "token": "MINID", + "since": "6.2.0" + } + ] + }, + { + "name": "operator", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "equal", + "type": "pure-token", + "token": "=" + }, + { + "name": "approximately", + "type": "pure-token", + "token": "~" + } + ] + }, + { + "name": "threshold", + "type": "string" + }, + { + "token": "LIMIT", + "name": "count", + "type": "integer", + "optional": true, + "since": "6.2.0" + } + ] + }, + { + "name": "id_or_auto", + "type": "oneof", + "arguments": [ + { + "name": "auto_id", + "type": "pure-token", + "token": "*" + }, + { + "name": "ID", + "type": "string" + } + ] + }, + { + "name": "field_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xautoclaim.json b/tools/codis2pika/scripts/commands/xautoclaim.json new file mode 100644 index 0000000000..726bf38fec --- /dev/null +++ b/tools/codis2pika/scripts/commands/xautoclaim.json @@ -0,0 +1,81 @@ +{ + 
"XAUTOCLAIM": { + "summary": "Changes (or acquires) ownership of messages in a consumer group, as if the messages were delivered to the specified consumer.", + "complexity": "O(1) if COUNT is small.", + "group": "stream", + "since": "6.2.0", + "arity": -6, + "function": "xautoclaimCommand", + "history": [ + [ + "7.0.0", + "Added an element to the reply array, containing deleted entries the command cleared from the PEL" + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "STREAM" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "consumer", + "type": "string" + }, + { + "name": "min-idle-time", + "type": "string" + }, + { + "name": "start", + "type": "string" + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + }, + { + "name": "justid", + "token": "JUSTID", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xclaim.json b/tools/codis2pika/scripts/commands/xclaim.json new file mode 100644 index 0000000000..5dcf8d508b --- /dev/null +++ b/tools/codis2pika/scripts/commands/xclaim.json @@ -0,0 +1,94 @@ +{ + "XCLAIM": { + "summary": "Changes (or acquires) ownership of a message in a consumer group, as if the message was delivered to the specified consumer.", + "complexity": "O(log N) with N being the number of messages in the PEL of the consumer group.", + "group": "stream", + "since": "5.0.0", + "arity": -6, + "function": "xclaimCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "STREAM" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + 
"RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "consumer", + "type": "string" + }, + { + "name": "min-idle-time", + "type": "string" + }, + { + "name": "ID", + "type": "string", + "multiple": true + }, + { + "token": "IDLE", + "name": "ms", + "type": "integer", + "optional": true + }, + { + "token": "TIME", + "name": "unix-time-milliseconds", + "type": "unix-time", + "optional": true + }, + { + "token": "RETRYCOUNT", + "name": "count", + "type": "integer", + "optional": true + }, + { + "name": "force", + "token": "FORCE", + "type": "pure-token", + "optional": true + }, + { + "name": "justid", + "token": "JUSTID", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xdel.json b/tools/codis2pika/scripts/commands/xdel.json new file mode 100644 index 0000000000..061ea80c05 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xdel.json @@ -0,0 +1,49 @@ +{ + "XDEL": { + "summary": "Removes the specified entries from the stream. 
Returns the number of items actually deleted, that may be different from the number of IDs passed in case certain IDs do not exist.", + "complexity": "O(1) for each single item to delete in the stream, regardless of the stream size.", + "group": "stream", + "since": "5.0.0", + "arity": -3, + "function": "xdelCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "ID", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xgroup-create.json b/tools/codis2pika/scripts/commands/xgroup-create.json new file mode 100644 index 0000000000..2b1ee03b46 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xgroup-create.json @@ -0,0 +1,82 @@ +{ + "CREATE": { + "summary": "Create a consumer group.", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": -5, + "container": "XGROUP", + "function": "xgroupCommand", + "history": [ + [ + "7.0.0", + "Added the `entries_read` named argument." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "id", + "type": "oneof", + "arguments": [ + { + "name": "ID", + "type": "string" + }, + { + "name": "new_id", + "type": "pure-token", + "token": "$" + } + ] + }, + { + "token": "MKSTREAM", + "name": "mkstream", + "type": "pure-token", + "optional": true + }, + { + "token": "ENTRIESREAD", + "name": "entries_read", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xgroup-createconsumer.json b/tools/codis2pika/scripts/commands/xgroup-createconsumer.json new file mode 100644 index 0000000000..30c450e639 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xgroup-createconsumer.json @@ -0,0 +1,53 @@ +{ + "CREATECONSUMER": { + "summary": "Create a consumer in a consumer group.", + "complexity": "O(1)", + "group": "stream", + "since": "6.2.0", + "arity": 5, + "container": "XGROUP", + "function": "xgroupCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "INSERT" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "consumername", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xgroup-delconsumer.json b/tools/codis2pika/scripts/commands/xgroup-delconsumer.json new file mode 100644 index 0000000000..e98c3bbc8e --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/xgroup-delconsumer.json @@ -0,0 +1,52 @@ +{ + "DELCONSUMER": { + "summary": "Delete a consumer from a consumer group.", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": 5, + "container": "XGROUP", + "function": "xgroupCommand", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "consumername", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xgroup-destroy.json b/tools/codis2pika/scripts/commands/xgroup-destroy.json new file mode 100644 index 0000000000..79be27abf7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xgroup-destroy.json @@ -0,0 +1,48 @@ +{ + "DESTROY": { + "summary": "Destroy a consumer group.", + "complexity": "O(N) where N is the number of entries in the group's pending entries list (PEL).", + "group": "stream", + "since": "5.0.0", + "arity": 4, + "container": "XGROUP", + "function": "xgroupCommand", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xgroup-help.json b/tools/codis2pika/scripts/commands/xgroup-help.json new file mode 100644 index 0000000000..4c5a2b9572 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xgroup-help.json @@ -0,0 +1,18 @@ +{ + "HELP": { + "summary": 
"Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": 2, + "container": "XGROUP", + "function": "xgroupCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "STREAM" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xgroup-setid.json b/tools/codis2pika/scripts/commands/xgroup-setid.json new file mode 100644 index 0000000000..af4b83c190 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xgroup-setid.json @@ -0,0 +1,75 @@ +{ + "SETID": { + "summary": "Set a consumer group to an arbitrary last delivered ID value.", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": -5, + "container": "XGROUP", + "function": "xgroupCommand", + "history": [ + [ + "7.0.0", + "Added the optional `entries_read` argument." + ] + ], + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "id", + "type": "oneof", + "arguments": [ + { + "name": "ID", + "type": "string" + }, + { + "name": "new_id", + "type": "pure-token", + "token": "$" + } + ] + }, + { + "name": "entries_read", + "token": "ENTRIESREAD", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xgroup.json b/tools/codis2pika/scripts/commands/xgroup.json new file mode 100644 index 0000000000..2af53cfccd --- /dev/null +++ b/tools/codis2pika/scripts/commands/xgroup.json @@ -0,0 +1,9 @@ +{ + "XGROUP": { + "summary": "A container for consumer groups commands", + "complexity": "Depends on subcommand.", + "group": "stream", + "since": "5.0.0", + "arity": -2 + } +} diff --git 
a/tools/codis2pika/scripts/commands/xinfo-consumers.json b/tools/codis2pika/scripts/commands/xinfo-consumers.json new file mode 100644 index 0000000000..634b52887c --- /dev/null +++ b/tools/codis2pika/scripts/commands/xinfo-consumers.json @@ -0,0 +1,51 @@ +{ + "CONSUMERS": { + "summary": "List the consumers in a consumer group", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": 4, + "container": "XINFO", + "function": "xinfoCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STREAM" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xinfo-groups.json b/tools/codis2pika/scripts/commands/xinfo-groups.json new file mode 100644 index 0000000000..e9b61ba069 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xinfo-groups.json @@ -0,0 +1,50 @@ +{ + "GROUPS": { + "summary": "List the consumer groups of a stream", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": 3, + "container": "XINFO", + "history": [ + [ + "7.0.0", + "Added the `entries-read` and `lag` fields" + ] + ], + "function": "xinfoCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xinfo-help.json b/tools/codis2pika/scripts/commands/xinfo-help.json new file mode 100644 index 
0000000000..e11468353c --- /dev/null +++ b/tools/codis2pika/scripts/commands/xinfo-help.json @@ -0,0 +1,18 @@ +{ + "HELP": { + "summary": "Show helpful text about the different subcommands", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": 2, + "container": "XINFO", + "function": "xinfoCommand", + "command_flags": [ + "LOADING", + "STALE" + ], + "acl_categories": [ + "STREAM" + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xinfo-stream.json b/tools/codis2pika/scripts/commands/xinfo-stream.json new file mode 100644 index 0000000000..36c427fbe6 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xinfo-stream.json @@ -0,0 +1,68 @@ +{ + "STREAM": { + "summary": "Get information about a stream", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": -3, + "container": "XINFO", + "history": [ + [ + "6.0.0", + "Added the `FULL` modifier." + ], + [ + "7.0.0", + "Added the `max-deleted-entry-id`, `entries-added`, `recorded-first-entry-id`, `entries-read` and `lag` fields" + ] + ], + "function": "xinfoCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "full", + "token": "FULL", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xinfo.json b/tools/codis2pika/scripts/commands/xinfo.json new file mode 100644 index 0000000000..c969e59a75 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xinfo.json @@ -0,0 +1,9 @@ +{ + "XINFO": { + "summary": "A container for stream introspection commands", + "complexity": "Depends on subcommand.", + 
"group": "stream", + "since": "5.0.0", + "arity": -2 + } +} diff --git a/tools/codis2pika/scripts/commands/xlen.json b/tools/codis2pika/scripts/commands/xlen.json new file mode 100644 index 0000000000..9adc261f7f --- /dev/null +++ b/tools/codis2pika/scripts/commands/xlen.json @@ -0,0 +1,43 @@ +{ + "XLEN": { + "summary": "Return the number of entries in a stream", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": 2, + "function": "xlenCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xpending.json b/tools/codis2pika/scripts/commands/xpending.json new file mode 100644 index 0000000000..cd8ee8dd2d --- /dev/null +++ b/tools/codis2pika/scripts/commands/xpending.json @@ -0,0 +1,87 @@ +{ + "XPENDING": { + "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.", + "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer.", + "group": "stream", + "since": "5.0.0", + "arity": -3, + "function": "xpendingCommand", + "history": [ + [ + "6.2.0", + "Added the `IDLE` option and exclusive range intervals." 
+ ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STREAM" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "filters", + "type": "block", + "optional": true, + "arguments": [ + { + "token": "IDLE", + "name": "min-idle-time", + "type": "integer", + "optional": true, + "since": "6.2.0" + }, + { + "name": "start", + "type": "string" + }, + { + "name": "end", + "type": "string" + }, + { + "name": "count", + "type": "integer" + }, + { + "name": "consumer", + "type": "string", + "optional": true + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xrange.json b/tools/codis2pika/scripts/commands/xrange.json new file mode 100644 index 0000000000..9a3ddeac0d --- /dev/null +++ b/tools/codis2pika/scripts/commands/xrange.json @@ -0,0 +1,63 @@ +{ + "XRANGE": { + "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval", + "complexity": "O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).", + "group": "stream", + "since": "5.0.0", + "arity": -4, + "function": "xrangeCommand", + "history": [ + [ + "6.2.0", + "Added exclusive ranges." 
+ ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "string" + }, + { + "name": "end", + "type": "string" + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xread.json b/tools/codis2pika/scripts/commands/xread.json new file mode 100644 index 0000000000..8f66d7ee5b --- /dev/null +++ b/tools/codis2pika/scripts/commands/xread.json @@ -0,0 +1,72 @@ +{ + "XREAD": { + "summary": "Return never seen elements in multiple streams, with IDs greater than the ones reported by the caller for each stream. Can block.", + "complexity": "For each stream mentioned: O(N) with N being the number of elements being returned, it means that XREAD-ing with a fixed COUNT is O(1). 
Note that when the BLOCK option is used, XADD will pay O(M) time in order to serve the M clients blocked on the stream getting new data.", + "group": "stream", + "since": "5.0.0", + "arity": -4, + "function": "xreadCommand", + "get_keys_function": "xreadGetKeys", + "command_flags": [ + "BLOCKING", + "READONLY", + "BLOCKING" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "keyword": { + "keyword": "STREAMS", + "startfrom": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 2 + } + } + } + ], + "arguments": [ + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + }, + { + "token": "BLOCK", + "name": "milliseconds", + "type": "integer", + "optional": true + }, + { + "name": "streams", + "token": "STREAMS", + "type": "block", + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "ID", + "type": "string", + "multiple": true + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xreadgroup.json b/tools/codis2pika/scripts/commands/xreadgroup.json new file mode 100644 index 0000000000..a148147559 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xreadgroup.json @@ -0,0 +1,92 @@ +{ + "XREADGROUP": { + "summary": "Return new entries from a stream using a consumer group, or access the history of the pending entries for a given consumer. Can block.", + "complexity": "For each stream mentioned: O(M) with M being the number of elements returned. If M is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1). 
On the other side when XREADGROUP blocks, XADD will pay the O(N) time in order to serve the N clients blocked on the stream getting new data.", + "group": "stream", + "since": "5.0.0", + "arity": -7, + "function": "xreadCommand", + "get_keys_function": "xreadGetKeys", + "command_flags": [ + "BLOCKING", + "WRITE" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "keyword": { + "keyword": "STREAMS", + "startfrom": 4 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 2 + } + } + } + ], + "arguments": [ + { + "token": "GROUP", + "name": "group_consumer", + "type": "block", + "arguments": [ + { + "name": "group", + "type": "string" + }, + { + "name": "consumer", + "type": "string" + } + ] + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + }, + { + "token": "BLOCK", + "name": "milliseconds", + "type": "integer", + "optional": true + }, + { + "name": "noack", + "token": "NOACK", + "type": "pure-token", + "optional": true + }, + { + "name": "streams", + "token": "STREAMS", + "type": "block", + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "ID", + "type": "string", + "multiple": true + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xrevrange.json b/tools/codis2pika/scripts/commands/xrevrange.json new file mode 100644 index 0000000000..65d81db813 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xrevrange.json @@ -0,0 +1,63 @@ +{ + "XREVRANGE": { + "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval, in reverse order (from greater to smaller IDs) compared to XRANGE", + "complexity": "O(N) with N being the number of elements returned. If N is constant (e.g. 
always asking for the first 10 elements with COUNT), you can consider it O(1).", + "group": "stream", + "since": "5.0.0", + "arity": -4, + "function": "xrevrangeCommand", + "history": [ + [ + "6.2.0", + "Added exclusive ranges." + ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "end", + "type": "string" + }, + { + "name": "start", + "type": "string" + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xsetid.json b/tools/codis2pika/scripts/commands/xsetid.json new file mode 100644 index 0000000000..7654784e11 --- /dev/null +++ b/tools/codis2pika/scripts/commands/xsetid.json @@ -0,0 +1,67 @@ +{ + "XSETID": { + "summary": "An internal command for replicating stream values", + "complexity": "O(1)", + "group": "stream", + "since": "5.0.0", + "arity": -3, + "function": "xsetidCommand", + "history": [ + [ + "7.0.0", + "Added the `entries_added` and `max_deleted_entry_id` arguments." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "STREAM" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "last-id", + "type": "string" + }, + { + "name": "entries_added", + "token": "ENTRIESADDED", + "type": "integer", + "optional": true + }, + { + "name": "max_deleted_entry_id", + "token": "MAXDELETEDID", + "type": "string", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/xtrim.json b/tools/codis2pika/scripts/commands/xtrim.json new file mode 100644 index 0000000000..03c48ebb5b --- /dev/null +++ b/tools/codis2pika/scripts/commands/xtrim.json @@ -0,0 +1,103 @@ +{ + "XTRIM": { + "summary": "Trims the stream to (approximately if '~' is passed) a certain size", + "complexity": "O(N), with N being the number of evicted entries. Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.", + "group": "stream", + "since": "5.0.0", + "arity": -4, + "function": "xtrimCommand", + "history": [ + [ + "6.2.0", + "Added the `MINID` trimming strategy and the `LIMIT` option." 
+ ] + ], + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "STREAM" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "trim", + "type": "block", + "arguments": [ + { + "name": "strategy", + "type": "oneof", + "arguments": [ + { + "name": "maxlen", + "type": "pure-token", + "token": "MAXLEN" + }, + { + "name": "minid", + "type": "pure-token", + "token": "MINID", + "since": "6.2.0" + } + ] + }, + { + "name": "operator", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "equal", + "type": "pure-token", + "token": "=" + }, + { + "name": "approximately", + "type": "pure-token", + "token": "~" + } + ] + }, + { + "name": "threshold", + "type": "string" + }, + { + "token": "LIMIT", + "name": "count", + "type": "integer", + "optional": true, + "since": "6.2.0" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zadd.json b/tools/codis2pika/scripts/commands/zadd.json new file mode 100644 index 0000000000..300db1180f --- /dev/null +++ b/tools/codis2pika/scripts/commands/zadd.json @@ -0,0 +1,124 @@ +{ + "ZADD": { + "summary": "Add one or more members to a sorted set, or update its score if it already exists", + "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", + "group": "sorted_set", + "since": "1.2.0", + "arity": -4, + "function": "zaddCommand", + "history": [ + [ + "2.4.0", + "Accepts multiple elements." + ], + [ + "3.0.2", + "Added the `XX`, `NX`, `CH` and `INCR` options." + ], + [ + "6.2.0", + "Added the `GT` and `LT` options." 
+ ] + ], + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "condition", + "type": "oneof", + "optional": true, + "since": "3.0.2", + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + } + ] + }, + { + "name": "comparison", + "type": "oneof", + "optional": true, + "since": "6.2.0", + "arguments": [ + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + }, + { + "name": "change", + "token": "CH", + "type": "pure-token", + "optional": true, + "since": "3.0.2" + }, + { + "name": "increment", + "token": "INCR", + "type": "pure-token", + "optional": true, + "since": "3.0.2" + }, + { + "name": "score_member", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "score", + "type": "double" + }, + { + "name": "member", + "type": "string" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zcard.json b/tools/codis2pika/scripts/commands/zcard.json new file mode 100644 index 0000000000..84022a7f5e --- /dev/null +++ b/tools/codis2pika/scripts/commands/zcard.json @@ -0,0 +1,43 @@ +{ + "ZCARD": { + "summary": "Get the number of members in a sorted set", + "complexity": "O(1)", + "group": "sorted_set", + "since": "1.2.0", + "arity": 2, + "function": "zcardCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + 
"arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zcount.json b/tools/codis2pika/scripts/commands/zcount.json new file mode 100644 index 0000000000..6572d4a511 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zcount.json @@ -0,0 +1,52 @@ +{ + "ZCOUNT": { + "summary": "Count the members in a sorted set with scores within the given values", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "group": "sorted_set", + "since": "2.0.0", + "arity": 4, + "function": "zcountCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "double" + }, + { + "name": "max", + "type": "double" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zdiff.json b/tools/codis2pika/scripts/commands/zdiff.json new file mode 100644 index 0000000000..3eee28983d --- /dev/null +++ b/tools/codis2pika/scripts/commands/zdiff.json @@ -0,0 +1,55 @@ +{ + "ZDIFF": { + "summary": "Subtract multiple sorted sets", + "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", + "group": "sorted_set", + "since": "6.2.0", + "arity": -3, + "function": "zdiffCommand", + "get_keys_function": "zunionInterDiffGetKeys", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { 
+ "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zdiffstore.json b/tools/codis2pika/scripts/commands/zdiffstore.json new file mode 100644 index 0000000000..7fc0102fd8 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zdiffstore.json @@ -0,0 +1,73 @@ +{ + "ZDIFFSTORE": { + "summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key", + "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", + "group": "sorted_set", + "since": "6.2.0", + "arity": -4, + "function": "zdiffstoreCommand", + "get_keys_function": "zunionInterDiffStoreGetKeys", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zincrby.json b/tools/codis2pika/scripts/commands/zincrby.json new file mode 100644 index 0000000000..2ebafe0a15 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zincrby.json @@ -0,0 +1,54 @@ +{ + "ZINCRBY": { + "summary": "Increment the score of a member in a sorted set", + "complexity": "O(log(N)) 
where N is the number of elements in the sorted set.", + "group": "sorted_set", + "since": "1.2.0", + "arity": 4, + "function": "zincrbyCommand", + "command_flags": [ + "WRITE", + "DENYOOM", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "increment", + "type": "integer" + }, + { + "name": "member", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zinter.json b/tools/codis2pika/scripts/commands/zinter.json new file mode 100644 index 0000000000..b05dc8d3a1 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zinter.json @@ -0,0 +1,85 @@ +{ + "ZINTER": { + "summary": "Intersect multiple sorted sets", + "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", + "group": "sorted_set", + "since": "6.2.0", + "arity": -3, + "function": "zinterCommand", + "get_keys_function": "zunionInterDiffGetKeys", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "token": "WEIGHTS", + "name": "weight", + "type": "integer", + "optional": true, + "multiple": true + }, + { + "token": "AGGREGATE", + "name": "aggregate", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + 
"token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zintercard.json b/tools/codis2pika/scripts/commands/zintercard.json new file mode 100644 index 0000000000..2c2359968d --- /dev/null +++ b/tools/codis2pika/scripts/commands/zintercard.json @@ -0,0 +1,55 @@ +{ + "ZINTERCARD": { + "summary": "Intersect multiple sorted sets and return the cardinality of the result", + "complexity": "O(N*K) worst case with N being the smallest input sorted set, K being the number of input sorted sets.", + "group": "sorted_set", + "since": "7.0.0", + "arity": -3, + "function": "zinterCardCommand", + "get_keys_function": "zunionInterDiffGetKeys", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "token": "LIMIT", + "name": "limit", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zinterstore.json b/tools/codis2pika/scripts/commands/zinterstore.json new file mode 100644 index 0000000000..bd40460ace --- /dev/null +++ b/tools/codis2pika/scripts/commands/zinterstore.json @@ -0,0 +1,103 @@ +{ + "ZINTERSTORE": { + "summary": "Intersect multiple sorted sets and store the resulting sorted set in a new key", + "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted 
set.", + "group": "sorted_set", + "since": "2.0.0", + "arity": -4, + "function": "zinterstoreCommand", + "get_keys_function": "zunionInterDiffStoreGetKeys", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + }, + { + "token": "WEIGHTS", + "name": "weight", + "type": "integer", + "optional": true, + "multiple": true + }, + { + "token": "AGGREGATE", + "name": "aggregate", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + "token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zlexcount.json b/tools/codis2pika/scripts/commands/zlexcount.json new file mode 100644 index 0000000000..5dff46e4ba --- /dev/null +++ b/tools/codis2pika/scripts/commands/zlexcount.json @@ -0,0 +1,52 @@ +{ + "ZLEXCOUNT": { + "summary": "Count the number of members in a sorted set between a given lexicographical range", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "group": "sorted_set", + "since": "2.8.9", + "arity": 4, + "function": "zlexcountCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + 
"begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zmpop.json b/tools/codis2pika/scripts/commands/zmpop.json new file mode 100644 index 0000000000..964d5c842b --- /dev/null +++ b/tools/codis2pika/scripts/commands/zmpop.json @@ -0,0 +1,72 @@ +{ + "ZMPOP": { + "summary": "Remove and return members with scores in a sorted set", + "complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", + "group": "sorted_set", + "since": "7.0.0", + "arity": -4, + "function": "zmpopCommand", + "get_keys_function": "zmpopGetKeys", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zmscore.json b/tools/codis2pika/scripts/commands/zmscore.json new file mode 100644 index 0000000000..4db291fa32 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zmscore.json @@ -0,0 +1,49 @@ +{ + "ZMSCORE": { + "summary": "Get the score associated with the 
given members in a sorted set", + "complexity": "O(N) where N is the number of members being requested.", + "group": "sorted_set", + "since": "6.2.0", + "arity": -3, + "function": "zmscoreCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zpopmax.json b/tools/codis2pika/scripts/commands/zpopmax.json new file mode 100644 index 0000000000..2e792431a7 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zpopmax.json @@ -0,0 +1,50 @@ +{ + "ZPOPMAX": { + "summary": "Remove and return members with the highest scores in a sorted set", + "complexity": "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.", + "group": "sorted_set", + "since": "5.0.0", + "arity": -2, + "function": "zpopmaxCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zpopmin.json b/tools/codis2pika/scripts/commands/zpopmin.json new file mode 100644 index 0000000000..9ccce2fe1e --- /dev/null +++ b/tools/codis2pika/scripts/commands/zpopmin.json @@ -0,0 +1,50 @@ +{ + "ZPOPMIN": { + "summary": "Remove and return members with the lowest scores in a 
sorted set", + "complexity": "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.", + "group": "sorted_set", + "since": "5.0.0", + "arity": -2, + "function": "zpopminCommand", + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "ACCESS", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrandmember.json b/tools/codis2pika/scripts/commands/zrandmember.json new file mode 100644 index 0000000000..e602a154d0 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrandmember.json @@ -0,0 +1,63 @@ +{ + "ZRANDMEMBER": { + "summary": "Get one or multiple random elements from a sorted set", + "complexity": "O(N) where N is the number of elements returned", + "group": "sorted_set", + "since": "6.2.0", + "arity": -2, + "function": "zrandmemberCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "options", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrange.json b/tools/codis2pika/scripts/commands/zrange.json new file mode 100644 index 
0000000000..1c7232a956 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrange.json @@ -0,0 +1,105 @@ +{ + "ZRANGE": { + "summary": "Return a range of members in a sorted set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", + "group": "sorted_set", + "since": "1.2.0", + "arity": -4, + "function": "zrangeCommand", + "history": [ + [ + "6.2.0", + "Added the `REV`, `BYSCORE`, `BYLEX` and `LIMIT` options." + ] + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + }, + { + "name": "sortby", + "type": "oneof", + "optional": true, + "since": "6.2.0", + "arguments": [ + { + "name": "byscore", + "type": "pure-token", + "token": "BYSCORE" + }, + { + "name": "bylex", + "type": "pure-token", + "token": "BYLEX" + } + ] + }, + { + "name": "rev", + "token": "REV", + "type": "pure-token", + "optional": true, + "since": "6.2.0" + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "since": "6.2.0", + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrangebylex.json b/tools/codis2pika/scripts/commands/zrangebylex.json new file mode 100644 index 0000000000..75e82bce69 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrangebylex.json @@ -0,0 +1,72 @@ +{ + "ZRANGEBYLEX": { + "summary": "Return a range of members in a sorted set, by 
lexicographical range", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "group": "sorted_set", + "since": "2.8.9", + "arity": -4, + "function": "zrangebylexCommand", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `BYLEX` argument", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrangebyscore.json b/tools/codis2pika/scripts/commands/zrangebyscore.json new file mode 100644 index 0000000000..d0678776ea --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrangebyscore.json @@ -0,0 +1,85 @@ +{ + "ZRANGEBYSCORE": { + "summary": "Return a range of members in a sorted set, by score", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "group": "sorted_set", + "since": "1.0.5", + "arity": -4, + "function": "zrangebyscoreCommand", + "history": [ + [ + "2.0.0", + "Added the `WITHSCORES` modifier." 
+ ] + ], + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `BYSCORE` argument", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "double" + }, + { + "name": "max", + "type": "double" + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true, + "since": "2.0.0" + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrangestore.json b/tools/codis2pika/scripts/commands/zrangestore.json new file mode 100644 index 0000000000..25d9dd76ef --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrangestore.json @@ -0,0 +1,114 @@ +{ + "ZRANGESTORE": { + "summary": "Store a range of members from sorted set into another key", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key.", + "group": "sorted_set", + "since": "6.2.0", + "arity": -5, + "function": "zrangestoreCommand", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + 
"arguments": [ + { + "name": "dst", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "src", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + }, + { + "name": "sortby", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "byscore", + "type": "pure-token", + "token": "BYSCORE" + }, + { + "name": "bylex", + "type": "pure-token", + "token": "BYLEX" + } + ] + }, + { + "name": "rev", + "token": "REV", + "type": "pure-token", + "optional": true + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrank.json b/tools/codis2pika/scripts/commands/zrank.json new file mode 100644 index 0000000000..6d5154715c --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrank.json @@ -0,0 +1,48 @@ +{ + "ZRANK": { + "summary": "Determine the index of a member in a sorted set", + "complexity": "O(log(N))", + "group": "sorted_set", + "since": "2.0.0", + "arity": 3, + "function": "zrankCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrem.json b/tools/codis2pika/scripts/commands/zrem.json new file mode 100644 index 0000000000..a899400937 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrem.json @@ -0,0 +1,55 @@ +{ + "ZREM": { + "summary": "Remove one or more members from a sorted set", + "complexity": "O(M*log(N)) with N being the 
number of elements in the sorted set and M the number of elements to be removed.", + "group": "sorted_set", + "since": "1.2.0", + "arity": -3, + "function": "zremCommand", + "history": [ + [ + "2.4.0", + "Accepts multiple elements." + ] + ], + "command_flags": [ + "WRITE", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zremrangebylex.json b/tools/codis2pika/scripts/commands/zremrangebylex.json new file mode 100644 index 0000000000..ad7277723d --- /dev/null +++ b/tools/codis2pika/scripts/commands/zremrangebylex.json @@ -0,0 +1,51 @@ +{ + "ZREMRANGEBYLEX": { + "summary": "Remove all members in a sorted set between the given lexicographical range", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", + "group": "sorted_set", + "since": "2.8.9", + "arity": 4, + "function": "zremrangebylexCommand", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zremrangebyrank.json b/tools/codis2pika/scripts/commands/zremrangebyrank.json new file mode 100644 index 0000000000..62e5055b57 --- /dev/null +++ 
b/tools/codis2pika/scripts/commands/zremrangebyrank.json @@ -0,0 +1,51 @@ +{ + "ZREMRANGEBYRANK": { + "summary": "Remove all members in a sorted set within the given indexes", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", + "group": "sorted_set", + "since": "2.0.0", + "arity": 4, + "function": "zremrangebyrankCommand", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zremrangebyscore.json b/tools/codis2pika/scripts/commands/zremrangebyscore.json new file mode 100644 index 0000000000..3f84c33356 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zremrangebyscore.json @@ -0,0 +1,51 @@ +{ + "ZREMRANGEBYSCORE": { + "summary": "Remove all members in a sorted set within the given scores", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", + "group": "sorted_set", + "since": "1.2.0", + "arity": 4, + "function": "zremrangebyscoreCommand", + "command_flags": [ + "WRITE" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RW", + "DELETE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "double" + }, + { + "name": "max", + "type": "double" + } + ] + } +} diff --git 
a/tools/codis2pika/scripts/commands/zrevrange.json b/tools/codis2pika/scripts/commands/zrevrange.json new file mode 100644 index 0000000000..66ddc11467 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrevrange.json @@ -0,0 +1,62 @@ +{ + "ZREVRANGE": { + "summary": "Return a range of members in a sorted set, by index, with scores ordered from high to low", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", + "group": "sorted_set", + "since": "1.2.0", + "arity": -4, + "function": "zrevrangeCommand", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `REV` argument", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrevrangebylex.json b/tools/codis2pika/scripts/commands/zrevrangebylex.json new file mode 100644 index 0000000000..34faa93ae2 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrevrangebylex.json @@ -0,0 +1,72 @@ +{ + "ZREVRANGEBYLEX": { + "summary": "Return a range of members in a sorted set, by lexicographical range, ordered from higher to lower strings.", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "group": "sorted_set", + "since": "2.8.9", + "arity": -4, + "function": "zrevrangebylexCommand", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `REV` and `BYLEX` arguments", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "max", + "type": "string" + }, + { + "name": "min", + "type": "string" + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrevrangebyscore.json b/tools/codis2pika/scripts/commands/zrevrangebyscore.json new file mode 100644 index 0000000000..9a0152cfed --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrevrangebyscore.json @@ -0,0 +1,84 @@ +{ + "ZREVRANGEBYSCORE": { + "summary": "Return a range of members in a sorted set, by score, with scores ordered from high to low", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "group": "sorted_set", + "since": "2.2.0", + "arity": -4, + "function": "zrevrangebyscoreCommand", + "history": [ + [ + "2.1.6", + "`min` and `max` can be exclusive." 
+ ] + ], + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `REV` and `BYSCORE` arguments", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "max", + "type": "double" + }, + { + "name": "min", + "type": "double" + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true + }, + { + "token": "LIMIT", + "name": "offset_count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zrevrank.json b/tools/codis2pika/scripts/commands/zrevrank.json new file mode 100644 index 0000000000..bcd4876954 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zrevrank.json @@ -0,0 +1,48 @@ +{ + "ZREVRANK": { + "summary": "Determine the index of a member in a sorted set, with scores ordered from high to low", + "complexity": "O(log(N))", + "group": "sorted_set", + "since": "2.0.0", + "arity": 3, + "function": "zrevrankCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zscan.json b/tools/codis2pika/scripts/commands/zscan.json new file mode 100644 index 0000000000..6cb9a8d0a3 --- 
/dev/null +++ b/tools/codis2pika/scripts/commands/zscan.json @@ -0,0 +1,62 @@ +{ + "ZSCAN": { + "summary": "Incrementally iterate sorted sets elements and associated scores", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", + "group": "sorted_set", + "since": "2.8.0", + "arity": -3, + "function": "zscanCommand", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "command_tips": [ + "NONDETERMINISTIC_OUTPUT" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "cursor", + "type": "integer" + }, + { + "token": "MATCH", + "name": "pattern", + "type": "pattern", + "optional": true + }, + { + "token": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zscore.json b/tools/codis2pika/scripts/commands/zscore.json new file mode 100644 index 0000000000..5ed3575105 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zscore.json @@ -0,0 +1,48 @@ +{ + "ZSCORE": { + "summary": "Get the score associated with the given member in a sorted set", + "complexity": "O(1)", + "group": "sorted_set", + "since": "1.2.0", + "arity": 3, + "function": "zscoreCommand", + "command_flags": [ + "READONLY", + "FAST" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ] + } +} diff --git 
a/tools/codis2pika/scripts/commands/zunion.json b/tools/codis2pika/scripts/commands/zunion.json new file mode 100644 index 0000000000..cc6c66c093 --- /dev/null +++ b/tools/codis2pika/scripts/commands/zunion.json @@ -0,0 +1,85 @@ +{ + "ZUNION": { + "summary": "Add multiple sorted sets", + "complexity": "O(N)+O(M*log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.", + "group": "sorted_set", + "since": "6.2.0", + "arity": -3, + "function": "zunionCommand", + "get_keys_function": "zunionInterDiffGetKeys", + "command_flags": [ + "READONLY" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "token": "WEIGHTS", + "name": "weight", + "type": "integer", + "optional": true, + "multiple": true + }, + { + "token": "AGGREGATE", + "name": "aggregate", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + "token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "name": "withscores", + "token": "WITHSCORES", + "type": "pure-token", + "optional": true + } + ] + } +} diff --git a/tools/codis2pika/scripts/commands/zunionstore.json b/tools/codis2pika/scripts/commands/zunionstore.json new file mode 100644 index 0000000000..257b06d0ef --- /dev/null +++ b/tools/codis2pika/scripts/commands/zunionstore.json @@ -0,0 +1,103 @@ +{ + "ZUNIONSTORE": { + "summary": "Add multiple sorted sets and store the resulting sorted set in a new key", + "complexity": "O(N)+O(M log(M)) with N being the sum of the sizes 
of the input sorted sets, and M being the number of elements in the resulting sorted set.", + "group": "sorted_set", + "since": "2.0.0", + "arity": -4, + "function": "zunionstoreCommand", + "get_keys_function": "zunionInterDiffStoreGetKeys", + "command_flags": [ + "WRITE", + "DENYOOM" + ], + "acl_categories": [ + "SORTEDSET" + ], + "key_specs": [ + { + "flags": [ + "OW", + "UPDATE" + ], + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "flags": [ + "RO", + "ACCESS" + ], + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + }, + { + "token": "WEIGHTS", + "name": "weight", + "type": "integer", + "optional": true, + "multiple": true + }, + { + "token": "AGGREGATE", + "name": "aggregate", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + "token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + } + ] + } +} diff --git a/tools/codis2pika/scripts/gen_table_go_from_table_json.py b/tools/codis2pika/scripts/gen_table_go_from_table_json.py new file mode 100644 index 0000000000..831a1d6a01 --- /dev/null +++ b/tools/codis2pika/scripts/gen_table_go_from_table_json.py @@ -0,0 +1,56 @@ +import json +import os + +j = json.load(open("table.json", "r")) +fp = open("table.go", "w") +fp.write("package commands\n\nvar containers = map[string]bool{\n") +for container in j["container"]: + fp.write(f'"{container.upper()}": true,\n') +fp.write("}\nvar redisCommands = map[string]redisCommand{\n") + +for group, cmds in j["table"].items(): 
+ group = group.upper() + for cmd_name, specs in cmds.items(): + print(group, cmd_name) + cmd_name = cmd_name.upper() + fp.write(f'"{cmd_name}": ' + "{\n") + fp.write(f'"{group}",\n') + fp.write("[]keySpec{\n") + for key_spec in specs: + fp.write("{\n") + if "index" in key_spec["begin_search"]: + fp.write('"index",\n') + fp.write(f'{key_spec["begin_search"]["index"]["pos"]},\n') + fp.write('"",\n') + fp.write('0,\n') + elif "keyword" in key_spec["begin_search"]: + fp.write('"keyword",\n') + fp.write('0,\n') + fp.write(f'"{key_spec["begin_search"]["keyword"]["keyword"]}",\n') + fp.write(f'{key_spec["begin_search"]["keyword"]["startfrom"]},\n') + else: + raise Exception(key_spec) + if "range" in key_spec["find_keys"]: + fp.write('"range",\n') # type + fp.write(f'{key_spec["find_keys"]["range"]["lastkey"]},\n') # + fp.write(f'{key_spec["find_keys"]["range"]["step"]},\n') # + fp.write(f'{key_spec["find_keys"]["range"]["limit"]},\n') # + fp.write('0,\n') + fp.write('0,\n') + fp.write('0,\n') + elif "keynum" in key_spec["find_keys"]: + fp.write('"keynum",\n') # type + fp.write('0,\n') + fp.write('0,\n') + fp.write('0,\n') + fp.write(f'{key_spec["find_keys"]["keynum"]["keynumidx"]},\n') # + fp.write(f'{key_spec["find_keys"]["keynum"]["firstkey"]},\n') # + fp.write(f'{key_spec["find_keys"]["keynum"]["step"]},\n') # + else: + raise Exception(key_spec) + fp.write('},\n') + fp.write('},\n') + fp.write('},\n') +fp.write('}\n') +fp.close() +os.system("go fmt table.go") diff --git a/tools/codis2pika/scripts/gen_table_json_from_commands.py b/tools/codis2pika/scripts/gen_table_json_from_commands.py new file mode 100644 index 0000000000..8893ca0ff7 --- /dev/null +++ b/tools/codis2pika/scripts/gen_table_json_from_commands.py @@ -0,0 +1,45 @@ +import json +import os + +commands_dir = "./commands" +files = os.listdir(commands_dir) +table = {} +container = set() +for file in files: + j = json.load(open(f"{commands_dir}/{file}")) + cmd_name = list(j.keys())[0] + j = j[cmd_name] + + 
print(cmd_name) + if cmd_name == "SORT" or cmd_name == "MIGRATE": + continue + if "command_flags" not in j: + print(f"{file} No command_flags.") + continue + + flags = j["command_flags"] + group = j["group"] + if (("WRITE" in flags or "MAY_REPLICATE" in flags) and "BLOCKING" not in flags) or cmd_name in ("PING", "SELECT"): + key_specs = [] + if "key_specs" in j: + for key_spec in j["key_specs"]: + begin_search = key_spec["begin_search"] + find_keys = key_spec["find_keys"] + key_specs.append({ + "begin_search": begin_search, + "find_keys": find_keys + }) + if "container" in j: + cmd_name = j["container"] + "-" + cmd_name + container.add(j["container"]) + print(f"group: {group}") + print(f"flags: {flags}") + if group not in table: + table[group] = {} + table[group][cmd_name] = key_specs + +with open("table.json", "w") as f: + json.dump({ + "table": table, + "container": list(container) + }, f, indent=4) diff --git a/tools/codis2pika/scripts/table.json b/tools/codis2pika/scripts/table.json new file mode 100644 index 0000000000..e1379d335d --- /dev/null +++ b/tools/codis2pika/scripts/table.json @@ -0,0 +1,1850 @@ +{ + "table": { + "string": { + "MSETNX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 2, + "limit": 0 + } + } + } + ], + "GETEX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "INCRBYFLOAT": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "MSET": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 2, + "limit": 0 + } + } + } + ], + "SET": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "DECRBY": [ + { + "begin_search": { + 
"index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "INCRBY": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "SETEX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "DECR": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "INCR": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "PSETEX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "GETSET": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "SETRANGE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "APPEND": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "SETNX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "GETDEL": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "set": { + "SDIFFSTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + 
"step": 1, + "limit": 0 + } + } + } + ], + "SINTERSTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "SUNIONSTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "SPOP": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "SADD": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "SREM": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "SMOVE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "geo": { + "GEORADIUS": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "keyword": { + "keyword": "STORE", + "startfrom": 6 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "keyword": { + "keyword": "STOREDIST", + "startfrom": 6 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "GEOADD": [ + { + "begin_search": { + 
"index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "GEOSEARCHSTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "GEORADIUSBYMEMBER": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "keyword": { + "keyword": "STORE", + "startfrom": 5 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "keyword": { + "keyword": "STOREDIST", + "startfrom": 5 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "sorted_set": { + "ZPOPMAX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZREMRANGEBYSCORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZRANGESTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZINTERSTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "ZPOPMIN": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + 
"find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZINCRBY": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZDIFFSTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "ZUNIONSTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "ZREMRANGEBYLEX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZMPOP": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "ZADD": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZREMRANGEBYRANK": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "ZREM": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "list": { + "LMPOP": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "LSET": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, 
+ "limit": 0 + } + } + } + ], + "RPOPLPUSH": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "LTRIM": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "LPUSH": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "LINSERT": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "LREM": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "RPUSH": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "RPOP": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "LPOP": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "LMOVE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "RPUSHX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "LPUSHX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + 
"range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "scripting": { + "FUNCTION-FLUSH": [], + "FUNCTION-DELETE": [], + "FUNCTION-RESTORE": [], + "FUNCTION-LOAD": [], + "EVALSHA": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "FCALL": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ], + "EVAL": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "keynum": { + "keynumidx": 0, + "firstkey": 1, + "step": 1 + } + } + } + ] + }, + "stream": { + "XCLAIM": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XGROUP-DELCONSUMER": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XACK": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XTRIM": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XGROUP-CREATE": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XDEL": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XAUTOCLAIM": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XGROUP-DESTROY": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } 
+ } + ], + "XADD": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XSETID": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XGROUP-SETID": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "XGROUP-CREATECONSUMER": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "generic": { + "RESTORE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "UNLINK": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "MOVE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "COPY": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "PERSIST": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "DEL": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "PEXPIREAT": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "RENAME": [ + { + "begin_search": { + "index": { 
+ "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "RENAMENX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "PEXPIRE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "EXPIRE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "EXPIREAT": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "hyperloglog": { + "PFCOUNT": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "PFMERGE": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "PFADD": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "PFDEBUG": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "server": { + "FLUSHDB": [], + "SWAPDB": [], + "FLUSHALL": [], + "RESTORE-ASKING": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + 
"find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "bitmap": { + "SETBIT": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "BITOP": [ + { + "begin_search": { + "index": { + "pos": 2 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + }, + { + "begin_search": { + "index": { + "pos": 3 + } + }, + "find_keys": { + "range": { + "lastkey": -1, + "step": 1, + "limit": 0 + } + } + } + ], + "BITFIELD": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "hash": { + "HMSET": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "HINCRBYFLOAT": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "HDEL": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "HSETNX": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "HSET": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "HINCRBY": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ] + }, + "pubsub": { + "SPUBLISH": [ + { + "begin_search": { + "index": { + "pos": 1 + } + }, + "find_keys": { + "range": { + "lastkey": 0, + "step": 1, + "limit": 0 + } + } + } + ], + "PUBLISH": [] + }, + "connection": { + "PING": [], + "SELECT": [] + } + }, + "container": [ + "XGROUP", + 
"FUNCTION" + ] +} \ No newline at end of file diff --git a/tools/codis2pika/test/.gitignore b/tools/codis2pika/test/.gitignore new file mode 100644 index 0000000000..a6888070d2 --- /dev/null +++ b/tools/codis2pika/test/.gitignore @@ -0,0 +1,3 @@ +.pytest_cache +__pycache__ +tmp diff --git a/tools/codis2pika/test/assets/empty.toml b/tools/codis2pika/test/assets/empty.toml new file mode 100644 index 0000000000..98517eff59 --- /dev/null +++ b/tools/codis2pika/test/assets/empty.toml @@ -0,0 +1,50 @@ +[source] +type = "sync" # sync or restore +address = "127.0.0.1:6379" +username = "" # keep empty if not using ACL +password = "" # keep empty if no authentication is required +tls = false +elasticache_psync = "" # using when source is ElastiCache. ref: https://github.com/alibaba/RedisShake/issues/373 + +[target] +type = "cluster" # standalone or cluster +# When the target is a cluster, write the address of one of the nodes. +# redis-shake will obtain other nodes through the `cluster nodes` command. +address = "127.0.0.1:30001" +username = "" # keep empty if not using ACL +password = "" # keep empty if no authentication is required +tls = false + +[advanced] +dir = "data" + +# runtime.GOMAXPROCS, 0 means use runtime.NumCPU() cpu cores +ncpu = 3 + +# pprof port, 0 means disable +pprof_port = 0 + +# log +log_file = "redis-shake.log" +log_level = "info" # debug, info or warn +log_interval = 5 # in seconds + +# redis-shake gets key and value from rdb file, and uses RESTORE command to +# create the key in target redis. Redis RESTORE will return a "Target key name +# is busy" error when key already exists. You can use this configuration item +# to change the default behavior of restore: +# panic: redis-shake will stop when meet "Target key name is busy" error. +# rewrite: redis-shake will replace the key with new value. +# ignore: redis-shake will skip restore the key when meet "Target key name is busy" error. 
+rdb_restore_command_behavior = "rewrite" # panic, rewrite or skip + +# pipeline +pipeline_count_limit = 1024 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default. This amount is normally 1gb. +target_redis_client_max_querybuf_len = 1024_000_000 + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb. +target_redis_proto_max_bulk_len = 512_000_000 \ No newline at end of file diff --git a/tools/codis2pika/test/cases/__init__.py b/tools/codis2pika/test/cases/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tools/codis2pika/test/cases/auth.py b/tools/codis2pika/test/cases/auth.py new file mode 100644 index 0000000000..1194b7f4e7 --- /dev/null +++ b/tools/codis2pika/test/cases/auth.py @@ -0,0 +1,27 @@ +import time + +from utils import * + + +def main(): + r0 = Redis() + r0.client.config_set("requirepass", "password") + r0.client.execute_command("auth", "password") # for Redis 4.0 + r1 = Redis() + r1.client.config_set("requirepass", "password") + r1.client.execute_command("auth", "password") # for Redis 4.0 + + t = get_empty_config() + t["source"]["address"] = r0.get_address() + t["source"]["password"] = "password" + t["target"]["type"] = "standalone" + t["target"]["address"] = r1.get_address() + t["target"]["password"] = "password" + + rs = RedisShake() + rs.run(t) + + # wait sync need use http interface + r0.client.set("finished", "1") + time.sleep(2) + assert r1.client.get("finished") == b"1" diff --git a/tools/codis2pika/test/cases/auth_acl.py b/tools/codis2pika/test/cases/auth_acl.py new file mode 100644 index 0000000000..66438c7588 --- /dev/null +++ b/tools/codis2pika/test/cases/auth_acl.py @@ -0,0 +1,35 @@ +import time + +from utils import * + + +def main(): + r0 = Redis() + try: + r0.client.acl_list() + except Exception: + return + r0.client.execute_command("acl", "setuser", "user0", ">password0", "~*", "+@all") + 
r0.client.execute_command("acl", "setuser", "user0", "on") + r0.client.execute_command("auth", "user0", "password0") # for Redis 4.0 + r1 = Redis() + r1.client.execute_command("acl", "setuser", "user1", ">password1", "~*", "+@all") + r1.client.execute_command("acl", "setuser", "user1", "on") + r1.client.execute_command("auth", "user1", "password1") # for Redis 4.0 + + t = get_empty_config() + t["source"]["address"] = r0.get_address() + t["source"]["username"] = "user0" + t["source"]["password"] = "password0" + t["target"]["type"] = "standalone" + t["target"]["address"] = r1.get_address() + t["target"]["username"] = "user1" + t["target"]["password"] = "password1" + + rs = RedisShake() + rs.run(t) + + # wait sync need use http interface + r0.client.set("finished", "1") + time.sleep(2) + assert r1.client.get("finished") == b"1" diff --git a/tools/codis2pika/test/cases/cluster/__init__.py b/tools/codis2pika/test/cases/cluster/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tools/codis2pika/test/cases/cluster/sync.py b/tools/codis2pika/test/cases/cluster/sync.py new file mode 100644 index 0000000000..4bb57e5a62 --- /dev/null +++ b/tools/codis2pika/test/cases/cluster/sync.py @@ -0,0 +1,5 @@ +from utils import * + + +def main(): + pass diff --git a/tools/codis2pika/test/cases/example.py b/tools/codis2pika/test/cases/example.py new file mode 100644 index 0000000000..8cf3afaaf6 --- /dev/null +++ b/tools/codis2pika/test/cases/example.py @@ -0,0 +1,9 @@ +from utils import * + + +def main(): + assert True + + +if __name__ == '__main__': + pass diff --git a/tools/codis2pika/test/cases/issues/__init__.py b/tools/codis2pika/test/cases/issues/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tools/codis2pika/test/cases/types/__init__.py b/tools/codis2pika/test/cases/types/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tools/codis2pika/test/cases/types/type_hash.py 
b/tools/codis2pika/test/cases/types/type_hash.py new file mode 100644 index 0000000000..a60f604aa2 --- /dev/null +++ b/tools/codis2pika/test/cases/types/type_hash.py @@ -0,0 +1,25 @@ +import redis + +prefix = "hash" + + +def add_rdb_data(c: redis.Redis): + c.hset(f"{prefix}_rdb_k", "key0", "value0") + for i in range(10000): + c.hset(f"{prefix}_rdb_k_large", f"key{i}", f"value{i}") + + +def add_aof_data(c: redis.Redis): + c.hset(f"{prefix}_aof_k", "key0", "value0") + for i in range(10000): + c.hset(f"{prefix}_aof_k_large", f"key{i}", f"value{i}") + + +def check_data(c: redis.Redis): + assert c.hget(f"{prefix}_rdb_k", "key0") == b"value0" + assert c.hmget(f"{prefix}_rdb_k_large", *[f"key{i}" for i in range(10000)]) == [f"value{i}".encode() for i in + range(10000)] + + assert c.hget(f"{prefix}_aof_k", "key0") == b"value0" + assert c.hmget(f"{prefix}_aof_k_large", *[f"key{i}" for i in range(10000)]) == [f"value{i}".encode() for i in + range(10000)] diff --git a/tools/codis2pika/test/cases/types/type_list.py b/tools/codis2pika/test/cases/types/type_list.py new file mode 100644 index 0000000000..f823a958dc --- /dev/null +++ b/tools/codis2pika/test/cases/types/type_list.py @@ -0,0 +1,23 @@ +import redis + +prefix = "list" + +elements = [f"element_{i}" for i in range(10000)] + + +def add_rdb_data(c: redis.Redis): + c.rpush(f"{prefix}_rdb_k", 0, 1, 2, 3, 4, 5, 6, 7) + c.rpush(f"{prefix}_rdb_k0", *elements) + + +def add_aof_data(c: redis.Redis): + c.rpush(f"{prefix}_aof_k", 0, 1, 2, 3, 4, 5, 6, 7) + c.rpush(f"{prefix}_aof_k0", *elements) + + +def check_data(c: redis.Redis): + assert c.lrange(f"{prefix}_rdb_k", 0, -1) == [b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7"] + assert c.lrange(f"{prefix}_rdb_k0", 0, -1) == [f"element_{i}".encode() for i in range(10000)] + + assert c.lrange(f"{prefix}_aof_k", 0, -1) == [b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7"] + assert c.lrange(f"{prefix}_aof_k0", 0, -1) == [f"element_{i}".encode() for i in range(10000)] diff --git 
a/tools/codis2pika/test/cases/types/type_set.py b/tools/codis2pika/test/cases/types/type_set.py new file mode 100644 index 0000000000..288d95ab5f --- /dev/null +++ b/tools/codis2pika/test/cases/types/type_set.py @@ -0,0 +1,23 @@ +import redis + +prefix = "set" + + +def add_rdb_data(c: redis.Redis): + c.sadd(f"{prefix}_rdb_k", 0, 1, 2, 3, 4, 5, 6, 7) + elements = [f"element_{i}" for i in range(10000)] + c.sadd(f"{prefix}_rdb_k0", *elements) + + +def add_aof_data(c: redis.Redis): + c.sadd(f"{prefix}_aof_k", 0, 1, 2, 3, 4, 5, 6, 7) + elements = [f"element_{i}" for i in range(10000)] + c.sadd(f"{prefix}_aof_k0", *elements) + + +def check_data(c: redis.Redis): + assert c.smembers(f"{prefix}_rdb_k") == {b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7"} + assert c.smembers(f"{prefix}_rdb_k0") == {f"element_{i}".encode() for i in range(10000)} + + assert c.smembers(f"{prefix}_aof_k") == {b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7"} + assert c.smembers(f"{prefix}_aof_k0") == {f"element_{i}".encode() for i in range(10000)} diff --git a/tools/codis2pika/test/cases/types/type_stream.py b/tools/codis2pika/test/cases/types/type_stream.py new file mode 100644 index 0000000000..d1f207e554 --- /dev/null +++ b/tools/codis2pika/test/cases/types/type_stream.py @@ -0,0 +1,34 @@ +import redis + +prefix = "stream" + +fields = {f"field_{i}": f"value_{i}" for i in range(64)} +STREAM_LENGTH = 128 + + +def add_rdb_data(c: redis.Redis): + c.xadd(f"{prefix}_rdb_k", {"key0": "value0"}, "*") + for i in range(STREAM_LENGTH): + c.xadd(f"{prefix}_rdb_k_large", fields=fields, id="*") + + +def add_aof_data(c: redis.Redis): + c.xadd(f"{prefix}_aof_k", {"key0": "value0"}, "*") + for i in range(STREAM_LENGTH): + c.xadd(f"{prefix}_aof_k_large", fields=fields, id="*") + + +def check_data(c: redis.Redis): + ret = c.xread(streams={f"{prefix}_rdb_k": "0-0"}, count=1)[0][1] + assert ret[0][1] == {b"key0": b"value0"} + + ret = c.xread(streams={f"{prefix}_rdb_k_large": "0-0"}, count=STREAM_LENGTH)[0][1] + for 
i in range(STREAM_LENGTH): + assert ret[i][1] == {k.encode(): v.encode() for k, v in fields.items()} + + ret = c.xread(streams={f"{prefix}_aof_k": "0-0"}, count=1)[0][1] + assert ret[0][1] == {b"key0": b"value0"} + + ret = c.xread(streams={f"{prefix}_aof_k_large": "0-0"}, count=STREAM_LENGTH)[0][1] + for i in range(STREAM_LENGTH): + assert ret[i][1] == {k.encode(): v.encode() for k, v in fields.items()} diff --git a/tools/codis2pika/test/cases/types/type_string.py b/tools/codis2pika/test/cases/types/type_string.py new file mode 100644 index 0000000000..c3c00b15a3 --- /dev/null +++ b/tools/codis2pika/test/cases/types/type_string.py @@ -0,0 +1,29 @@ +import redis + +prefix = "string" + + +def add_rdb_data(c: redis.Redis): + c.set(f"{prefix}_rdb_k", "v") + c.set(f"{prefix}_rdb_int", 0) + c.set(f"{prefix}_rdb_int0", -1) + c.set(f"{prefix}_rdb_int1", 123456789) + + +def add_aof_data(c: redis.Redis): + c.set(f"{prefix}_aof_k", "v") + c.set(f"{prefix}_aof_int", 0) + c.set(f"{prefix}_aof_int0", -1) + c.set(f"{prefix}_aof_int1", 123456789) + + +def check_data(c: redis.Redis): + assert c.get(f"{prefix}_rdb_k") == b"v" + assert c.get(f"{prefix}_rdb_int") == b'0' + assert c.get(f"{prefix}_rdb_int0") == b'-1' + assert c.get(f"{prefix}_rdb_int1") == b'123456789' + + assert c.get(f"{prefix}_aof_k") == b"v" + assert c.get(f"{prefix}_aof_int") == b'0' + assert c.get(f"{prefix}_aof_int0") == b'-1' + assert c.get(f"{prefix}_aof_int1") == b'123456789' diff --git a/tools/codis2pika/test/cases/types/type_zset.py b/tools/codis2pika/test/cases/types/type_zset.py new file mode 100644 index 0000000000..6fba041097 --- /dev/null +++ b/tools/codis2pika/test/cases/types/type_zset.py @@ -0,0 +1,27 @@ +import redis + +prefix = "zset" +float_maps = {"a": 1.1111, "b": 2.2222, "c": 3.3333} +maps = {str(item): item for item in range(10000)} + + +def add_rdb_data(c: redis.Redis): + c.zadd(f"{prefix}_rdb_k_float", float_maps) + c.zadd(f"{prefix}_rdb_k", maps) + + +def add_aof_data(c: redis.Redis): + 
c.zadd(f"{prefix}_aof_k_float", float_maps) + c.zadd(f"{prefix}_aof_k", maps) + + +def check_data(c: redis.Redis): + for k, v in c.zrange(f"{prefix}_rdb_k_float", 0, -1, withscores=True): + assert float_maps[k.decode()] == v + for k, v in c.zrange(f"{prefix}_rdb_k", 0, -1, withscores=True): + assert maps[k.decode()] == v + + for k, v in c.zrange(f"{prefix}_aof_k_float", 0, -1, withscores=True): + assert float_maps[k.decode()] == v + for k, v in c.zrange(f"{prefix}_aof_k", 0, -1, withscores=True): + assert maps[k.decode()] == v diff --git a/tools/codis2pika/test/cases/types/types.py b/tools/codis2pika/test/cases/types/types.py new file mode 100644 index 0000000000..743ce3c5a2 --- /dev/null +++ b/tools/codis2pika/test/cases/types/types.py @@ -0,0 +1,62 @@ +import time + +import jury +from utils import * + +from . import type_string, type_list, type_set, type_hash, type_zset, type_stream + + +def main(): + rs = RedisShake() + r0 = Redis() + r1 = Redis() + t = get_empty_config() + t["source"]["address"] = r0.get_address() + t["target"]["type"] = "standalone" + t["target"]["address"] = r1.get_address() + + timer = jury.Timer() + type_string.add_rdb_data(r0.client) + type_list.add_rdb_data(r0.client) + type_set.add_rdb_data(r0.client) + type_hash.add_rdb_data(r0.client) + type_zset.add_rdb_data(r0.client) + type_stream.add_rdb_data(r0.client) + jury.log(f"add_rdb_data: {timer.elapsed_time()}s") + + # run redis-shake + rs.run(t) + time.sleep(1) + + timer = jury.Timer() + type_string.add_aof_data(r0.client) + type_list.add_aof_data(r0.client) + type_set.add_aof_data(r0.client) + type_hash.add_aof_data(r0.client) + type_zset.add_aof_data(r0.client) + type_stream.add_aof_data(r0.client) + jury.log(f"add_aof_data: {timer.elapsed_time()}s") + + # wait sync need use http interface + timer = jury.Timer() + r0.client.set("finished", "1") + cnt = 0 + while r1.client.get("finished") != b"1": + time.sleep(0.5) + cnt += 1 + if cnt > 20: + raise Exception("sync timeout") + 
jury.log(f"sync time: {timer.elapsed_time()}s") + + timer = jury.Timer() + type_string.check_data(r1.client) + type_list.check_data(r1.client) + type_set.check_data(r1.client) + type_hash.check_data(r1.client) + type_zset.check_data(r1.client) + type_stream.check_data(r1.client) + jury.log(f"check_data: {timer.elapsed_time()}s") + + +if __name__ == '__main__': + main() diff --git a/tools/codis2pika/test/cases/types/types_rewrite.py b/tools/codis2pika/test/cases/types/types_rewrite.py new file mode 100644 index 0000000000..8ffc8ec42e --- /dev/null +++ b/tools/codis2pika/test/cases/types/types_rewrite.py @@ -0,0 +1,63 @@ +import time + +import jury +from utils import * + +from . import type_string, type_list, type_set, type_hash, type_zset, type_stream + + +def main(): + rs = RedisShake() + r0 = Redis() + r1 = Redis() + t = get_empty_config() + t["advanced"]["target_redis_proto_max_bulk_len"] = 0 + t["source"]["address"] = r0.get_address() + t["target"]["type"] = "standalone" + t["target"]["address"] = r1.get_address() + + timer = jury.Timer() + type_string.add_rdb_data(r0.client) + type_list.add_rdb_data(r0.client) + type_set.add_rdb_data(r0.client) + type_hash.add_rdb_data(r0.client) + type_zset.add_rdb_data(r0.client) + type_stream.add_rdb_data(r0.client) + jury.log(f"add_rdb_data: {timer.elapsed_time()}s") + + # run redis-shake + rs.run(t) + time.sleep(1) + + timer = jury.Timer() + type_string.add_aof_data(r0.client) + type_list.add_aof_data(r0.client) + type_set.add_aof_data(r0.client) + type_hash.add_aof_data(r0.client) + type_zset.add_aof_data(r0.client) + type_stream.add_aof_data(r0.client) + jury.log(f"add_aof_data: {timer.elapsed_time()}s") + + # wait sync need use http interface + timer = jury.Timer() + r0.client.set("finished", "1") + cnt = 0 + while r1.client.get("finished") != b"1": + time.sleep(0.5) + cnt += 1 + if cnt > 20: + raise Exception("sync timeout") + jury.log(f"sync time: {timer.elapsed_time()}s") + + timer = jury.Timer() + 
type_string.check_data(r1.client) + type_list.check_data(r1.client) + type_set.check_data(r1.client) + type_hash.check_data(r1.client) + type_zset.check_data(r1.client) + type_stream.check_data(r1.client) + jury.log(f"check_data: {timer.elapsed_time()}s") + + +if __name__ == '__main__': + main() diff --git a/tools/codis2pika/test/main.py b/tools/codis2pika/test/main.py new file mode 100644 index 0000000000..95da2c8a9f --- /dev/null +++ b/tools/codis2pika/test/main.py @@ -0,0 +1,19 @@ +import jury + +cases = [ + "cases/example", + "cases/types/types", + "cases/types/types_rewrite", + "cases/cluster/sync", + "cases/auth", + "cases/auth_acl", +] + + +def main(): + j = jury.Jury(cases) + j.run() + + +if __name__ == '__main__': + main() diff --git a/tools/codis2pika/test/requirements.txt b/tools/codis2pika/test/requirements.txt new file mode 100644 index 0000000000..3f5f6e19c0 --- /dev/null +++ b/tools/codis2pika/test/requirements.txt @@ -0,0 +1,3 @@ +jury-test==0.0.3 +redis==4.4.4 +toml==0.10.2 diff --git a/tools/codis2pika/test/test.sh b/tools/codis2pika/test/test.sh new file mode 100755 index 0000000000..ac1b9a9627 --- /dev/null +++ b/tools/codis2pika/test/test.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +go test ../... 
-v + +cd test +python main.py \ No newline at end of file diff --git a/tools/codis2pika/test/utils/__init__.py b/tools/codis2pika/test/utils/__init__.py new file mode 100644 index 0000000000..4eba8f9782 --- /dev/null +++ b/tools/codis2pika/test/utils/__init__.py @@ -0,0 +1,3 @@ +from .redis_ import Redis +from .cluster_ import Cluster +from .redis_shake import get_empty_config, RedisShake diff --git a/tools/codis2pika/test/utils/cluster_.py b/tools/codis2pika/test/utils/cluster_.py new file mode 100644 index 0000000000..5b8a956eb2 --- /dev/null +++ b/tools/codis2pika/test/utils/cluster_.py @@ -0,0 +1,18 @@ +import os + +from .redis_ import Redis +import jury + + +class Cluster: + def __init__(self, num=3): + self.nodes = [] + self.num = num + for i in range(num): + self.nodes.append(Redis(args=["--cluster-enabled", "yes"])) + host_port_list = [f"{node.host}:{node.port}" for node in self.nodes] + jury.log_yellow(f"Redis cluster created, {self.num} nodes. {host_port_list}") + os.system("redis-cli --cluster-yes --cluster create " + " ".join(host_port_list)) + + def push_table(self): + pass diff --git a/tools/codis2pika/test/utils/constant.py b/tools/codis2pika/test/utils/constant.py new file mode 100644 index 0000000000..9598674bde --- /dev/null +++ b/tools/codis2pika/test/utils/constant.py @@ -0,0 +1,6 @@ +from pathlib import Path + +BASE_PATH = f"{Path(__file__).parent.parent.parent.absolute()}" # project path + +PATH_REDIS_SHAKE = f"{BASE_PATH}/bin/redis-shake" +PATH_EMPTY_CONFIG_FILE = f"{BASE_PATH}/test/assets/empty.toml" diff --git a/tools/codis2pika/test/utils/redis_.py b/tools/codis2pika/test/utils/redis_.py new file mode 100644 index 0000000000..2ac5ed92a1 --- /dev/null +++ b/tools/codis2pika/test/utils/redis_.py @@ -0,0 +1,42 @@ +import time + +import jury +import redis + + +class Redis: + def __init__(self, args=None): + self.args = args + if args is None: + self.args = [] + + self.redis = None + self.host = "127.0.0.1" + self.port = jury.get_free_port() + 
self.dir = f"{jury.get_case_dir()}/redis_{self.port}" + self.server = jury.Launcher(args=["redis-server", "--port", str(self.port)] + self.args, work_dir=self.dir) + self.__wait_start() + jury.log_yellow(f"Redis started(pid={self.server.get_pid()}). redis-cli -p {self.port}") + + def get_address(self): + return f"127.0.0.1:{self.port}" + + def __wait_start(self, timeout=5): + timer = jury.Timer() + while True: + try: + self.__create_client() + self.client.ping() + break + except redis.exceptions.ConnectionError: + time.sleep(0.01) + except redis.exceptions.ResponseError as e: + if str(e) == "LOADING Redis is loading the dataset in memory": + time.sleep(0.01) + else: + raise e + if timer.elapsed_time() > timeout: + raise Exception(f"tair start timeout, {self.dir}") + + def __create_client(self): + self.client = redis.Redis(port=self.port, single_connection_client=True) diff --git a/tools/codis2pika/test/utils/redis_shake.py b/tools/codis2pika/test/utils/redis_shake.py new file mode 100644 index 0000000000..5d7a581cfb --- /dev/null +++ b/tools/codis2pika/test/utils/redis_shake.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path + +import jury +import toml + +from .constant import PATH_REDIS_SHAKE, PATH_EMPTY_CONFIG_FILE + + +def get_empty_config(): + with open(PATH_EMPTY_CONFIG_FILE, "r") as f: + return toml.load(f) + + +class RedisShake: + def __init__(self): + self.server = None + self.redis = None + self.dir = f"{jury.get_case_dir()}/redis_shake" + if not os.path.exists(self.dir): + Path(self.dir).mkdir(parents=True, exist_ok=True) + + def run(self, toml_config): + with open(f"{self.dir}/redis-shake.toml", "w") as f: + toml.dump(toml_config, f) + self.server = jury.Launcher(args=[PATH_REDIS_SHAKE, "redis-shake.toml"], work_dir=self.dir) diff --git a/tools/manifest_generator/include/pika_binlog.h b/tools/manifest_generator/include/pika_binlog.h index 29c023d26c..db8dfcf73c 100644 --- a/tools/manifest_generator/include/pika_binlog.h +++ 
b/tools/manifest_generator/include/pika_binlog.h @@ -19,7 +19,7 @@ std::string NewFileName(const std::string& name, const uint32_t current); class Version { public: - Version(pstd::RWFile* save); + Version(std::shared_ptr<pstd::RWFile> save); ~Version(); Status Init(); @@ -39,7 +39,7 @@ class Version { } private: - pstd::RWFile* save_; + std::shared_ptr<pstd::RWFile> save_; // No copying allowed; Version(const Version&); @@ -65,7 +65,7 @@ class Binlog { static Status AppendBlank(pstd::WritableFile* file, uint64_t len); - pstd::WritableFile* queue() { return queue_; } + // pstd::WritableFile* queue() { return queue_; } uint64_t file_size() { return file_size_; } @@ -83,9 +83,9 @@ class Binlog { uint32_t consumer_num_; uint64_t item_num_; - Version* version_; - pstd::WritableFile* queue_; - pstd::RWFile* versionfile_; + std::unique_ptr<Version> version_; + std::unique_ptr<pstd::WritableFile> queue_; + std::shared_ptr<pstd::RWFile> versionfile_; pstd::Mutex mutex_; diff --git a/tools/manifest_generator/pika_binlog.cc b/tools/manifest_generator/pika_binlog.cc index f0442467c2..b5b87e2a62 100644 --- a/tools/manifest_generator/pika_binlog.cc +++ b/tools/manifest_generator/pika_binlog.cc @@ -19,7 +19,7 @@ std::string NewFileName(const std::string& name, const uint32_t current) { /* * Version */ -Version::Version(pstd::RWFile* save) : pro_num_(0), pro_offset_(0), logic_id_(0), save_(save) { assert(save_); } +Version::Version(std::shared_ptr<pstd::RWFile> save) : pro_num_(0), pro_offset_(0), logic_id_(0), save_(save) { assert(save_); } Version::~Version() { StableSave(); } @@ -75,26 +75,29 @@ Binlog::Binlog(std::string binlog_path, const int file_size) std::cout << "Binlog: Manifest file not exist, we create a new one."; profile = NewFileName(filename, pro_num_); - s = pstd::NewWritableFile(profile, &queue_); + s = pstd::NewWritableFile(profile, queue_); if (!s.ok()) { std::cout << "Binlog: new " << filename << " " << s.ToString(); exit(-1); } - - s = pstd::NewRWFile(manifest, &versionfile_); + std::unique_ptr<pstd::RWFile> tmp_file; + s = pstd::NewRWFile(manifest, 
tmp_file); + versionfile_.reset(tmp_file.release()); if (!s.ok()) { std::cout << "Binlog: new versionfile error " << s.ToString(); exit(-1); } - version_ = new Version(versionfile_); + version_ = std::make_unique<Version>(versionfile_); version_->StableSave(); } else { std::cout << "Binlog: Find the exist file."; - s = pstd::NewRWFile(manifest, &versionfile_); + std::unique_ptr<pstd::RWFile> tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); if (s.ok()) { - version_ = new Version(versionfile_); + version_ = std::make_unique<Version>(versionfile_); version_->Init(); pro_num_ = version_->pro_num_; @@ -106,7 +109,7 @@ Binlog::Binlog(std::string binlog_path, const int file_size) } profile = NewFileName(filename, pro_num_); - s = pstd::AppendWritableFile(profile, &queue_, version_->pro_offset_); + s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_); if (!s.ok()) { std::cout << "Binlog: Open file " << profile << " error " << s.ToString(); exit(-1); @@ -116,12 +119,7 @@ Binlog::Binlog(std::string binlog_path, const int file_size) InitLogFile(); } -Binlog::~Binlog() { - delete version_; - delete versionfile_; - - delete queue_; -} +Binlog::~Binlog() {} void Binlog::InitLogFile() { assert(queue_ != nullptr); @@ -152,12 +150,12 @@ Status Binlog::Put(const char* item, int len) { /* Check to roll log file */ uint64_t filesize = queue_->Filesize(); if (filesize > file_size_) { - delete queue_; + queue_.reset(); queue_ = nullptr; pro_num_++; std::string profile = NewFileName(filename, pro_num_); - pstd::NewWritableFile(profile, &queue_); + pstd::NewWritableFile(profile, queue_); { std::lock_guard l(version_->rwlock_); @@ -310,7 +308,7 @@ Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset) { pro_offset = 0; } - delete queue_; + queue_.reset(); std::string init_profile = NewFileName(filename, 0); if (pstd::FileExists(init_profile)) { @@ -322,8 +320,8 @@ Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset) { 
pstd::DeleteFile(profile); } - pstd::NewWritableFile(profile, &queue_); - Binlog::AppendBlank(queue_, pro_offset); + pstd::NewWritableFile(profile, queue_); + Binlog::AppendBlank(queue_.get(), pro_offset); pro_num_ = pro_num; diff --git a/tools/pika-port/pika_port_3/migrator_thread.cc b/tools/pika-port/pika_port_3/migrator_thread.cc index 8ce19c39a9..f1bb206473 100644 --- a/tools/pika-port/pika_port_3/migrator_thread.cc +++ b/tools/pika-port/pika_port_3/migrator_thread.cc @@ -21,7 +21,7 @@ const int64_t MAX_BATCH_NUM = 30000; MigratorThread::~MigratorThread() = default; void MigratorThread::MigrateStringsDB() { - auto* db = static_cast<storage::Storage*>(db_); + auto db = static_cast<storage::Storage*>(db_); rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; @@ -76,7 +76,7 @@ void MigratorThread::MigrateStringsDB() { } void MigratorThread::MigrateListsDB() { - auto* db = static_cast<storage::Storage*>(db_); + auto db = static_cast<storage::Storage*>(db_); std::string start_key; std::string next_key; @@ -159,7 +159,7 @@ void MigratorThread::MigrateListsDB() { } void MigratorThread::MigrateHashesDB() { - auto* db = static_cast<storage::Storage*>(db_); + auto db = static_cast<storage::Storage*>(db_); std::string start_key; std::string next_key; @@ -230,7 +230,7 @@ void MigratorThread::MigrateHashesDB() { } void MigratorThread::MigrateSetsDB() { - auto* db = static_cast<storage::Storage*>(db_); + auto db = static_cast<storage::Storage*>(db_); std::string start_key; std::string next_key; @@ -298,7 +298,7 @@ void MigratorThread::MigrateSetsDB() { } void MigratorThread::MigrateZsetsDB() { - auto* db = static_cast<storage::Storage*>(db_); + auto db = static_cast<storage::Storage*>(db_); std::string start_key; std::string next_key; diff --git a/tools/pika-port/pika_port_3/pika_binlog.cc b/tools/pika-port/pika_port_3/pika_binlog.cc index b7f237766e..77a078401d 100644 --- a/tools/pika-port/pika_port_3/pika_binlog.cc +++ b/tools/pika-port/pika_port_3/pika_binlog.cc @@ -26,7 +26,7 @@ std::string NewFileName(const std::string& name, const uint32_t current) { /* * Version */ -Version::Version(pstd::RWFile* save) : pro_num_(0), pro_offset_(0), 
logic_id_(0), save_(save) { assert(save_); } +Version::Version(std::shared_ptr<pstd::RWFile> save) : pro_num_(0), pro_offset_(0), logic_id_(0), save_(save) { assert(save_); } Version::~Version() { StableSave(); } @@ -88,24 +88,28 @@ Binlog::Binlog(std::string binlog_path, const int file_size) LOG(INFO) << "Binlog: Manifest file not exist, we create a new one."; profile = NewFileName(filename, pro_num_); - s = pstd::NewWritableFile(profile, &queue_); + s = pstd::NewWritableFile(profile, queue_); if (!s.ok()) { LOG(FATAL) << "Binlog: NewWritableFile(" << filename << ") = " << s.ToString(); } - s = pstd::NewRWFile(manifest, &versionfile_); + std::unique_ptr<pstd::RWFile> tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); if (!s.ok()) { LOG(FATAL) << "Binlog: new versionfile error " << s.ToString(); } - version_ = new Version(versionfile_); + version_ = std::make_unique<Version>(versionfile_); version_->StableSave(); } else { LOG(INFO) << "Binlog: Find the exist file."; - s = pstd::NewRWFile(manifest, &versionfile_); + std::unique_ptr<pstd::RWFile> tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); if (s.ok()) { - version_ = new Version(versionfile_); + version_ = std::make_unique<Version>(versionfile_); version_->Init(); pro_num_ = version_->pro_num_; @@ -117,7 +121,7 @@ Binlog::Binlog(std::string binlog_path, const int file_size) profile = NewFileName(filename, pro_num_); LOG(INFO) << "Binlog: open profile " << profile; - s = pstd::AppendWritableFile(profile, &queue_, version_->pro_offset_); + s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_); if (!s.ok()) { LOG(FATAL) << "Binlog: Open file " << profile << " error " << s.ToString(); } @@ -129,12 +133,7 @@ Binlog::Binlog(std::string binlog_path, const int file_size) InitLogFile(); } -Binlog::~Binlog() { - delete version_; - delete versionfile_; - - delete queue_; -} +Binlog::~Binlog() {} void Binlog::InitLogFile() { assert(queue_); @@ -165,12 +164,12 @@ Status 
Binlog::Put(const char* item, int len) { /* Check to roll log file */ uint64_t filesize = queue_->Filesize(); if (filesize > file_size_) { - delete queue_; + queue_.reset(); queue_ = nullptr; pro_num_++; std::string profile = NewFileName(filename, pro_num_); - pstd::NewWritableFile(profile, &queue_); + pstd::NewWritableFile(profile, queue_); { std::lock_guard l(version_->rwlock_); @@ -323,7 +322,7 @@ Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset) { pro_offset = 0; } - delete queue_; + queue_.reset(); std::string init_profile = NewFileName(filename, 0); if (pstd::FileExists(init_profile)) { @@ -335,8 +334,8 @@ Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset) { pstd::DeleteFile(profile); } - pstd::NewWritableFile(profile, &queue_); - Binlog::AppendBlank(queue_, pro_offset); + pstd::NewWritableFile(profile, queue_); + Binlog::AppendBlank(queue_.get(), pro_offset); pro_num_ = pro_num; diff --git a/tools/pika-port/pika_port_3/pika_binlog.h b/tools/pika-port/pika_port_3/pika_binlog.h index 35055eaa38..a6e5910d71 100644 --- a/tools/pika-port/pika_port_3/pika_binlog.h +++ b/tools/pika-port/pika_port_3/pika_binlog.h @@ -45,7 +45,7 @@ class Binlog { static Status AppendBlank(pstd::WritableFile* file, uint64_t len); - pstd::WritableFile* queue() { return queue_; } + // pstd::WritableFile* queue() { return queue_; } uint64_t file_size() { return file_size_; } @@ -63,9 +63,9 @@ class Binlog { uint32_t consumer_num_; uint64_t item_num_; - Version* version_; - pstd::WritableFile* queue_; - pstd::RWFile* versionfile_; + std::unique_ptr<Version> version_; + std::unique_ptr<pstd::WritableFile> queue_; + std::shared_ptr<pstd::RWFile> versionfile_; pstd::Mutex mutex_; @@ -86,7 +86,7 @@ class Binlog { class Version { public: - Version(pstd::RWFile* save); + Version(std::shared_ptr<pstd::RWFile> save); ~Version(); Status Init(); @@ -106,7 +106,7 @@ class Version { } private: - pstd::RWFile* save_; + std::shared_ptr<pstd::RWFile> save_; // No copying allowed; Version(const Version&); diff --git 
a/tools/pika-port/pika_port_3/pika_command.h b/tools/pika-port/pika_port_3/pika_command.h index dcb6ffc789..1a54aa9417 100644 --- a/tools/pika-port/pika_port_3/pika_command.h +++ b/tools/pika-port/pika_port_3/pika_command.h @@ -426,7 +426,7 @@ class Cmd { Cmd& operator=(const Cmd&); }; -typedef std::unordered_map<std::string, Cmd*> CmdTable; +typedef std::unordered_map<std::string, std::unique_ptr<Cmd>> CmdTable; // Method for CmdInfo Table void InitCmdInfoTable(); @@ -436,7 +436,6 @@ void DestoryCmdInfoTable(); // Method for Cmd Table void InitCmdTable(CmdTable* cmd_table); Cmd* GetCmdFromTable(const std::string& opt, const CmdTable& cmd_table); -void DestoryCmdTable(CmdTable* cmd_table); void RedisAppendContent(std::string& str, const std::string& value) { str.append(value.data(), value.size()); diff --git a/tools/pika-port/pika_port_3/pika_sender.cc b/tools/pika-port/pika_port_3/pika_sender.cc index 790b840051..d36568cf4e 100644 --- a/tools/pika-port/pika_port_3/pika_sender.cc +++ b/tools/pika-port/pika_port_3/pika_sender.cc @@ -7,7 +7,7 @@ #include "pstd/include/xdebug.h" PikaSender::PikaSender(std::string ip, int64_t port, std::string password) - : cli_(nullptr), ip_(std::move(std::move(ip))), port_(port), password_(std::move(std::move(password))), should_exit_(false), cnt_(0), elements_(0) {} + : cli_(nullptr), ip_(std::move(ip)), port_(port), password_(std::move(password)), should_exit_(false), cnt_(0), elements_(0) {} PikaSender::~PikaSender() = default; @@ -22,7 +22,7 @@ void PikaSender::Stop() { } void PikaSender::ConnectRedis() { - while (cli_ == nullptr) { + while (!cli_) { // Connect to redis cli_ = net::NewRedisCli(); cli_->set_connect_timeout(1000); @@ -131,7 +131,7 @@ void PikaSender::SendCommand(std::string& command, const std::string& key) { void* PikaSender::ThreadMain() { LOG(INFO) << "Start sender thread..."; - if (cli_ == nullptr) { + if (!cli_) { ConnectRedis(); } diff --git a/tools/pika-port/pika_port_3/redis_sender.cc b/tools/pika-port/pika_port_3/redis_sender.cc index ac7aec339f..204a3d2362 
100644 --- a/tools/pika-port/pika_port_3/redis_sender.cc +++ b/tools/pika-port/pika_port_3/redis_sender.cc @@ -13,14 +13,14 @@ static time_t kCheckDiff = 1; RedisSender::RedisSender(int id, std::string ip, int64_t port, std::string password) - : id_(id), cli_(nullptr), ip_(std::move(std::move(ip))), port_(port), password_(std::move(std::move(password))), should_exit_(false), cnt_(0), elements_(0) { + : id_(id), cli_(nullptr), ip_(std::move(ip)), port_(port), password_(std::move(password)), should_exit_(false), cnt_(0), elements_(0) { last_write_time_ = ::time(nullptr); } RedisSender::~RedisSender() { LOG(INFO) << "RedisSender thread " << id_ << " exit!!!"; } void RedisSender::ConnectRedis() { - while (cli_ == nullptr) { + while (!cli_) { // Connect to redis cli_ = net::NewRedisCli(); cli_->set_connect_timeout(1000); diff --git a/tools/pika_exporter/README.md b/tools/pika_exporter/README.md index f327517147..7261f79f27 100644 --- a/tools/pika_exporter/README.md +++ b/tools/pika_exporter/README.md @@ -10,8 +10,8 @@ Pika-Exporter is based on [Redis-Exporter](https://github.com/oliver006/redis_ex To start using `pika_exporter`, install `Go` and run go get ``` -$ go get github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter -$ cd $GOPATH/src/github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter +$ go get github.com/OpenAtomFoundation/pika/tools/pika_exporter +$ cd $GOPATH/src/github.com/OpenAtomFoundation/pika/tools/pika_exporter $ make $ ./bin/pika_exporter ``` @@ -38,7 +38,7 @@ scrape_configs: ## Flags ## | Name | Environment Variables | Default | Description | Example | 
|----------------------|------------------------------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| -| pika.host-file | PIKA_HOST_FILE | | Path to file containing one or more pika nodes, separated by newline. NOTE: mutually exclusive with pika.addr.Each line can optionally be comma-separated with the fields ``,``,``. See [here](https://github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/raw/master/contrib/sample_pika_hosts_file.txt) for an example file. | --pika.host-file ./pika_hosts_file.txt | +| pika.host-file | PIKA_HOST_FILE | | Path to file containing one or more pika nodes, separated by newline. NOTE: mutually exclusive with pika.addr.Each line can optionally be comma-separated with the fields ``,``,``. See [here](https://github.com/OpenAtomFoundation/pika/tools/pika_exporter/raw/master/contrib/sample_pika_hosts_file.txt) for an example file. | --pika.host-file ./pika_hosts_file.txt | | pika.addr | PIKA_ADDR | | Address of one or more pika nodes, separated by comma. | --pika.addr 192.168.1.2:9221,192.168.1.3:9221 | | pika.password | PIKA_PASSWORD | | Password for one or more pika nodes, separated by comma. | --pika.password 123.com,123.com | | pika.alias | PIKA_ALIAS | | Pika instance alias for one or more pika nodes, separated by comma. 
| --pika.alias a,b | diff --git a/tools/pika_exporter/exporter/parser_test.go b/tools/pika_exporter/exporter/parser_test.go index 71a818358c..f373c0fa23 100644 --- a/tools/pika_exporter/exporter/parser_test.go +++ b/tools/pika_exporter/exporter/parser_test.go @@ -3,8 +3,8 @@ package exporter import ( "fmt" "github.com/Masterminds/semver" - "github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/exporter/metrics" - "github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/exporter/test" + "github.com/OpenAtomFoundation/pika/tools/pika_exporter/exporter/metrics" + "github.com/OpenAtomFoundation/pika/tools/pika_exporter/exporter/test" "github.com/stretchr/testify/assert" "testing" ) diff --git a/tools/pika_exporter/exporter/pika.go b/tools/pika_exporter/exporter/pika.go index 9283acd161..fe9ae11821 100644 --- a/tools/pika_exporter/exporter/pika.go +++ b/tools/pika_exporter/exporter/pika.go @@ -8,9 +8,9 @@ import ( "sync" "time" - "github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/discovery" + "github.com/OpenAtomFoundation/pika/tools/pika_exporter/discovery" - "github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/exporter/metrics" + "github.com/OpenAtomFoundation/pika/tools/pika_exporter/exporter/metrics" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) diff --git a/tools/pika_exporter/exporter/pika_test.go b/tools/pika_exporter/exporter/pika_test.go index 2d48129ba0..3d41d2a8b9 100644 --- a/tools/pika_exporter/exporter/pika_test.go +++ b/tools/pika_exporter/exporter/pika_test.go @@ -1,7 +1,7 @@ package exporter import ( - "github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/discovery" + "github.com/OpenAtomFoundation/pika/tools/pika_exporter/discovery" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "testing" diff --git a/tools/pika_exporter/go.mod b/tools/pika_exporter/go.mod index a53c9c0ae1..0059c93fdb 100644 --- a/tools/pika_exporter/go.mod +++ 
b/tools/pika_exporter/go.mod @@ -1,4 +1,4 @@ -module github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter +module github.com/OpenAtomFoundation/pika/tools/pika_exporter go 1.19 diff --git a/tools/pika_exporter/main.go b/tools/pika_exporter/main.go index c6570560a1..14f06a9a17 100644 --- a/tools/pika_exporter/main.go +++ b/tools/pika_exporter/main.go @@ -6,8 +6,8 @@ import ( "os" "strconv" - "github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/discovery" - "github.com/OpenAtomFoundation/pika/pika-tools/pika_exporter/exporter" + "github.com/OpenAtomFoundation/pika/tools/pika_exporter/discovery" + "github.com/OpenAtomFoundation/pika/tools/pika_exporter/exporter" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" diff --git a/tools/pika_to_txt/pika_to_txt.cc b/tools/pika_to_txt/pika_to_txt.cc index 0343c14c5f..8f85488cdf 100644 --- a/tools/pika_to_txt/pika_to_txt.cc +++ b/tools/pika_to_txt/pika_to_txt.cc @@ -64,7 +64,7 @@ int main(int argc, char** argv) { bw_option.options.create_if_missing = true; bw_option.options.write_buffer_size = 256 * 1024 * 1024; // 256M bw_option.options.target_file_size_base = 20 * 1024 * 1024; // 20M - auto* storage_db = new storage::Storage(); + auto storage_db = new storage::Storage(); if (storage_db != nullptr && (status = storage_db->Open(bw_option, storage_db_path)).ok()) { std::cout << "Open Storage db success..." << std::endl; } else { @@ -74,9 +74,9 @@ int main(int argc, char** argv) { std::cout << "Start migrating data from Blackwidow db to " << target_file << "..." 
<< std::endl; - auto* write_thread = new WriteThread(target_file); - auto* scan_thread = new ScanThread(write_thread, storage_db); - auto* progress_thread = new ProgressThread(scan_thread); + auto write_thread = new WriteThread(target_file); + auto scan_thread = new ScanThread(write_thread, storage_db); + auto progress_thread = new ProgressThread(scan_thread); write_thread->StartThread(); // wait for write thread open file success diff --git a/tools/rdb_to_pika/protocoltopika.cc b/tools/rdb_to_pika/protocoltopika.cc index 7f6f3b1b6e..f8a4f36264 100644 --- a/tools/rdb_to_pika/protocoltopika.cc +++ b/tools/rdb_to_pika/protocoltopika.cc @@ -29,7 +29,7 @@ int main(int argc, char** argv) { int port; ss >> port; redisContext* c = redisConnect(ip, port); - if (c == nullptr || (c->err != 0)) { + if (!c || c->err) { if (c != nullptr) { redisFree(c); std::cout << "connection error" << std::endl; diff --git a/tools/txt_to_pika/sender.cc b/tools/txt_to_pika/sender.cc index c127f1c632..dfbcf676ac 100644 --- a/tools/txt_to_pika/sender.cc +++ b/tools/txt_to_pika/sender.cc @@ -4,12 +4,12 @@ SenderThread::SenderThread(std::string ip, int64_t port, std::string password) - : cli_(nullptr), ip_(std::move(std::move(ip))), port_(port), password_(std::move(std::move(password))), should_exit_(false), elements_(0) {} + : cli_(nullptr), ip_(std::move(ip)), port_(port), password_(std::move(password)), should_exit_(false), elements_(0) {} SenderThread::~SenderThread() = default; void SenderThread::ConnectPika() { - while (cli_ == nullptr) { + while (!cli_) { // Connect to redis cli_ = net::NewRedisCli(); cli_->set_connect_timeout(1000); @@ -120,7 +120,7 @@ void* SenderThread::ThreadMain() { rsignal_.wait(lock, [this] { return cmd_queue_.empty() || should_exit_; }); } - if (cli_ == nullptr) { + if (!cli_) { ConnectPika(); continue; } diff --git a/tools/txt_to_pika/txt_to_pika.cc b/tools/txt_to_pika/txt_to_pika.cc index a6ca6c606b..a78d809885 100644 --- a/tools/txt_to_pika/txt_to_pika.cc +++ 
b/tools/txt_to_pika/txt_to_pika.cc @@ -64,7 +64,7 @@ for (int i = 0; i < thread_num; i++) { senders.push_back(new SenderThread(ip, port, password)); } - auto* scan_thread = new ScanThread(filename, senders, ttl); + auto scan_thread = new ScanThread(filename, senders, ttl); for (int i = 0; i < thread_num; i++) { senders[i]->StartThread();