From 589a5f6d6cfd8fe4091b98264c7bcbcdce7a3b77 Mon Sep 17 00:00:00 2001 From: bigdaronlee Date: Thu, 22 Aug 2024 07:58:00 +0700 Subject: [PATCH] feat: add new pika ehash cmd to pika 1. pkhget pkhset 2. pkhexpire pkhexpireat 3. pkhexpiretime pkhpersist pkhttl 4. add new test cases for pkhash cmd below 5. PKHSetex PKHExists PKHDel PKHLen PKHStrlen 6. PKHIncrby PKHMSet PKHMSetex PKHMGet PKHKeys 7. PKHVals PKHGetall PKHScan --- CMakeLists.txt | 3 + include/acl.h | 1 + include/pika_command.h | 31 +- include/pika_pkhash.h | 469 ++++++ src/pika_client_conn.cc | 1 + src/pika_command.cc | 635 +++++--- src/pika_pkhash.cc | 686 ++++++++ src/storage/include/storage/storage.h | 105 +- src/storage/include/storage/storage_define.h | 19 +- src/storage/src/base_filter.h | 32 +- src/storage/src/base_value_format.h | 40 +- src/storage/src/pkhash_data_value_format.h | 135 ++ src/storage/src/redis.cc | 411 ++--- src/storage/src/redis.h | 120 +- src/storage/src/redis_hashes.cc | 178 +-- src/storage/src/redis_pkhashes.cc | 1296 +++++++++++++++ src/storage/src/redis_strings.cc | 211 ++- src/storage/src/storage.cc | 220 ++- src/storage/tests/hashes_test.cc | 6 +- src/storage/tests/pkhashes_test.cc | 1499 ++++++++++++++++++ 20 files changed, 5270 insertions(+), 828 deletions(-) create mode 100644 include/pika_pkhash.h create mode 100644 src/pika_pkhash.cc create mode 100644 src/storage/src/pkhash_data_value_format.h create mode 100644 src/storage/src/redis_pkhashes.cc create mode 100644 src/storage/tests/pkhashes_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 342956b682..0fe7edb4d8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,6 +35,9 @@ link_directories("/opt/rh/gcc-toolset-13/root/lib/gcc/x86_64-redhat-linux/13") #set(CMAKE_BUILD_TYPE "Debug") #set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=thread -O0 -fno-omit-frame-pointer -fno-optimize-sibling-calls") +set(CMAKE_BUILD_TYPE "Debug") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -Wall -g3 -ggdb 
-fno-inline -fno-builtin-memcmp") + string(TOLOWER ${CMAKE_HOST_SYSTEM_PROCESSOR} HOST_ARCH) if(NOT CMAKE_BUILD_TYPE) diff --git a/include/acl.h b/include/acl.h index 5aa83d408d..3df8d49109 100644 --- a/include/acl.h +++ b/include/acl.h @@ -52,6 +52,7 @@ enum class AclCategory { CONNECTION = (1ULL << 18), TRANSACTION = (1ULL << 19), SCRIPTING = (1ULL << 20), + PKHASH = (1ULL << 21), }; enum class AclUserFlag { diff --git a/include/pika_command.h b/include/pika_command.h index c132eae9c5..a34981e691 100644 --- a/include/pika_command.h +++ b/include/pika_command.h @@ -137,6 +137,27 @@ const std::string kCmdNameHScanx = "hscanx"; const std::string kCmdNamePKHScanRange = "pkhscanrange"; const std::string kCmdNamePKHRScanRange = "pkhrscanrange"; +// PKHash +const std::string kCmdNamePKHSet = "pkhset"; +const std::string kCmdNamePKHExpire = "pkhexpire"; +const std::string kCmdNamePKHExpireat = "pkhexpireat"; +const std::string kCmdNamePKHExpiretime = "pkhexpiretime"; +const std::string kCmdNamePKHTTL = "pkhttl"; +const std::string kCmdNamePKHPersist = "pkhpersist"; +const std::string kCmdNamePKHGet = "pkhget"; +const std::string kCmdNamePKHExists = "pkhexists"; +const std::string kCmdNamePKHDel = "pkhdel"; +const std::string kCmdNamePKHLen = "pkhlen"; +const std::string kCmdNamePKHStrlen = "pkhstrlen"; +const std::string kCmdNamePKHIncrby = "pkhincrby"; +const std::string kCmdNamePKHMSet = "pkhmset"; +const std::string kCmdNamePKHSetex = "pkhmsetex"; +const std::string kCmdNamePKHMGet = "pkhmget"; +const std::string kCmdNamePKHKeys = "pkhkeys"; +const std::string kCmdNamePKHVals = "pkhvals"; +const std::string kCmdNamePKHGetall = "pkhgetall"; +const std::string kCmdNamePKHScan = "pkhscan"; + // List const std::string kCmdNameLIndex = "lindex"; const std::string kCmdNameLInsert = "linsert"; @@ -247,7 +268,6 @@ const std::string kCmdNameXInfo = "xinfo"; const std::string kClusterPrefix = "pkcluster"; - /* * If a type holds a key, a new data structure * that uses the key 
will use this error @@ -290,7 +310,8 @@ enum CmdFlags { kCmdFlagsOperateKey = (1 << 19), // redis keySpace kCmdFlagsStream = (1 << 20), kCmdFlagsFast = (1 << 21), - kCmdFlagsSlow = (1 << 22) + kCmdFlagsSlow = (1 << 22), + kCmdFlagsPKHash = (1 << 23), }; void inline RedisAppendContent(std::string& str, const std::string& value); @@ -483,7 +504,7 @@ class CmdRes { struct UnblockTaskArgs { std::string key; std::shared_ptr db; - net::DispatchThread* dispatchThread{ nullptr }; + net::DispatchThread* dispatchThread{nullptr}; UnblockTaskArgs(std::string key_, std::shared_ptr db_, net::DispatchThread* dispatchThread_) : key(std::move(key_)), db(db_), dispatchThread(dispatchThread_) {} }; @@ -572,7 +593,7 @@ class Cmd : public std::enable_shared_from_this { std::shared_ptr GetResp(); void SetStage(CmdStage stage); - void SetCmdId(uint32_t cmdId){cmdId_ = cmdId;} + void SetCmdId(uint32_t cmdId) { cmdId_ = cmdId; } virtual void DoBinlog(); @@ -614,7 +635,7 @@ class Cmd : public std::enable_shared_from_this { private: virtual void DoInitial() = 0; - virtual void Clear(){}; + virtual void Clear() {}; Cmd& operator=(const Cmd&); }; diff --git a/include/pika_pkhash.h b/include/pika_pkhash.h new file mode 100644 index 0000000000..6482438073 --- /dev/null +++ b/include/pika_pkhash.h @@ -0,0 +1,469 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#ifndef PIKA_PKHASH_H_ +#define PIKA_PKHASH_H_ + +#include "include/acl.h" +#include "include/pika_command.h" +#include "include/pika_db.h" +#include "storage/storage.h" + +class PKHExpireCmd : public Cmd { + public: + PKHExpireCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHExpireCmd(*this); } + + private: + std::string key_; + int64_t ttl_ = 0; + int64_t numfields_ = 0; + std::vector fields_; + + rocksdb::Status s_; + + void DoInitial() override; + void Clear() override {} +}; + +class PKHExpireatCmd : public Cmd { + public: + PKHExpireatCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHExpireatCmd(*this); } + + private: + std::string key_; + int64_t timestamp_ = 0; + int64_t numfields_ = 0; + std::vector fields_; + + rocksdb::Status s_; + + void DoInitial() override; + void Clear() override {} +}; +class PKHExpiretimeCmd : public Cmd { + public: + PKHExpiretimeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHExpiretimeCmd(*this); } + + private: + std::string key_; + int64_t ttl_ = 0; + int64_t numfields_ = 0; + std::vector 
fields_; + + rocksdb::Status s_; + + void DoInitial() override; + void Clear() override {} +}; + +class PKHPersistCmd : public Cmd { + public: + PKHPersistCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHPersistCmd(*this); } + + private: + std::string key_; + int64_t ttl_ = 0; + int64_t numfields_ = 0; + std::vector fields_; + + rocksdb::Status s_; + + void DoInitial() override; + void Clear() override {} +}; + +class PKHTTLCmd : public Cmd { + public: + PKHTTLCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHTTLCmd(*this); } + + private: + std::string key_; + int64_t ttl_ = 0; + int64_t numfields_ = 0; + std::vector fields_; + + rocksdb::Status s_; + + void DoInitial() override; + void Clear() override {} +}; + +class PKHGetCmd : public Cmd { + public: + PKHGetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHGetCmd(*this); } + + private: + std::string key_, field_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHSetCmd 
: public Cmd { + public: + PKHSetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHSetCmd(*this); } + + private: + std::string key_, field_, value_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHSetexCmd : public Cmd { + public: + PKHSetexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHSetexCmd(*this); } + + private: + std::string key_, field_, value_; + int64_t sec_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHExistsCmd : public Cmd { + public: + PKHExistsCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHExistsCmd(*this); } + + private: + std::string key_, field_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHDelCmd : public Cmd { + public: + PKHDelCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + 
std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHDelCmd(*this); } + + private: + std::string key_; + std::vector fields_; + int32_t deleted_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHLenCmd : public Cmd { + public: + PKHLenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHLenCmd(*this); } + + private: + std::string key_; + bool is_force_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHStrLenCmd : public Cmd { + public: + PKHStrLenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHStrLenCmd(*this); } + + private: + std::string key_, field_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHIncrbyCmd : public Cmd { + public: + PKHIncrbyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() 
override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHIncrbyCmd(*this); } + + private: + std::string key_, field_; + int64_t by_; + int64_t sec_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHMSetCmd : public Cmd { + public: + PKHMSetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHMSetCmd(*this); } + + private: + std::string key_; + std::vector fvs_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHMGetCmd : public Cmd { + public: + PKHMGetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHMGetCmd(*this); } + + private: + std::string key_; + std::vector fields_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHKeysCmd : public Cmd { + public: + PKHKeysCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() 
override { return new PKHKeysCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHValsCmd : public Cmd { + public: + PKHValsCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHValsCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHGetAllCmd : public Cmd { + public: + PKHGetAllCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHGetAllCmd(*this); } + + private: + std::string key_; + bool is_wt_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PKHScanCmd : public Cmd { + public: + PKHScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PKHASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHScanCmd(*this); } + + private: + std::string key_, pattern_; + int64_t cursor_, count_; + bool is_wt_; + virtual void Clear() { + pattern_ = "*"; + count_ = 10; + is_wt_ = false; + 
} + + void DoInitial() override; + rocksdb::Status s_; +}; + +#endif diff --git a/src/pika_client_conn.cc b/src/pika_client_conn.cc index 37a47bc371..d615afb856 100644 --- a/src/pika_client_conn.cc +++ b/src/pika_client_conn.cc @@ -25,6 +25,7 @@ extern PikaServer* g_pika_server; extern std::unique_ptr g_pika_rm; extern std::unique_ptr g_pika_cmd_table_manager; + PikaClientConn::PikaClientConn(int fd, const std::string& ip_port, net::Thread* thread, net::NetMultiplexer* mpx, const net::HandleType& handle_type, int max_conn_rbuf_size) : RedisConn(fd, ip_port, thread, mpx, handle_type, max_conn_rbuf_size), diff --git a/src/pika_command.cc b/src/pika_command.cc index 63199c3481..008ce84718 100644 --- a/src/pika_command.cc +++ b/src/pika_command.cc @@ -16,6 +16,7 @@ #include "include/pika_hyperloglog.h" #include "include/pika_kv.h" #include "include/pika_list.h" +#include "include/pika_pkhash.h" #include "include/pika_pubsub.h" #include "include/pika_rm.h" #include "include/pika_server.h" @@ -52,11 +53,12 @@ void InitCmdTable(CmdTable* cmd_table) { kCmdNameBgsave, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameBgsave, std::move(bgsaveptr))); - std::unique_ptr compactptr = - std::make_unique(kCmdNameCompact, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow | kCmdFlagsSuspend); + std::unique_ptr compactptr = std::make_unique( + kCmdNameCompact, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow | kCmdFlagsSuspend); cmd_table->insert(std::pair>(kCmdNameCompact, std::move(compactptr))); - std::unique_ptr compactrangeptr = std::make_unique(kCmdNameCompactRange, 4, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend); + std::unique_ptr compactrangeptr = + std::make_unique(kCmdNameCompactRange, 4, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend); cmd_table->insert(std::pair>(kCmdNameCompactRange, std::move(compactrangeptr))); std::unique_ptr purgelogsto = std::make_unique(kCmdNamePurgelogsto, -2, kCmdFlagsRead | 
kCmdFlagsAdmin); @@ -75,11 +77,13 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameSelect, std::move(selectptr))); std::unique_ptr flushallptr = std::make_unique( - kCmdNameFlushall, 1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); + kCmdNameFlushall, 1, + kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameFlushall, std::move(flushallptr))); std::unique_ptr flushdbptr = std::make_unique( - kCmdNameFlushdb, -1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); + kCmdNameFlushdb, -1, + kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameFlushdb, std::move(flushdbptr))); std::unique_ptr clientptr = @@ -152,9 +156,11 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameDisableWal, std::move(disablewalptr))); std::unique_ptr cacheptr = std::make_unique(kCmdNameCache, -2, kCmdFlagsAdmin | kCmdFlagsRead); cmd_table->insert(std::pair>(kCmdNameCache, std::move(cacheptr))); - std::unique_ptr clearcacheptr = std::make_unique(kCmdNameClearCache, 1, kCmdFlagsAdmin | kCmdFlagsWrite); + std::unique_ptr clearcacheptr = + std::make_unique(kCmdNameClearCache, 1, kCmdFlagsAdmin | kCmdFlagsWrite); cmd_table->insert(std::pair>(kCmdNameClearCache, std::move(clearcacheptr))); - std::unique_ptr lastsaveptr = std::make_unique(kCmdNameLastSave, 1, kCmdFlagsAdmin | kCmdFlagsRead | kCmdFlagsFast); + std::unique_ptr lastsaveptr = + std::make_unique(kCmdNameLastSave, 1, kCmdFlagsAdmin | kCmdFlagsRead | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameLastSave, std::move(lastsaveptr))); #ifdef WITH_COMMAND_DOCS @@ -238,51 +244,55 @@ void InitCmdTable(CmdTable* cmd_table) { // Kv ////SetCmd - 
std::unique_ptr setptr = - std::make_unique(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr setptr = std::make_unique( + kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSet, std::move(setptr))); ////GetCmd - std::unique_ptr getptr = - std::make_unique(kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); + std::unique_ptr getptr = std::make_unique( + kCmdNameGet, 2, + kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameGet, std::move(getptr))); ////DelCmd - std::unique_ptr delptr = - std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr delptr = std::make_unique( + kCmdNameDel, -2, + kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameDel, std::move(delptr))); std::unique_ptr Unlinkptr = std::make_unique(kCmdNameUnlink, -2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameUnlink, std::move(Unlinkptr))); ////IncrCmd - std::unique_ptr incrptr = - std::make_unique(kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr incrptr = std::make_unique( + kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameIncr, std::move(incrptr))); ////IncrbyCmd std::unique_ptr incrbyptr = std::make_unique( - kCmdNameIncrby, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNameIncrby, 3, kCmdFlagsWrite | 
kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameIncrby, std::move(incrbyptr))); ////IncrbyfloatCmd std::unique_ptr incrbyfloatptr = std::make_unique( - kCmdNameIncrbyfloat, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNameIncrbyfloat, 3, + kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameIncrbyfloat, std::move(incrbyfloatptr))); ////DecrCmd - std::unique_ptr decrptr = - std::make_unique(kCmdNameDecr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr decrptr = std::make_unique( + kCmdNameDecr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameDecr, std::move(decrptr))); ////DecrbyCmd std::unique_ptr decrbyptr = std::make_unique( - kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameDecrby, std::move(decrbyptr))); ////GetsetCmd std::unique_ptr getsetptr = std::make_unique( - kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameGetset, std::move(getsetptr))); ////AppendCmd std::unique_ptr appendptr = std::make_unique( - kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameAppend, std::move(appendptr))); 
////MgetCmd - std::unique_ptr mgetptr = - std::make_unique(kCmdNameMget, -2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr mgetptr = std::make_unique( + kCmdNameMget, -2, + kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameMget, std::move(mgetptr))); ////KeysCmd std::unique_ptr keysptr = @@ -290,75 +300,83 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameKeys, std::move(keysptr))); ////SetnxCmd std::unique_ptr setnxptr = - std::make_unique(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsFast); + std::make_unique(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSetnx, std::move(setnxptr))); ////SetexCmd - std::unique_ptr setexptr = - std::make_unique(kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr setexptr = std::make_unique( + kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSetex, std::move(setexptr))); ////PsetexCmd - std::unique_ptr psetexptr = - std::make_unique(kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr psetexptr = std::make_unique( + kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePsetex, std::move(psetexptr))); ////DelvxCmd std::unique_ptr delvxptr = - std::make_unique(kCmdNameDelvx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); + std::make_unique(kCmdNameDelvx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameDelvx, std::move(delvxptr))); ////MSetCmd - std::unique_ptr msetptr = - 
std::make_unique(kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr msetptr = std::make_unique( + kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameMset, std::move(msetptr))); ////MSetnxCmd - std::unique_ptr msetnxptr = std::make_unique( - kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); + std::unique_ptr msetnxptr = + std::make_unique(kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameMsetnx, std::move(msetnxptr))); ////GetrangeCmd std::unique_ptr getrangeptr = std::make_unique( - kCmdNameGetrange, 4, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); + kCmdNameGetrange, 4, + kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameGetrange, std::move(getrangeptr))); ////SetrangeCmd std::unique_ptr setrangeptr = std::make_unique( - kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSetrange, std::move(setrangeptr))); ////StrlenCmd - std::unique_ptr strlenptr = - std::make_unique(kCmdNameStrlen, 2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr strlenptr = std::make_unique( + kCmdNameStrlen, 2, + kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameStrlen, std::move(strlenptr))); ////ExistsCmd - std::unique_ptr existsptr = - std::make_unique(kCmdNameExists, -2, kCmdFlagsRead | 
kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr existsptr = std::make_unique( + kCmdNameExists, -2, + kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameExists, std::move(existsptr))); ////ExpireCmd std::unique_ptr expireptr = std::make_unique( - kCmdNameExpire, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNameExpire, 3, + kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameExpire, std::move(expireptr))); ////PexpireCmd std::unique_ptr pexpireptr = std::make_unique( - kCmdNamePexpire, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNamePexpire, 3, + kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNamePexpire, std::move(pexpireptr))); ////ExpireatCmd - std::unique_ptr expireatptr = - std::make_unique(kCmdNameExpireat, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr expireatptr = std::make_unique( + kCmdNameExpireat, 3, + kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameExpireat, std::move(expireatptr))); ////PexpireatCmd - std::unique_ptr pexpireatptr = - std::make_unique(kCmdNamePexpireat, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr pexpireatptr = std::make_unique( + kCmdNamePexpireat, 3, + kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNamePexpireat, std::move(pexpireatptr))); ////TtlCmd - std::unique_ptr ttlptr = - 
std::make_unique(kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr ttlptr = std::make_unique( + kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameTtl, std::move(ttlptr))); ////PttlCmd - std::unique_ptr pttlptr = - std::make_unique(kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr pttlptr = std::make_unique( + kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNamePttl, std::move(pttlptr))); ////PersistCmd - std::unique_ptr persistptr = - std::make_unique(kCmdNamePersist, 2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr persistptr = std::make_unique( + kCmdNamePersist, 2, + kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNamePersist, std::move(persistptr))); ////TypeCmd - std::unique_ptr typeptr = - std::make_unique(kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr typeptr = std::make_unique( + kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameType, std::move(typeptr))); ////ScanCmd std::unique_ptr scanptr = @@ -370,361 +388,472 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameScanx, std::move(scanxptr))); ////PKSetexAtCmd std::unique_ptr pksetexatptr = std::make_unique( - kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | 
kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePKSetexAt, std::move(pksetexatptr))); ////PKScanRange - std::unique_ptr pkscanrangeptr = std::make_unique( - kCmdNamePKScanRange, -4, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + std::unique_ptr pkscanrangeptr = + std::make_unique(kCmdNamePKScanRange, -4, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePKScanRange, std::move(pkscanrangeptr))); ////PKRScanRange - std::unique_ptr pkrscanrangeptr = std::make_unique( - kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + std::unique_ptr pkrscanrangeptr = + std::make_unique(kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePKRScanRange, std::move(pkrscanrangeptr))); // Hash ////HDelCmd - std::unique_ptr hdelptr = - std::make_unique(kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + std::unique_ptr hdelptr = std::make_unique( + kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHDel, std::move(hdelptr))); ////HSetCmd - std::unique_ptr hsetptr = - std::make_unique(kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + std::unique_ptr hsetptr = std::make_unique( + kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHSet, std::move(hsetptr))); ////HGetCmd - std::unique_ptr hgetptr = - std::make_unique(kCmdNameHGet, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache |kCmdFlagsFast); + std::unique_ptr hgetptr = std::make_unique( + kCmdNameHGet, 3, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | 
kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHGet, std::move(hgetptr))); ////HGetallCmd - std::unique_ptr hgetallptr = - std::make_unique(kCmdNameHGetall, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache); + std::unique_ptr hgetallptr = std::make_unique( + kCmdNameHGetall, 2, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache); cmd_table->insert(std::pair>(kCmdNameHGetall, std::move(hgetallptr))); ////HExistsCmd - std::unique_ptr hexistsptr = - std::make_unique(kCmdNameHExists, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast ); + std::unique_ptr hexistsptr = std::make_unique( + kCmdNameHExists, 3, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHExists, std::move(hexistsptr))); ////HIncrbyCmd - std::unique_ptr hincrbyptr = - std::make_unique(kCmdNameHIncrby, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + std::unique_ptr hincrbyptr = std::make_unique( + kCmdNameHIncrby, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHIncrby, std::move(hincrbyptr))); ////HIncrbyfloatCmd - std::unique_ptr hincrbyfloatptr = - std::make_unique(kCmdNameHIncrbyfloat, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + std::unique_ptr hincrbyfloatptr = std::make_unique( + kCmdNameHIncrbyfloat, 4, + kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHIncrbyfloat, std::move(hincrbyfloatptr))); ////HKeysCmd - std::unique_ptr hkeysptr = - std::make_unique(kCmdNameHKeys, 2, 
kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr hkeysptr = std::make_unique( + kCmdNameHKeys, 2, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHKeys, std::move(hkeysptr))); ////HLenCmd - std::unique_ptr hlenptr = - std::make_unique(kCmdNameHLen, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr hlenptr = std::make_unique( + kCmdNameHLen, 2, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHLen, std::move(hlenptr))); ////HMgetCmd - std::unique_ptr hmgetptr = - std::make_unique(kCmdNameHMget, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache |kCmdFlagsFast); + std::unique_ptr hmgetptr = std::make_unique( + kCmdNameHMget, -3, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHMget, std::move(hmgetptr))); ////HMsetCmd - std::unique_ptr hmsetptr = - std::make_unique(kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + std::unique_ptr hmsetptr = std::make_unique( + kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHMset, std::move(hmsetptr))); ////HSetnxCmd - std::unique_ptr hsetnxptr = - std::make_unique(kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + std::unique_ptr hsetnxptr = std::make_unique( + kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); 
cmd_table->insert(std::pair>(kCmdNameHSetnx, std::move(hsetnxptr))); ////HStrlenCmd - std::unique_ptr hstrlenptr = - std::make_unique(kCmdNameHStrlen, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr hstrlenptr = std::make_unique( + kCmdNameHStrlen, 3, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameHStrlen, std::move(hstrlenptr))); ////HValsCmd - std::unique_ptr hvalsptr = - std::make_unique(kCmdNameHVals, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache); + std::unique_ptr hvalsptr = std::make_unique( + kCmdNameHVals, 2, + kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache); cmd_table->insert(std::pair>(kCmdNameHVals, std::move(hvalsptr))); ////HScanCmd - std::unique_ptr hscanptr = std::make_unique( - kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + std::unique_ptr hscanptr = + std::make_unique(kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameHScan, std::move(hscanptr))); ////HScanxCmd - std::unique_ptr hscanxptr = std::make_unique( - kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + std::unique_ptr hscanxptr = + std::make_unique(kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameHScanx, std::move(hscanxptr))); ////PKHScanRange - std::unique_ptr pkhscanrangeptr = std::make_unique( - kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + std::unique_ptr pkhscanrangeptr = + std::make_unique(kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePKHScanRange, std::move(pkhscanrangeptr))); ////PKHRScanRange - std::unique_ptr 
pkhrscanrangeptr = std::make_unique( - kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + std::unique_ptr pkhrscanrangeptr = + std::make_unique(kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePKHRScanRange, std::move(pkhrscanrangeptr))); + // Pika Expire Hash + std::unique_ptr ehsetptr = std::make_unique( + kCmdNamePKHSet, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHSet, std::move(ehsetptr))); + + ////Ehexpire + std::unique_ptr ehexpireptr = std::make_unique( + kCmdNamePKHExpire, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHExpire, std::move(ehexpireptr))); + ////Ehexpireat + std::unique_ptr ehexpireatptr = std::make_unique( + kCmdNamePKHExpireat, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHExpireat, std::move(ehexpireatptr))); + ////Ehexpiretime + std::unique_ptr ehexpiretimeptr = std::make_unique( + kCmdNamePKHExpiretime, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHExpiretime, std::move(ehexpiretimeptr))); + ////Ehttl + std::unique_ptr ehttlptr = std::make_unique( + kCmdNamePKHTTL, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHTTL, std::move(ehttlptr))); + + ////Ehpersist + std::unique_ptr ehpersistptr = std::make_unique( + kCmdNamePKHPersist, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHPersist, std::move(ehpersistptr))); + ////Ehget + std::unique_ptr ehgetptr = std::make_unique( + kCmdNamePKHGet, 3, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHGet, 
std::move(ehgetptr))); + + std::unique_ptr ehsetexptr = std::make_unique( + kCmdNamePKHSetex, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHSetex, std::move(ehsetexptr))); + + std::unique_ptr ehexistsptr = std::make_unique( + kCmdNamePKHExists, 3, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHExists, std::move(ehexistsptr))); + + std::unique_ptr ehdelptr = std::make_unique( + kCmdNamePKHDel, -3, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHDel, std::move(ehdelptr))); + + std::unique_ptr ehlenptr = std::make_unique( + kCmdNamePKHLen, 2, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHLen, std::move(ehlenptr))); + + std::unique_ptr ehstrlenptr = std::make_unique( + kCmdNamePKHStrlen, 3, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHStrlen, std::move(ehstrlenptr))); + + std::unique_ptr ehincrbyptr = std::make_unique( + kCmdNamePKHIncrby, 4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHIncrby, std::move(ehincrbyptr))); + + std::unique_ptr ehmsetptr = std::make_unique( + kCmdNamePKHMSet, -4, kCmdFlagsWrite | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHMSet, std::move(ehmsetptr))); + + std::unique_ptr ehmgetptr = std::make_unique( + kCmdNamePKHMGet, -3, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHMGet, std::move(ehmgetptr))); + + std::unique_ptr ehkeysptr = std::make_unique( + kCmdNamePKHKeys, 2, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePKHKeys, 
std::move(ehkeysptr))); + // TODO(DDD) 为啥vals是慢的命令。 + // TODO(DDD) 这些标志位都是啥意思。 + std::unique_ptr ehvalsptr = std::make_unique( + kCmdNamePKHVals, 2, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKHVals, std::move(ehvalsptr))); + + std::unique_ptr ehgetallptr = std::make_unique( + kCmdNamePKHGetall, 2, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKHGetall, std::move(ehgetallptr))); + + std::unique_ptr ehscanptr = std::make_unique( + kCmdNamePKHScan, -3, kCmdFlagsRead | kCmdFlagsDoThroughDB | kCmdFlagsPKHash | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKHScan, std::move(ehscanptr))); + // List - std::unique_ptr lindexptr = - std::make_unique(kCmdNameLIndex, 3, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr lindexptr = std::make_unique( + kCmdNameLIndex, 3, + kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameLIndex, std::move(lindexptr))); - std::unique_ptr linsertptr = - std::make_unique(kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr linsertptr = std::make_unique( + kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameLInsert, std::move(linsertptr))); - std::unique_ptr llenptr = - std::make_unique(kCmdNameLLen, 2, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr llenptr = std::make_unique( + kCmdNameLLen, 2, + kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameLLen, 
std::move(llenptr))); - std::unique_ptr blpopptr = std::make_unique( - kCmdNameBLPop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); + std::unique_ptr blpopptr = + std::make_unique(kCmdNameBLPop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameBLPop, std::move(blpopptr))); - std::unique_ptr lpopptr = - std::make_unique(kCmdNameLPop, -2, kCmdFlagsWrite | kCmdFlagsList |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr lpopptr = std::make_unique( + kCmdNameLPop, -2, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameLPop, std::move(lpopptr))); std::unique_ptr lpushptr = std::make_unique( - kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameLPush, std::move(lpushptr))); - std::unique_ptr lpushxptr = std::make_unique(kCmdNameLPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr lpushxptr = std::make_unique( + kCmdNameLPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameLPushx, std::move(lpushxptr))); std::unique_ptr lrangeptr = std::make_unique( - kCmdNameLRange, 4, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + kCmdNameLRange, 4, + kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameLRange, std::move(lrangeptr))); - std::unique_ptr lremptr = - std::make_unique(kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + 
std::unique_ptr lremptr = std::make_unique( + kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameLRem, std::move(lremptr))); - std::unique_ptr lsetptr = - std::make_unique(kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr lsetptr = std::make_unique( + kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameLSet, std::move(lsetptr))); - std::unique_ptr ltrimptr = - std::make_unique(kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsList |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr ltrimptr = std::make_unique( + kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameLTrim, std::move(ltrimptr))); - std::unique_ptr brpopptr = std::make_unique( - kCmdNameBRpop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); + std::unique_ptr brpopptr = + std::make_unique(kCmdNameBRpop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameBRpop, std::move(brpopptr))); - std::unique_ptr rpopptr = - std::make_unique(kCmdNameRPop, -2, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr rpopptr = std::make_unique( + kCmdNameRPop, -2, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameRPop, std::move(rpopptr))); - std::unique_ptr rpoplpushptr = std::make_unique( - kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); + std::unique_ptr rpoplpushptr = + std::make_unique(kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameRPopLPush, 
std::move(rpoplpushptr))); - std::unique_ptr rpushptr = - std::make_unique(kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr rpushptr = std::make_unique( + kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameRPush, std::move(rpushptr))); - std::unique_ptr rpushxptr = - std::make_unique(kCmdNameRPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr rpushxptr = std::make_unique( + kCmdNameRPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameRPushx, std::move(rpushxptr))); // Zset ////ZAddCmd - std::unique_ptr zaddptr = - std::make_unique(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr zaddptr = std::make_unique( + kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZAdd, std::move(zaddptr))); ////ZCardCmd - std::unique_ptr zcardptr = - std::make_unique(kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr zcardptr = std::make_unique( + kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZCard, std::move(zcardptr))); ////ZScanCmd - std::unique_ptr zscanptr = std::make_unique( - kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + std::unique_ptr zscanptr = + std::make_unique(kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZScan, std::move(zscanptr))); ////ZIncrbyCmd - std::unique_ptr zincrbyptr = - 
std::make_unique(kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast) ; + std::unique_ptr zincrbyptr = std::make_unique( + kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZIncrby, std::move(zincrbyptr))); ////ZRangeCmd - std::unique_ptr zrangeptr = - std::make_unique(kCmdNameZRange, -4, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr zrangeptr = std::make_unique( + kCmdNameZRange, -4, + kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZRange, std::move(zrangeptr))); ////ZRevrangeCmd - std::unique_ptr zrevrangeptr = - std::make_unique(kCmdNameZRevrange, -4, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr zrevrangeptr = std::make_unique( + kCmdNameZRevrange, -4, + kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZRevrange, std::move(zrevrangeptr))); ////ZRangebyscoreCmd - std::unique_ptr zrangebyscoreptr = std::make_unique( - kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + std::unique_ptr zrangebyscoreptr = + std::make_unique(kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZRangebyscore, std::move(zrangebyscoreptr))); ////ZRevrangebyscoreCmd std::unique_ptr zrevrangebyscoreptr = std::make_unique( - kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); cmd_table->insert( std::pair>(kCmdNameZRevrangebyscore, std::move(zrevrangebyscoreptr))); 
////ZCountCmd - std::unique_ptr zcountptr = - std::make_unique(kCmdNameZCount, 4, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr zcountptr = std::make_unique( + kCmdNameZCount, 4, + kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZCount, std::move(zcountptr))); ////ZRemCmd - std::unique_ptr zremptr = - std::make_unique(kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr zremptr = std::make_unique( + kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZRem, std::move(zremptr))); ////ZUnionstoreCmd - std::unique_ptr zunionstoreptr = - std::make_unique(kCmdNameZUnionstore, -4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr zunionstoreptr = std::make_unique( + kCmdNameZUnionstore, -4, + kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZUnionstore, std::move(zunionstoreptr))); ////ZInterstoreCmd - std::unique_ptr zinterstoreptr = - std::make_unique(kCmdNameZInterstore, -4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr zinterstoreptr = std::make_unique( + kCmdNameZInterstore, -4, + kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZInterstore, std::move(zinterstoreptr))); ////ZRankCmd - std::unique_ptr zrankptr = - std::make_unique(kCmdNameZRank, 3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr zrankptr = std::make_unique( + 
kCmdNameZRank, 3, + kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZRank, std::move(zrankptr))); ////ZRevrankCmd - std::unique_ptr zrevrankptr = - std::make_unique(kCmdNameZRevrank, 3, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr zrevrankptr = std::make_unique( + kCmdNameZRevrank, 3, + kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZRevrank, std::move(zrevrankptr))); ////ZScoreCmd - std::unique_ptr zscoreptr = - std::make_unique(kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + std::unique_ptr zscoreptr = std::make_unique( + kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZScore, std::move(zscoreptr))); ////ZRangebylexCmd std::unique_ptr zrangebylexptr = - std::make_unique(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + std::make_unique(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZRangebylex, std::move(zrangebylexptr))); ////ZRevrangebylexCmd - std::unique_ptr zrevrangebylexptr = std::make_unique( - kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + std::unique_ptr zrevrangebylexptr = + std::make_unique(kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZRevrangebylex, std::move(zrevrangebylexptr))); ////ZLexcountCmd std::unique_ptr zlexcountptr = - std::make_unique(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsFast); + std::make_unique(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsFast); 
cmd_table->insert(std::pair>(kCmdNameZLexcount, std::move(zlexcountptr))); ////ZRemrangebyrankCmd std::unique_ptr zremrangebyrankptr = std::make_unique( - kCmdNameZRemrangebyrank, 4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + kCmdNameZRemrangebyrank, 4, + kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert( std::pair>(kCmdNameZRemrangebyrank, std::move(zremrangebyrankptr))); ////ZRemrangebyscoreCmd std::unique_ptr zremrangebyscoreptr = std::make_unique( - kCmdNameZRemrangebyscore, 4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + kCmdNameZRemrangebyscore, 4, + kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert( std::pair>(kCmdNameZRemrangebyscore, std::move(zremrangebyscoreptr))); ////ZRemrangebylexCmd std::unique_ptr zremrangebylexptr = std::make_unique( - kCmdNameZRemrangebylex, 4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + kCmdNameZRemrangebylex, 4, + kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameZRemrangebylex, std::move(zremrangebylexptr))); ////ZPopmax - std::unique_ptr zpopmaxptr = std::make_unique( - kCmdNameZPopmax, -2, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast); + std::unique_ptr zpopmaxptr = + std::make_unique(kCmdNameZPopmax, -2, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZPopmax, std::move(zpopmaxptr))); ////ZPopmin - std::unique_ptr zpopminptr = std::make_unique( - kCmdNameZPopmin, -2, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast); + std::unique_ptr zpopminptr = + std::make_unique(kCmdNameZPopmin, -2, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameZPopmin, std::move(zpopminptr))); // Set ////SAddCmd - 
std::unique_ptr saddptr = - std::make_unique(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr saddptr = std::make_unique( + kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSAdd, std::move(saddptr))); ////SPopCmd - std::unique_ptr spopptr = - std::make_unique(kCmdNameSPop, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr spopptr = std::make_unique( + kCmdNameSPop, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSPop, std::move(spopptr))); ////SCardCmd - std::unique_ptr scardptr = - std::make_unique(kCmdNameSCard, 2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr scardptr = std::make_unique( + kCmdNameSCard, 2, + kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSCard, std::move(scardptr))); ////SMembersCmd - std::unique_ptr smembersptr = - std::make_unique(kCmdNameSMembers, 2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr smembersptr = std::make_unique( + kCmdNameSMembers, 2, + kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSMembers, std::move(smembersptr))); ////SScanCmd std::unique_ptr sscanptr = - std::make_unique(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsSlow); + std::make_unique(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSScan, std::move(sscanptr))); ////SRemCmd - std::unique_ptr sremptr 
= - std::make_unique(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr sremptr = std::make_unique( + kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSRem, std::move(sremptr))); ////SUnionCmd - std::unique_ptr sunionptr = std::make_unique( - kCmdNameSUnion, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); + std::unique_ptr sunionptr = + std::make_unique(kCmdNameSUnion, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSUnion, std::move(sunionptr))); ////SUnionstoreCmd - std::unique_ptr sunionstoreptr = - std::make_unique(kCmdNameSUnionstore, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr sunionstoreptr = std::make_unique( + kCmdNameSUnionstore, -3, + kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSUnionstore, std::move(sunionstoreptr))); ////SInterCmd - std::unique_ptr sinterptr = std::make_unique( - kCmdNameSInter, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); + std::unique_ptr sinterptr = + std::make_unique(kCmdNameSInter, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSInter, std::move(sinterptr))); ////SInterstoreCmd - std::unique_ptr sinterstoreptr = - std::make_unique(kCmdNameSInterstore, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr sinterstoreptr = std::make_unique( + kCmdNameSInterstore, -3, + kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSInterstore, std::move(sinterstoreptr))); ////SIsmemberCmd - std::unique_ptr sismemberptr = - std::make_unique(kCmdNameSIsmember, 3, kCmdFlagsRead | 
kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr sismemberptr = std::make_unique( + kCmdNameSIsmember, 3, + kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSIsmember, std::move(sismemberptr))); ////SDiffCmd std::unique_ptr sdiffptr = std::make_unique(kCmdNameSDiff, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSDiff, std::move(sdiffptr))); ////SDiffstoreCmd - std::unique_ptr sdiffstoreptr = - std::make_unique(kCmdNameSDiffstore, -3, kCmdFlagsWrite | kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr sdiffstoreptr = std::make_unique( + kCmdNameSDiffstore, -3, + kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSDiffstore, std::move(sdiffstoreptr))); ////SMoveCmd - std::unique_ptr smoveptr = - std::make_unique(kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + std::unique_ptr smoveptr = std::make_unique( + kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameSMove, std::move(smoveptr))); ////SRandmemberCmd - std::unique_ptr srandmemberptr = - std::make_unique(kCmdNameSRandmember, -2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + std::unique_ptr srandmemberptr = std::make_unique( + kCmdNameSRandmember, -2, + kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSRandmember, std::move(srandmemberptr))); // BitMap ////bitsetCmd - std::unique_ptr bitsetptr = - std::make_unique(kCmdNameBitSet, 4, kCmdFlagsWrite | 
kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); + std::unique_ptr bitsetptr = std::make_unique( + kCmdNameBitSet, 4, kCmdFlagsWrite | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); cmd_table->insert(std::pair>(kCmdNameBitSet, std::move(bitsetptr))); ////bitgetCmd std::unique_ptr bitgetptr = std::make_unique(kCmdNameBitGet, 3, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameBitGet, std::move(bitgetptr))); ////bitcountCmd - std::unique_ptr bitcountptr = - std::make_unique(kCmdNameBitCount, -2, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache); + std::unique_ptr bitcountptr = std::make_unique( + kCmdNameBitCount, -2, + kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache); cmd_table->insert(std::pair>(kCmdNameBitCount, std::move(bitcountptr))); ////bitposCmd std::unique_ptr bitposptr = std::make_unique(kCmdNameBitPos, -3, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameBitPos, std::move(bitposptr))); ////bitopCmd - std::unique_ptr bitopptr = - std::make_unique(kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); + std::unique_ptr bitopptr = std::make_unique( + kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); cmd_table->insert(std::pair>(kCmdNameBitOp, std::move(bitopptr))); // HyperLogLog ////pfaddCmd - std::unique_ptr pfaddptr = std::make_unique( - kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsFast); + std::unique_ptr pfaddptr = + std::make_unique(kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNamePfAdd, std::move(pfaddptr))); ////pfcountCmd - std::unique_ptr pfcountptr = std::make_unique( - kCmdNamePfCount, -2, 
kCmdFlagsRead | kCmdFlagsHyperLogLog | kCmdFlagsSlow); + std::unique_ptr pfcountptr = + std::make_unique(kCmdNamePfCount, -2, kCmdFlagsRead | kCmdFlagsHyperLogLog | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePfCount, std::move(pfcountptr))); ////pfmergeCmd - std::unique_ptr pfmergeptr = std::make_unique( - kCmdNamePfMerge, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsSlow); + std::unique_ptr pfmergeptr = + std::make_unique(kCmdNamePfMerge, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePfMerge, std::move(pfmergeptr))); // GEO ////GepAdd - std::unique_ptr geoaddptr = std::make_unique( - kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsGeo | kCmdFlagsSlow); + std::unique_ptr geoaddptr = + std::make_unique(kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsGeo | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameGeoAdd, std::move(geoaddptr))); ////GeoPos - std::unique_ptr geoposptr = std::make_unique( - kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + std::unique_ptr geoposptr = + std::make_unique(kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameGeoPos, std::move(geoposptr))); ////GeoDist - std::unique_ptr geodistptr = std::make_unique( - kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + std::unique_ptr geodistptr = + std::make_unique(kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameGeoDist, std::move(geodistptr))); ////GeoHash - std::unique_ptr geohashptr = std::make_unique( - kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + std::unique_ptr geohashptr = + std::make_unique(kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameGeoHash, std::move(geohashptr))); ////GeoRadius - std::unique_ptr georadiusptr = std::make_unique( - kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsGeo | 
kCmdFlagsSlow); + std::unique_ptr georadiusptr = + std::make_unique(kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameGeoRadius, std::move(georadiusptr))); ////GeoRadiusByMember std::unique_ptr georadiusbymemberptr = std::make_unique( @@ -735,60 +864,59 @@ void InitCmdTable(CmdTable* cmd_table) { // PubSub ////Publish std::unique_ptr publishptr = - std::make_unique(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsFast ); + std::make_unique(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNamePublish, std::move(publishptr))); ////Subscribe std::unique_ptr subscribeptr = - std::make_unique(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + std::make_unique(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSubscribe, std::move(subscribeptr))); ////UnSubscribe std::unique_ptr unsubscribeptr = - std::make_unique(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + std::make_unique(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameUnSubscribe, std::move(unsubscribeptr))); ////PSubscribe std::unique_ptr psubscribeptr = - std::make_unique(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + std::make_unique(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePSubscribe, std::move(psubscribeptr))); ////PUnSubscribe std::unique_ptr punsubscribeptr = - std::make_unique(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + std::make_unique(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePUnSubscribe, std::move(punsubscribeptr))); ////PubSub std::unique_ptr pubsubptr = - std::make_unique(kCmdNamePubSub, -2, 
kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + std::make_unique(kCmdNamePubSub, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePubSub, std::move(pubsubptr))); ////ACL - std::unique_ptr aclptr = std::make_unique(KCmdNameAcl, -2, kCmdFlagsAdmin | kCmdFlagsSlow ); + std::unique_ptr aclptr = std::make_unique(KCmdNameAcl, -2, kCmdFlagsAdmin | kCmdFlagsSlow); cmd_table->insert(std::pair>(KCmdNameAcl, std::move(aclptr))); // Transaction ////Multi - std::unique_ptr multiptr = - std::make_unique(kCmdNameMulti, 1, kCmdFlagsRead | kCmdFlagsFast ); + std::unique_ptr multiptr = std::make_unique(kCmdNameMulti, 1, kCmdFlagsRead | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameMulti, std::move(multiptr))); ////Exec - std::unique_ptr execptr = std::make_unique( - kCmdNameExec, 1, kCmdFlagsRead | kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsSlow ); + std::unique_ptr execptr = + std::make_unique(kCmdNameExec, 1, kCmdFlagsRead | kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameExec, std::move(execptr))); ////Discard - std::unique_ptr discardptr = std::make_unique(kCmdNameDiscard, 1, kCmdFlagsRead | kCmdFlagsFast ); + std::unique_ptr discardptr = std::make_unique(kCmdNameDiscard, 1, kCmdFlagsRead | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameDiscard, std::move(discardptr))); ////Watch - std::unique_ptr watchptr = std::make_unique(kCmdNameWatch, -2, kCmdFlagsRead | kCmdFlagsFast ); + std::unique_ptr watchptr = std::make_unique(kCmdNameWatch, -2, kCmdFlagsRead | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameWatch, std::move(watchptr))); ////Unwatch - std::unique_ptr unwatchptr = std::make_unique(kCmdNameUnWatch, 1, kCmdFlagsRead | kCmdFlagsFast ); + std::unique_ptr unwatchptr = std::make_unique(kCmdNameUnWatch, 1, kCmdFlagsRead | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameUnWatch, std::move(unwatchptr))); // Stream ////XAdd std::unique_ptr xaddptr = - 
std::make_unique(kCmdNameXAdd, -4, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast); + std::make_unique(kCmdNameXAdd, -4, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameXAdd, std::move(xaddptr))); ////XLen std::unique_ptr xlenptr = - std::make_unique(kCmdNameXLen, 2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsFast); + std::make_unique(kCmdNameXLen, 2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameXLen, std::move(xlenptr))); ////XRead std::unique_ptr xreadptr = @@ -796,24 +924,24 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameXRead, std::move(xreadptr))); ////XRange std::unique_ptr xrangeptr = - std::make_unique(kCmdNameXRange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow); + std::make_unique(kCmdNameXRange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameXRange, std::move(xrangeptr))); ////XRerange std::unique_ptr xrerverangeptr = - std::make_unique(kCmdNameXRevrange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow); + std::make_unique(kCmdNameXRevrange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameXRevrange, std::move(xrerverangeptr))); ////XTrim std::unique_ptr xtrimptr = - std::make_unique(kCmdNameXTrim, -2, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsSlow); + std::make_unique(kCmdNameXTrim, -2, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameXTrim, std::move(xtrimptr))); ////XDel std::unique_ptr xdelptr = - std::make_unique(kCmdNameXDel, -3, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast); + std::make_unique(kCmdNameXDel, -3, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast); cmd_table->insert(std::pair>(kCmdNameXDel, std::move(xdelptr))); ////XINFO - std::unique_ptr xinfoptr = - std::make_unique(kCmdNameXInfo, -2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow); - 
cmd_table->insert(std::pair>(kCmdNameXInfo, std::move(xinfoptr))); + std::unique_ptr xinfoptr = + std::make_unique(kCmdNameXInfo, -2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameXInfo, std::move(xinfoptr))); } Cmd* GetCmdFromDB(const std::string& opt, const CmdTable& cmd_table) { @@ -827,8 +955,7 @@ Cmd* GetCmdFromDB(const std::string& opt, const CmdTable& cmd_table) { bool Cmd::CheckArg(uint64_t num) const { return !((arity_ > 0 && num != arity_) || (arity_ < 0 && num < -arity_)); } Cmd::Cmd(std::string name, int arity, uint32_t flag, uint32_t aclCategory) - : name_(std::move(name)), arity_(arity), flag_(flag), aclCategory_(aclCategory), cache_missed_in_rtc_(false) { -} + : name_(std::move(name)), arity_(arity), flag_(flag), aclCategory_(aclCategory), cache_missed_in_rtc_(false) {} void Cmd::Initial(const PikaCmdArgsType& argv, const std::string& db_name) { argv_ = argv; @@ -836,15 +963,13 @@ void Cmd::Initial(const PikaCmdArgsType& argv, const std::string& db_name) { res_.clear(); // Clear res content db_ = g_pika_server->GetDB(db_name_); sync_db_ = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); - Clear(); // Clear cmd, Derived class can has own implement + Clear(); // Clear cmd, Derived class can has own implement DoInitial(); }; std::vector Cmd::current_key() const { return {""}; } -void Cmd::Execute() { - ProcessCommand(); -} +void Cmd::Execute() { ProcessCommand(); } void Cmd::ProcessCommand(const HintKeys& hint_keys) { if (stage_ == kNone) { @@ -887,15 +1012,12 @@ void Cmd::InternalProcessCommand(const HintKeys& hint_keys) { } void Cmd::DoCommand(const HintKeys& hint_keys) { - if (IsNeedCacheDo() - && PIKA_CACHE_NONE != g_pika_conf->cache_mode() - && db_->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) { - if (!cache_missed_in_rtc_ - && IsNeedReadCache()) { + if (IsNeedCacheDo() && PIKA_CACHE_NONE != g_pika_conf->cache_mode() && + db_->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) { + if 
(!cache_missed_in_rtc_ && IsNeedReadCache()) { ReadCache(); } - if (is_read() - && (res().CacheMiss() || cache_missed_in_rtc_)) { + if (is_read() && (res().CacheMiss() || cache_missed_in_rtc_)) { pstd::lock::MultiScopeRecordLock record_lock(db_->LockMgr(), current_key()); DoThroughDB(); if (IsNeedUpdateCache()) { @@ -930,18 +1052,17 @@ bool Cmd::DoReadCommandInCache() { }; if (db_->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) { - if (IsNeedReadCache()) { - ReadCache(); - } - // return true only the read command hit - if (is_read() && !res().CacheMiss()) { - return true; - } + if (IsNeedReadCache()) { + ReadCache(); + } + // return true only the read command hit + if (is_read() && !res().CacheMiss()) { + return true; + } } return false; } - void Cmd::DoBinlog() { if (res().ok() && is_write() && g_pika_conf->write_binlog()) { std::shared_ptr conn_ptr = GetConn(); diff --git a/src/pika_pkhash.cc b/src/pika_pkhash.cc new file mode 100644 index 0000000000..db09c61a03 --- /dev/null +++ b/src/pika_pkhash.cc @@ -0,0 +1,686 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#include "include/pika_pkhash.h" + +#include "pstd/include/pstd_string.h" + +#include "include/pika_cache.h" +#include "include/pika_conf.h" +#include "include/pika_slot_command.h" + +extern std::unique_ptr g_pika_conf; + +void PKHExpireCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHExpire); + return; + } + key_ = argv_[1]; + auto iter = argv_.begin(); + // ttl + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + iter++; + iter++; + iter++; + iter++; + iter++; + if (pstd::string2int(argv_[4].data(), argv_[4].size(), &numfields_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + fields_.assign(iter, argv_.end()); +} + +void PKHExpireCmd::Do() { + std::vector rets; + s_ = db_->storage()->PKHExpire(key_, ttl_, numfields_, fields_, &rets); + if (s_.ok()) { + res_.AppendArrayLenUint64(rets.size()); + for (const auto& ret : rets) { + res_.AppendInteger(ret); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHExpireatCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHExpire); + return; + } + + key_ = argv_[1]; + auto iter = argv_.begin(); + if (pstd::string2int(argv_[2].data(), argv_[2].size(), ×tamp_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + iter++; + iter++; + iter++; + iter++; + iter++; + + if (pstd::string2int(argv_[4].data(), argv_[4].size(), &numfields_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + fields_.assign(iter, argv_.end()); +} +void PKHExpireatCmd::Do() { + std::vector rets; + s_ = db_->storage()->PKHExpireat(key_, timestamp_, numfields_, fields_, &rets); + if (s_.ok()) { + res_.AppendArrayLenUint64(rets.size()); + for (const auto& ret : rets) { + res_.AppendInteger(ret); + } + } else if (s_.IsInvalidArgument()) { + 
res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHExpiretimeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHExpire); + return; + } + + key_ = argv_[1]; + auto iter = argv_.begin(); + + iter++; + iter++; + iter++; + iter++; + + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &numfields_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + fields_.assign(iter, argv_.end()); +} +void PKHExpiretimeCmd::Do() { + std::vector timestamps; + s_ = db_->storage()->PKHExpiretime(key_, numfields_, fields_, ×tamps); + if (s_.ok()) { + res_.AppendArrayLenUint64(timestamps.size()); + for (const auto& timestamp : timestamps) { + res_.AppendInteger(timestamp); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHPersistCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHExpire); + return; + } + + key_ = argv_[1]; + auto iter = argv_.begin(); + iter++; + iter++; + iter++; + iter++; + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &numfields_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + fields_.assign(iter, argv_.end()); +} +void PKHPersistCmd::Do() { + std::vector rets; + s_ = db_->storage()->PKHPersist(key_, numfields_, fields_, &rets); + if (s_.ok()) { + res_.AppendArrayLenUint64(rets.size()); + for (const auto& ret : rets) { + res_.AppendInteger(ret); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHTTLCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHExpire); + return; + } + + key_ = argv_[1]; + auto iter = argv_.begin(); + iter++; + iter++; + iter++; + iter++; + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &numfields_) == 0) { + 
res_.SetRes(CmdRes::kInvalidInt); + return; + } + fields_.assign(iter, argv_.end()); +} +void PKHTTLCmd::Do() { + std::vector ttls; + s_ = db_->storage()->PKHTTL(key_, numfields_, fields_, &ttls); + if (s_.ok()) { + res_.AppendArrayLenUint64(ttls.size()); + for (const auto& ttl : ttls) { + res_.AppendInteger(ttl); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHGetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHSet); + return; + } + key_ = argv_[1]; + field_ = argv_[2]; +} + +void PKHGetCmd::Do() { + std::string value; + s_ = db_->storage()->PKHGet(key_, field_, &value); + if (s_.ok()) { + res_.AppendStringLenUint64(value.size()); + res_.AppendContent(value); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendContent("$-1"); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHGetCmd::ReadCache() { + std::string value; + auto s = db_->cache()->HGet(key_, field_, &value); + if (s.ok()) { + res_.AppendStringLen(value.size()); + res_.AppendContent(value); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void PKHGetCmd::DoThroughDB() { + res_.clear(); + Do(); +} +void PKHGetCmd::DoUpdateCache() {} + +void PKHSetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHSet); + return; + } + key_ = argv_[1]; + field_ = argv_[2]; + value_ = argv_[3]; +} + +void PKHSetCmd::Do() { + int32_t ret = 0; + s_ = db_->storage()->PKHSet(key_, field_, value_, &ret); + if (s_.ok()) { + res_.AppendContent(":" + std::to_string(ret)); + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void 
PKHSetCmd::DoThroughDB() { Do(); } + +void PKHSetCmd::DoUpdateCache() {} +// 下面是新的命令。 + +void PKHSetexCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHSetex); + return; + } + key_ = argv_[1]; + field_ = argv_[2]; + value_ = argv_[3]; +} + +void PKHSetexCmd::Do() { + int32_t ret = 0; + s_ = db_->storage()->PKHSet(key_, field_, value_, &ret); + if (s_.ok()) { + res_.AppendContent(":" + std::to_string(ret)); + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHSetexCmd::DoThroughDB() { Do(); } + +void PKHSetexCmd::DoUpdateCache() {} + +void PKHExistsCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHExists); + return; + } + key_ = argv_[1]; + field_ = argv_[2]; +} + +void PKHExistsCmd::Do() { + s_ = db_->storage()->HExists(key_, field_); + if (s_.ok()) { + res_.AppendContent(":1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendContent(":0"); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHExistsCmd::DoThroughDB() { Do(); } + +void PKHExistsCmd::DoUpdateCache() {} + +void PKHDelCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHDel); + return; + } + key_ = argv_[1]; + auto iter = argv_.begin(); + iter++; + iter++; + fields_.assign(iter, argv_.end()); +} + +void PKHDelCmd::Do() { + s_ = db_->storage()->HDel(key_, fields_, &deleted_); + + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(deleted_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHDelCmd::DoThroughDB() { Do(); } + +void PKHDelCmd::DoUpdateCache() {} + +void PKHLenCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + 
res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHLen); + return; + } + key_ = argv_[1]; +} + +void PKHLenCmd::Do() { + int32_t len = 0; + s_ = db_->storage()->HLen(key_, &len); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(len); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, "something wrong in hlen"); + } +} + +void PKHLenCmd::DoThroughDB() { Do(); } + +void PKHLenCmd::DoUpdateCache() {} + +void PKHStrLenCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHStrlen); + return; + } + key_ = argv_[1]; + field_ = argv_[2]; +} + +void PKHStrLenCmd::Do() { + int32_t len = 0; + s_ = db_->storage()->HStrlen(key_, field_, &len); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(len); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, "something wrong in hstrlen"); + } +} + +void PKHStrLenCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void PKHStrLenCmd::DoUpdateCache() {} + + +void PKHIncrbyCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHIncrby); + return; + } + key_ = argv_[1]; + field_ = argv_[2]; + if (argv_[3].find(' ') != std::string::npos || (pstd::string2int(argv_[3].data(), argv_[3].size(), &by_) == 0)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } +} + +void PKHIncrbyCmd::Do() { + int64_t new_value = 0; + s_ = db_->storage()->HIncrby(key_, field_, by_, &new_value); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendContent(":" + std::to_string(new_value)); + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument() && + s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: hash value is not an integer") { + res_.SetRes(CmdRes::kInvalidInt); + } else if (s_.IsInvalidArgument()) { + 
res_.SetRes(CmdRes::kOverFlow); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHIncrbyCmd::DoThroughDB() { Do(); } + +void PKHIncrbyCmd::DoUpdateCache() {} + + +void PKHMSetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHMSet); + return; + } + key_ = argv_[1]; + size_t argc = argv_.size(); + if (argc % 2 != 0) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHMSet); + return; + } + size_t index = 2; + fvs_.clear(); + for (; index < argc; index += 2) { + fvs_.push_back({argv_[index], argv_[index + 1]}); + } +} + +void PKHMSetCmd::Do() { + s_ = db_->storage()->HMSet(key_, fvs_); + if (s_.ok()) { + res_.SetRes(CmdRes::kOk); + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHMSetCmd::DoThroughDB() { Do(); } + +void PKHMSetCmd::DoUpdateCache() {} + +void PKHMGetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHMGet); + return; + } + key_ = argv_[1]; + auto iter = argv_.begin(); + iter++; + iter++; + fields_.assign(iter, argv_.end()); +} + +void PKHMGetCmd::Do() { + std::vector vss; + s_ = db_->storage()->HMGet(key_, fields_, &vss); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(vss.size()); + for (const auto& vs : vss) { + if (vs.status.ok()) { + res_.AppendStringLenUint64(vs.value.size()); + res_.AppendContent(vs.value); + } else { + res_.AppendContent("$-1"); + } + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHMGetCmd::DoThroughDB() { Do(); } + +void PKHMGetCmd::DoUpdateCache() {} + + +void PKHKeysCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHKeys); + return; + } + key_ = argv_[1]; +} + +void PKHKeysCmd::Do() { + std::vector fields; + s_ = 
db_->storage()->HKeys(key_, &fields); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(fields.size()); + for (const auto& field : fields) { + res_.AppendString(field); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHKeysCmd::DoThroughDB() { Do(); } + +void PKHKeysCmd::DoUpdateCache() {} + + +void PKHValsCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHVals); + return; + } + key_ = argv_[1]; +} + +void PKHValsCmd::Do() { + std::vector values; + s_ = db_->storage()->HVals(key_, &values); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(values.size()); + for (const auto& value : values) { + res_.AppendStringLenUint64(value.size()); + res_.AppendContent(value); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHValsCmd::DoThroughDB() { Do(); } + +void PKHValsCmd::DoUpdateCache() {} + + +void PKHGetAllCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHGetall); + return; + } + key_ = argv_[1]; +} + +void PKHGetAllCmd::Do() { + int64_t total_fv = 0; + int64_t cursor = 0; + int64_t next_cursor = 0; + size_t raw_limit = g_pika_conf->max_client_response_size(); + std::string raw; + std::vector fvs; + + do { + fvs.clear(); + s_ = db_->storage()->HScan(key_, cursor, "*", PIKA_SCAN_STEP_LENGTH, &fvs, &next_cursor); + if (!s_.ok()) { + raw.clear(); + total_fv = 0; + break; + } else { + for (const auto& fv : fvs) { + RedisAppendLenUint64(raw, fv.field.size(), "$"); + RedisAppendContent(raw, fv.field); + RedisAppendLenUint64(raw, fv.value.size(), "$"); + RedisAppendContent(raw, fv.value); + } + if (raw.size() >= raw_limit) { + res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); + return; + } + total_fv += 
static_cast(fvs.size()); + cursor = next_cursor; + } + } while (cursor != 0); + + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLen(total_fv * 2); + res_.AppendStringRaw(raw); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKHGetAllCmd::DoThroughDB() { Do(); } + +void PKHGetAllCmd::DoUpdateCache() {} + + +void PKHScanCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHScan); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + size_t index = 3; + size_t argc = argv_.size(); + + while (index < argc) { + std::string opt = argv_[index]; + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) { + index++; + if (index >= argc) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (strcasecmp(opt.data(), "match") == 0) { + pattern_ = argv_[index]; + } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } + if (count_ < 0) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } +} + +void PKHScanCmd::Do() { + int64_t next_cursor = 0; + std::vector field_values; + auto s = db_->storage()->HScan(key_, cursor_, pattern_, count_, &field_values, &next_cursor); + + if (s.ok() || s.IsNotFound()) { + res_.AppendContent("*2"); + char buf[32]; + int32_t len = pstd::ll2string(buf, sizeof(buf), next_cursor); + res_.AppendStringLen(len); + res_.AppendContent(buf); + + res_.AppendArrayLenUint64(field_values.size() * 2); + for (const auto& field_value : field_values) { + res_.AppendString(field_value.field); + res_.AppendString(field_value.value); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void PKHScanCmd::DoThroughDB() { 
Do(); } + +void PKHScanCmd::DoUpdateCache() {} + diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index dd41b3ea94..f6bc9bacf8 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -22,9 +22,10 @@ #include "rocksdb/status.h" #include "rocksdb/table.h" -#include "slot_indexer.h" #include "pstd/include/pstd_mutex.h" +#include "slot_indexer.h" #include "src/base_data_value_format.h" +#include "src/pkhash_data_value_format.h" namespace storage { @@ -103,7 +104,7 @@ struct KeyInfo { KeyInfo(uint64_t k, uint64_t e, uint64_t a, uint64_t i) : keys(k), expires(e), avg_ttl(a), invaild_keys(i) {} - KeyInfo operator + (const KeyInfo& info) { + KeyInfo operator+(const KeyInfo& info) { KeyInfo res; res.keys = keys + info.keys; res.expires = expires + info.expires; @@ -117,7 +118,9 @@ struct ValueStatus { std::string value; Status status; int64_t ttl_millsec; - bool operator==(const ValueStatus& vs) const { return (vs.value == value && vs.status == status && vs.ttl_millsec == ttl_millsec); } + bool operator==(const ValueStatus& vs) const { + return (vs.value == value && vs.status == status && vs.ttl_millsec == ttl_millsec); + } }; struct FieldValue { @@ -129,6 +132,15 @@ struct FieldValue { bool operator==(const FieldValue& fv) const { return (fv.field == field && fv.value == value); } }; +struct FieldValueTTL { + std::string field; + std::string value; + int64_t ttl_millsec; + bool operator==(const FieldValueTTL& fv) const { + return (fv.field == field && fv.value == value && fv.ttl_millsec == ttl_millsec); + } +}; + struct IdMessage { std::string field; std::string value; @@ -181,7 +193,7 @@ struct BGTask { class Storage { public: - Storage(); // for unit test only + Storage(); // for unit test only Storage(int db_instance_num, int slot_num, bool is_classic_mode); ~Storage(); @@ -266,13 +278,14 @@ class Storage { // determined by the offsets start and end (both are inclusive) Status 
Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret); - Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, - std::string* ret, std::string* value, int64_t* ttl_millsec); + Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret, + std::string* value, int64_t* ttl_millsec); // If key already exists and is a string, this command appends the value at // the end of the string // return the length of the string after the append operation - Status Append(const Slice& key, const Slice& value, int32_t* ret, int64_t* expired_timestamp_millsec, std::string& out_new_value); + Status Append(const Slice& key, const Slice& value, int32_t* ret, int64_t* expired_timestamp_millsec, + std::string& out_new_value); // Count the number of set bits (population counting) in a string. // return the number of bits set to 1 @@ -281,7 +294,8 @@ class Storage { // Perform a bitwise operation between multiple keys // and store the result in the destination key - Status BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, std::string &value_to_dest, int64_t* ret); + Status BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, + std::string& value_to_dest, int64_t* ret); // Return the position of the first bit set to 1 or 0 in a string // BitPos key 0 @@ -416,6 +430,51 @@ class Storage { Status PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, int32_t limit, std::vector* field_values, std::string* next_field); + // Pika Hash Commands + + Status PKHExpire(const Slice& key, int32_t ttl, int32_t numfields, const std::vector& fields, + std::vector* rets); + Status PKHExpireat(const Slice& key, int64_t timestamp, int32_t numfields, const std::vector& fields, + std::vector* rets); + Status PKHExpiretime(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* 
timestamps); + + Status PKHPersist(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* rets); + Status PKHTTL(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* ttls); + + Status PKHSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res); + + Status PKHGet(const Slice& key, const Slice& field, std::string* value); + + Status PKHSetex(const Slice& key, const Slice& field, const Slice& value, int32_t ttl, int32_t* ret); + + Status PKHExists(const Slice& key, const Slice& field); + + Status PKHDel(const Slice& key, const std::vector& fields, int32_t* ret); + + Status PKHLen(const Slice& key, int32_t* ret); + + Status PKHStrlen(const Slice& key, const Slice& field, int32_t* len); + + Status PKHIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret, int32_t ttl = 0); + + Status PKHMSet(const Slice& key, const std::vector& fvs); + + Status PKHMSetex(const Slice& key, const std::vector& fvts); + + Status PKHMGet(const Slice& key, const std::vector& fields, std::vector* vss); + + Status PKHKeys(const Slice& key, std::vector* fields); + + Status PKHVals(const Slice& key, std::vector* values); + + Status PKHGetall(const Slice& key, std::vector* fvts); + + Status PKHScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* fvts, int64_t* next_cursor); + // Sets Commands // Add the specified members to the set stored at key. Specified members that @@ -447,7 +506,8 @@ class Storage { // key3 = {a, c, e} // SDIFFSTORE destination key1 key2 key3 // destination = {b, d} - Status SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SDiffstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret); // Returns the members of the set resulting from the intersection of all the // given sets. 
@@ -470,7 +530,8 @@ class Storage { // key3 = {a, c, e} // SINTERSTORE destination key1 key2 key3 // destination = {a, c} - Status SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SInterstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret); // Returns if member is a member of the set stored at key. Status SIsmember(const Slice& key, const Slice& member, int32_t* ret); @@ -479,7 +540,7 @@ class Storage { // This has the same effect as running SINTER with one argument key. Status SMembers(const Slice& key, std::vector* members); - Status SMembersWithTTL(const Slice& key, std::vector* members, int64_t * ttl_millsec); + Status SMembersWithTTL(const Slice& key, std::vector* members, int64_t* ttl_millsec); // Remove the specified members from the set stored at key. Specified members // that are not a member of this set are ignored. If key does not exist, it is @@ -529,7 +590,8 @@ class Storage { // key3 = {c, d, e} // SUNIONSTORE destination key1 key2 key3 // destination = {a, b, c, d, e} - Status SUnionstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SUnionstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret); // See SCAN for SSCAN documentation. Status SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, @@ -552,7 +614,8 @@ class Storage { // (the head of the list), 1 being the next element and so on. Status LRange(const Slice& key, int64_t start, int64_t stop, std::vector* ret); - Status LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, int64_t * ttl_millsec); + Status LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, + int64_t* ttl_millsec); // Removes the first count occurrences of elements equal to value from the // list stored at key. 
The count argument influences the operation in the @@ -715,7 +778,7 @@ class Storage { Status ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members); Status ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, - int64_t * ttl_millsec); + int64_t* ttl_millsec); // Returns all the elements in the sorted set at key with a score between min // and max (including elements with score equal to min or max). The elements @@ -962,7 +1025,7 @@ class Storage { Status XLen(const Slice& key, int32_t& len); Status XRead(const StreamReadGroupReadArgs& args, std::vector>& results, std::vector& reserved_keys); - Status XInfo(const Slice& key, StreamInfoResult &result); + Status XInfo(const Slice& key, StreamInfoResult& result); // Keys Commands // Note: @@ -979,7 +1042,6 @@ class Storage { // return >=0 the number of keys that were removed int64_t Del(const std::vector& keys); - // Iterate over a collection of elements // return an updated cursor that the user need to use as the cursor argument // in the next call @@ -998,7 +1060,8 @@ class Storage { // Traverses the database of the specified type, removing the Key that matches // the pattern - Status PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, std::vector* remove_keys, const int64_t& max_count); + Status PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, + std::vector* remove_keys, const int64_t& max_count); // Iterate over a collection of elements // return next_key that the user need to use as the start_key argument @@ -1115,10 +1178,10 @@ class Storage { Status SetOptions(const OptionType& option_type, const std::string& db_type, const std::unordered_map& options); void SetCompactRangeOptions(const bool is_canceled); - Status EnableDymayticOptions(const OptionType& option_type, - const std::string& db_type, const std::unordered_map& options); - Status EnableAutoCompaction(const OptionType& option_type, - const 
std::string& db_type, const std::unordered_map& options); + Status EnableDymayticOptions(const OptionType& option_type, const std::string& db_type, + const std::unordered_map& options); + Status EnableAutoCompaction(const OptionType& option_type, const std::string& db_type, + const std::unordered_map& options); void GetRocksDBInfo(std::string& info); const StorageOptions& GetStorageOptions(); diff --git a/src/storage/include/storage/storage_define.h b/src/storage/include/storage/storage_define.h index 59fa44c495..0bf0823bba 100644 --- a/src/storage/include/storage/storage_define.h +++ b/src/storage/include/storage/storage_define.h @@ -44,6 +44,7 @@ enum ColumnFamilyIndex { kZsetsDataCF = 4, kZsetsScoreCF = 5, kStreamsDataCF = 6, + kPKHashDataCF = 7, }; const static char kNeedTransformCharacter = '\u0000'; @@ -120,16 +121,16 @@ inline const char* DecodeUserKey(const char* ptr, int length, std::string* user_ } inline const char* SeekUserkeyDelim(const char* ptr, int length) { - bool zero_ahead = false; - for (int i = 0; i < length; i++) { - if (ptr[i] == kNeedTransformCharacter && zero_ahead) { - return ptr + i + 1; - } - zero_ahead = ptr[i] == kNeedTransformCharacter; + bool zero_ahead = false; + for (int i = 0; i < length; i++) { + if (ptr[i] == kNeedTransformCharacter && zero_ahead) { + return ptr + i + 1; } - //TODO: handle invalid format - return ptr; + zero_ahead = ptr[i] == kNeedTransformCharacter; + } + // TODO: handle invalid format + return ptr; } -} // end namespace storage +} // end namespace storage #endif diff --git a/src/storage/src/base_filter.h b/src/storage/src/base_filter.h index 934b2d96d7..b2217e4055 100644 --- a/src/storage/src/base_filter.h +++ b/src/storage/src/base_filter.h @@ -13,13 +13,14 @@ #include "glog/logging.h" #include "rocksdb/compaction_filter.h" #include "src/base_data_key_format.h" -#include "src/base_value_format.h" +#include "src/base_key_format.h" #include "src/base_meta_value_format.h" +#include "src/base_value_format.h" 
+#include "src/debug.h" #include "src/lists_meta_value_format.h" #include "src/pika_stream_meta_value.h" #include "src/strings_value_format.h" #include "src/zsets_data_key_format.h" -#include "src/debug.h" namespace storage { @@ -54,14 +55,13 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { DEBUG("[stream meta type], key: %s, entries_added = %llu, first_id: %s, last_id: %s, version: %llu", parsed_key.Key().ToString().c_str(), parsed_stream_meta_value.entries_added(), parsed_stream_meta_value.first_id().ToString().c_str(), - parsed_stream_meta_value.last_id().ToString().c_str(), - parsed_stream_meta_value.version()); + parsed_stream_meta_value.last_id().ToString().c_str(), parsed_stream_meta_value.version()); return false; } else if (type == DataType::kLists) { ParsedListsMetaValue parsed_lists_meta_value(value); - DEBUG("[list meta type], key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", parsed_key.Key().ToString().c_str(), - parsed_lists_meta_value.Count(), parsed_lists_meta_value.Etime(), cur_time, - parsed_lists_meta_value.Version()); + DEBUG("[list meta type], key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", + parsed_key.Key().ToString().c_str(), parsed_lists_meta_value.Count(), parsed_lists_meta_value.Etime(), + cur_time, parsed_lists_meta_value.Version()); if (parsed_lists_meta_value.Etime() != 0 && parsed_lists_meta_value.Etime() < cur_time && parsed_lists_meta_value.Version() < cur_time) { @@ -110,10 +110,7 @@ class BaseMetaFilterFactory : public rocksdb::CompactionFilterFactory { class BaseDataFilter : public rocksdb::CompactionFilter { public: BaseDataFilter(rocksdb::DB* db, std::vector* cf_handles_ptr, enum DataType type) - : db_(db), - cf_handles_ptr_(cf_handles_ptr), - type_(type) - {} + : db_(db), cf_handles_ptr_(cf_handles_ptr), type_(type) {} bool Filter(int level, const Slice& key, const rocksdb::Slice& value, std::string* new_value, bool* value_changed) const override { @@ -156,8 +153,9 @@ class 
BaseDataFilter : public rocksdb::CompactionFilter { ParsedStreamMetaValue parsed_stream_meta_value(meta_value); meta_not_found_ = false; cur_meta_version_ = parsed_stream_meta_value.version(); - cur_meta_etime_ = 0; // stream do not support ttl - } else if (type == DataType::kHashes || type == DataType::kSets || type == DataType::kZSets) { + cur_meta_etime_ = 0; // stream do not support ttl + } else if (type == DataType::kHashes || type == DataType::kSets || type == DataType::kZSets || + type == DataType::kPKHashes) { ParsedBaseMetaValue parsed_base_meta_value(&meta_value); meta_not_found_ = false; cur_meta_version_ = parsed_base_meta_value.Version(); @@ -226,7 +224,8 @@ class BaseDataFilter : public rocksdb::CompactionFilter { class BaseDataFilterFactory : public rocksdb::CompactionFilterFactory { public: - BaseDataFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr, enum DataType type) + BaseDataFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr, + enum DataType type) : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr), type_(type) {} std::unique_ptr CreateCompactionFilter( const rocksdb::CompactionFilter::Context& context) override { @@ -245,6 +244,11 @@ using HashesMetaFilterFactory = BaseMetaFilterFactory; using HashesDataFilter = BaseDataFilter; using HashesDataFilterFactory = BaseDataFilterFactory; +using PKHashesMetaFilter = BaseMetaFilter; +using PKHashesMetaFilterFactory = BaseMetaFilterFactory; +using PKHashesDataFilter = BaseDataFilter; +using PKHashesDataFilterFactory = BaseDataFilterFactory; + using SetsMetaFilter = BaseMetaFilter; using SetsMetaFilterFactory = BaseMetaFilterFactory; using SetsMemberFilter = BaseDataFilter; diff --git a/src/storage/src/base_value_format.h b/src/storage/src/base_value_format.h index 14e0175f46..559dc8c3f3 100644 --- a/src/storage/src/base_value_format.h +++ b/src/storage/src/base_value_format.h @@ -18,11 +18,21 @@ namespace storage { -enum class DataType : uint8_t { kStrings = 0, kHashes = 1, kSets = 2, 
kLists = 3, kZSets = 4, kStreams = 5, kNones = 6, kAll = 7 }; +enum class DataType : uint8_t { + kStrings = 0, + kHashes = 1, + kSets = 2, + kLists = 3, + kZSets = 4, + kStreams = 5, + kPKHashes = 6, + kNones = 7, + kAll = 8, +}; constexpr int DataTypeNum = int(DataType::kNones); -constexpr char DataTypeTag[] = { 'k', 'h', 's', 'l', 'z', 'x', 'n', 'a'}; -constexpr char* DataTypeStrings[] = { "string", "hash", "set", "list", "zset", "streams", "none", "all"}; +constexpr char DataTypeTag[] = {'k', 'h', 's', 'l', 'z', 'x', 'e', 'n', 'a'}; +constexpr char* DataTypeStrings[] = {"string", "hash", "set", "list", "zset", "streams", "pkhash", "none", "all"}; constexpr char* DataTypeToString(DataType type) { if (type < DataType::kStrings || type > DataType::kNones) { @@ -39,12 +49,12 @@ constexpr char DataTypeToTag(DataType type) { } class InternalValue { -public: - explicit InternalValue(DataType type, const rocksdb::Slice& user_value) : type_(type), user_value_(user_value) { - ctime_ = pstd::NowMillis(); - } + public: + explicit InternalValue(DataType type, const rocksdb::Slice& user_value) : type_(type), user_value_(user_value) { + ctime_ = pstd::NowMillis(); + } - virtual ~InternalValue() { + virtual ~InternalValue() { if (start_ != space_) { delete[] start_; } @@ -74,7 +84,7 @@ class InternalValue { virtual rocksdb::Slice Encode() = 0; -protected: + protected: char space_[200]; char* start_ = nullptr; rocksdb::Slice user_value_; @@ -86,7 +96,7 @@ class InternalValue { }; class ParsedInternalValue { -public: + public: // Use this constructor after rocksdb::DB::Get(), since we use this in // the implement of user interfaces and may need to modify the // original value suffix, so the value_ must point to the string @@ -137,23 +147,21 @@ class ParsedInternalValue { return etime_ < unix_time; } - virtual bool IsValid() { - return !IsStale(); - } + virtual bool IsValid() { return !IsStale(); } virtual void StripSuffix() = 0; -protected: + protected: virtual void 
SetVersionToValue() = 0; virtual void SetEtimeToValue() = 0; virtual void SetCtimeToValue() = 0; std::string* value_ = nullptr; rocksdb::Slice user_value_; - uint64_t version_ = 0 ; + uint64_t version_ = 0; uint64_t ctime_ = 0; uint64_t etime_ = 0; DataType type_; - char reserve_[16] = {0}; //unused + char reserve_[16] = {0}; // unused }; } // namespace storage diff --git a/src/storage/src/pkhash_data_value_format.h b/src/storage/src/pkhash_data_value_format.h new file mode 100644 index 0000000000..9479d9ab7e --- /dev/null +++ b/src/storage/src/pkhash_data_value_format.h @@ -0,0 +1,135 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_PKHASH_DATA_VALUE_FORMAT_H_ +#define SRC_PKHASH_DATA_VALUE_FORMAT_H_ + +#include + +#include "rocksdb/env.h" +#include "rocksdb/slice.h" + +#include "base_value_format.h" +#include "src/coding.h" +#include "src/mutex.h" +#include "storage/storage_define.h" + +namespace storage { +/* + * pika expire hash data value format + * use etime to store expire time + * | value | reserve | ctime | etime | + * | | 16B | 8B | 8B | + */ +class PKHashDataValue : public InternalValue { + public: + /* + * The header of the Value field is initially initialized to knulltype + */ + explicit PKHashDataValue(const rocksdb::Slice& user_value) : InternalValue(DataType::kNones, user_value) {} + virtual ~PKHashDataValue() {} + + virtual rocksdb::Slice Encode() { + size_t usize = user_value_.size(); + size_t needed = usize + kSuffixReserveLength + kTimestampLength * 2; + char* dst = ReAllocIfNeeded(needed); + char* start_pos = dst; + + memcpy(dst, user_value_.data(), user_value_.size()); + dst += user_value_.size(); + memcpy(dst, reserve_, kSuffixReserveLength); + dst += kSuffixReserveLength; + + 
uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0; + EncodeFixed64(dst, ctime); + dst += kTimestampLength; + + uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0; + EncodeFixed64(dst, etime); + dst += kTimestampLength; + + return rocksdb::Slice(start_pos, needed); + } + + private: + const size_t kDefaultValueSuffixLength = kSuffixReserveLength + kTimestampLength * 2; +}; + +class ParsedPKHashDataValue : public ParsedInternalValue { + public: + // Use this constructor after rocksdb::DB::Get(), since we use this in + // the implement of user interfaces and may need to modify the + // original value suffix, so the value_ must point to the string + explicit ParsedPKHashDataValue(std::string* value) : ParsedInternalValue(value) { + if (value_->size() >= kPKHashDataValueSuffixLength) { + user_value_ = rocksdb::Slice(value_->data(), value_->size() - kPKHashDataValueSuffixLength); + memcpy(reserve_, value_->data() + user_value_.size(), kSuffixReserveLength); + uint64_t ctime = DecodeFixed64(value->data() + user_value_.size() + kSuffixReserveLength); + ctime_ = (ctime & ~(1ULL << 63)); + uint64_t etime = DecodeFixed64(value->data() + user_value_.size() + kSuffixReserveLength + kTimestampLength); + etime_ = (etime & ~(1ULL << 63)); + } + } + + // Use this constructor in rocksdb::CompactionFilter::Filter(), + // since we use this in Compaction process, all we need to do is parsing + // the rocksdb::Slice, so don't need to modify the original value, value_ can be + // set to nullptr + explicit ParsedPKHashDataValue(const rocksdb::Slice& value) : ParsedInternalValue(value) { + if (value.size() >= kPKHashDataValueSuffixLength) { + user_value_ = rocksdb::Slice(value.data(), value.size() - kPKHashDataValueSuffixLength); + memcpy(reserve_, value.data() + user_value_.size(), kSuffixReserveLength); + uint64_t ctime = DecodeFixed64(value.data() + user_value_.size() + kSuffixReserveLength); + ctime_ = (ctime & ~(1ULL << 63)); + uint64_t etime = DecodeFixed64(value.data() 
+ user_value_.size() + kSuffixReserveLength + kTimestampLength); + etime_ = (etime & ~(1ULL << 63)); + } + } + + virtual ~ParsedPKHashDataValue() = default; + + void SetEtimeToValue() override { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - kTimestampLength; + uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0; + EncodeFixed64(dst, etime); + } + } + + void SetCtimeToValue() override { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - kTimestampLength - kTimestampLength; + uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0; + EncodeFixed64(dst, ctime); + } + } + + void SetReserveToValue() { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - kPKHashDataValueSuffixLength; + memcpy(dst, reserve_, kSuffixReserveLength); + } + } + + virtual void StripSuffix() override { + if (value_) { + value_->erase(value_->size() - kPKHashDataValueSuffixLength, kPKHashDataValueSuffixLength); + } + } + + void SetTimestamp(int64_t timestamp) { + etime_ = timestamp; + SetEtimeToValue(); + } + + protected: + virtual void SetVersionToValue() override {}; + + private: + const size_t kPKHashDataValueSuffixLength = kSuffixReserveLength + kTimestampLength * 2; +}; + +} // namespace storage +#endif // SRC_BASE_VALUE_FORMAT_H_ diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 077fe15dd0..c3b29f7c93 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -7,9 +7,9 @@ #include "rocksdb/env.h" -#include "src/redis.h" -#include "src/lists_filter.h" #include "src/base_filter.h" +#include "src/lists_filter.h" +#include "src/redis.h" #include "src/zsets_filter.h" #include "pstd/include/pstd_defer.h" @@ -28,7 +28,8 @@ rocksdb::Comparator* ZSetsScoreKeyComparator() { } Redis::Redis(Storage* const s, int32_t index) - : storage_(s), index_(index), + : storage_(s), + index_(index), lock_mgr_(std::make_shared(1000, 0, std::make_shared())), small_compaction_threshold_(5000), 
small_compaction_duration_threshold_(10000) { @@ -39,7 +40,7 @@ Redis::Redis(Storage* const s, int32_t index) default_compact_range_options_.change_level = true; spop_counts_store_->SetCapacity(1000); scan_cursors_store_->SetCapacity(5000); - //env_ = rocksdb::Env::Instance(); + // env_ = rocksdb::Env::Instance(); handles_.clear(); } @@ -91,16 +92,28 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ // hash column-family options rocksdb::ColumnFamilyOptions hash_data_cf_ops(storage_options.options); - hash_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, DataType::kHashes); + hash_data_cf_ops.compaction_filter_factory = + std::make_shared(&db_, &handles_, DataType::kHashes); rocksdb::BlockBasedTableOptions hash_data_cf_table_ops(table_ops); if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { hash_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); } hash_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(hash_data_cf_table_ops)); + // pika hash column-family options + rocksdb::ColumnFamilyOptions pika_hash_data_cf_ops(storage_options.options); + pika_hash_data_cf_ops.compaction_filter_factory = + std::make_shared(&db_, &handles_, DataType::kHashes); + rocksdb::BlockBasedTableOptions pika_hash_data_cf_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + pika_hash_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + pika_hash_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(pika_hash_data_cf_table_ops)); + // list column-family options rocksdb::ColumnFamilyOptions list_data_cf_ops(storage_options.options); - list_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, DataType::kLists); + list_data_cf_ops.compaction_filter_factory = + std::make_shared(&db_, &handles_, DataType::kLists); 
list_data_cf_ops.comparator = ListsDataKeyComparator(); rocksdb::BlockBasedTableOptions list_data_cf_table_ops(table_ops); @@ -111,7 +124,8 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ // set column-family options rocksdb::ColumnFamilyOptions set_data_cf_ops(storage_options.options); - set_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, DataType::kSets); + set_data_cf_ops.compaction_filter_factory = + std::make_shared(&db_, &handles_, DataType::kSets); rocksdb::BlockBasedTableOptions set_data_cf_table_ops(table_ops); if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { set_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); @@ -121,8 +135,10 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ // zset column-family options rocksdb::ColumnFamilyOptions zset_data_cf_ops(storage_options.options); rocksdb::ColumnFamilyOptions zset_score_cf_ops(storage_options.options); - zset_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, DataType::kZSets); - zset_score_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, DataType::kZSets); + zset_data_cf_ops.compaction_filter_factory = + std::make_shared(&db_, &handles_, DataType::kZSets); + zset_score_cf_ops.compaction_filter_factory = + std::make_shared(&db_, &handles_, DataType::kZSets); zset_score_cf_ops.comparator = ZSetsScoreKeyComparator(); rocksdb::BlockBasedTableOptions zset_meta_cf_table_ops(table_ops); @@ -136,7 +152,8 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ // stream column-family options rocksdb::ColumnFamilyOptions stream_data_cf_ops(storage_options.options); - stream_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, DataType::kStreams); + stream_data_cf_ops.compaction_filter_factory = + std::make_shared(&db_, &handles_, DataType::kStreams); 
rocksdb::BlockBasedTableOptions stream_data_cf_table_ops(table_ops); if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { stream_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); @@ -148,6 +165,8 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops); // hash CF column_families.emplace_back("hash_data_cf", hash_data_cf_ops); + // pika hash CF + column_families.emplace_back("pika_hash_data_cf", pika_hash_data_cf_ops); // set CF column_families.emplace_back("set_data_cf", set_data_cf_ops); // list CF @@ -162,7 +181,8 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ return rocksdb::DB::Open(ops, db_path, column_families, &handles_, &db_); } -Status Redis::GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point) { +Status Redis::GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, + std::string* start_point) { std::string index_key; index_key.append(1, DataTypeTag[static_cast(type)]); index_key.append("_"); @@ -198,6 +218,7 @@ Status Redis::SetMaxCacheStatisticKeys(size_t max_cache_statistic_keys) { Status Redis::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end) { db_->CompactRange(default_compact_range_options_, begin, end); db_->CompactRange(default_compact_range_options_, handles_[kHashesDataCF], begin, end); + db_->CompactRange(default_compact_range_options_, handles_[kPKHashDataCF], begin, end); db_->CompactRange(default_compact_range_options_, handles_[kSetsDataCF], begin, end); db_->CompactRange(default_compact_range_options_, handles_[kListsDataCF], begin, end); db_->CompactRange(default_compact_range_options_, handles_[kZsetsDataCF], begin, end); @@ -474,7 +495,8 @@ Status Redis::UpdateSpecificKeyDuration(const DataType& dtype, 
const std::string return Status::OK(); } -Status Redis::AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t total, uint64_t duration) { +Status Redis::AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t total, + uint64_t duration) { if (total < small_compaction_threshold_ || duration < small_compaction_duration_threshold_) { return Status::OK(); } else { @@ -504,208 +526,209 @@ Status Redis::SetOptions(const OptionType& option_type, const std::unordered_map } void Redis::GetRocksDBInfo(std::string& info, const char* prefix) { - std::ostringstream string_stream; - string_stream << "#" << prefix << "RocksDB" << "\r\n"; + std::ostringstream string_stream; + string_stream << "#" << prefix << "RocksDB" << "\r\n"; - auto write_aggregated_int_property=[&](const Slice& property, const char *metric) { - uint64_t value = 0; - db_->GetAggregatedIntProperty(property, &value); - string_stream << prefix << metric << ':' << value << "\r\n"; - }; + auto write_aggregated_int_property = [&](const Slice& property, const char* metric) { + uint64_t value = 0; + db_->GetAggregatedIntProperty(property, &value); + string_stream << prefix << metric << ':' << value << "\r\n"; + }; - auto write_property=[&](const Slice& property, const char *metric) { - if (handles_.size() == 0) { + auto write_property = [&](const Slice& property, const char* metric) { + if (handles_.size() == 0) { + std::string value; + db_->GetProperty(db_->DefaultColumnFamily(), property, &value); + string_stream << prefix << metric << "_" << db_->DefaultColumnFamily()->GetName() << ':' << value << "\r\n"; + } else { + for (auto handle : handles_) { std::string value; - db_->GetProperty(db_->DefaultColumnFamily(), property, &value); - string_stream << prefix << metric << "_" << db_->DefaultColumnFamily()->GetName() << ':' << value << "\r\n"; - } else { - for (auto handle : handles_) { - std::string value; - db_->GetProperty(handle, property, &value); - 
string_stream << prefix << metric << "_" << handle->GetName() << ':' << value << "\r\n"; - } + db_->GetProperty(handle, property, &value); + string_stream << prefix << metric << "_" << handle->GetName() << ':' << value << "\r\n"; } - }; + } + }; - auto write_ticker_count = [&](uint32_t tick_type, const char *metric) { - if (db_statistics_ == nullptr) { - return; - } - uint64_t count = db_statistics_->getTickerCount(tick_type); - string_stream << prefix << metric << ':' << count << "\r\n"; - }; - - auto mapToString=[&](const std::map& map_data, const char *prefix) { - for (const auto& kv : map_data) { - std::string str_data; - str_data += kv.first + ": " + kv.second + "\r\n"; - string_stream << prefix << str_data; - } - }; + auto write_ticker_count = [&](uint32_t tick_type, const char* metric) { + if (db_statistics_ == nullptr) { + return; + } + uint64_t count = db_statistics_->getTickerCount(tick_type); + string_stream << prefix << metric << ':' << count << "\r\n"; + }; + + auto mapToString = [&](const std::map& map_data, const char* prefix) { + for (const auto& kv : map_data) { + std::string str_data; + str_data += kv.first + ": " + kv.second + "\r\n"; + string_stream << prefix << str_data; + } + }; + // memtables num + write_aggregated_int_property(rocksdb::DB::Properties::kNumImmutableMemTable, "num_immutable_mem_table"); + write_aggregated_int_property(rocksdb::DB::Properties::kNumImmutableMemTableFlushed, + "num_immutable_mem_table_flushed"); + write_aggregated_int_property(rocksdb::DB::Properties::kMemTableFlushPending, "mem_table_flush_pending"); + write_aggregated_int_property(rocksdb::DB::Properties::kNumRunningFlushes, "num_running_flushes"); + + // compaction + write_aggregated_int_property(rocksdb::DB::Properties::kCompactionPending, "compaction_pending"); + write_aggregated_int_property(rocksdb::DB::Properties::kNumRunningCompactions, "num_running_compactions"); + + // background errors + 
write_aggregated_int_property(rocksdb::DB::Properties::kBackgroundErrors, "background_errors"); + + // memtables size + write_aggregated_int_property(rocksdb::DB::Properties::kCurSizeActiveMemTable, "cur_size_active_mem_table"); + write_aggregated_int_property(rocksdb::DB::Properties::kCurSizeAllMemTables, "cur_size_all_mem_tables"); + write_aggregated_int_property(rocksdb::DB::Properties::kSizeAllMemTables, "size_all_mem_tables"); + + // keys + write_aggregated_int_property(rocksdb::DB::Properties::kEstimateNumKeys, "estimate_num_keys"); + + // table readers mem + write_aggregated_int_property(rocksdb::DB::Properties::kEstimateTableReadersMem, "estimate_table_readers_mem"); + + // snapshot + write_aggregated_int_property(rocksdb::DB::Properties::kNumSnapshots, "num_snapshots"); + + // version + write_aggregated_int_property(rocksdb::DB::Properties::kNumLiveVersions, "num_live_versions"); + write_aggregated_int_property(rocksdb::DB::Properties::kCurrentSuperVersionNumber, "current_super_version_number"); + + // live data size + write_aggregated_int_property(rocksdb::DB::Properties::kEstimateLiveDataSize, "estimate_live_data_size"); + + // sst files + write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "0", "num_files_at_level0"); + write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "1", "num_files_at_level1"); + write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "2", "num_files_at_level2"); + write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "3", "num_files_at_level3"); + write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "4", "num_files_at_level4"); + write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "5", "num_files_at_level5"); + write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "6", "num_files_at_level6"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "0", "compression_ratio_at_level0"); + 
write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "1", "compression_ratio_at_level1"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "2", "compression_ratio_at_level2"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "3", "compression_ratio_at_level3"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "4", "compression_ratio_at_level4"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "5", "compression_ratio_at_level5"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "6", "compression_ratio_at_level6"); + write_aggregated_int_property(rocksdb::DB::Properties::kTotalSstFilesSize, "total_sst_files_size"); + write_aggregated_int_property(rocksdb::DB::Properties::kLiveSstFilesSize, "live_sst_files_size"); + + // pending compaction bytes + write_aggregated_int_property(rocksdb::DB::Properties::kEstimatePendingCompactionBytes, + "estimate_pending_compaction_bytes"); + + // block cache + write_aggregated_int_property(rocksdb::DB::Properties::kBlockCacheCapacity, "block_cache_capacity"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlockCacheUsage, "block_cache_usage"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlockCachePinnedUsage, "block_cache_pinned_usage"); + + // blob files + write_aggregated_int_property(rocksdb::DB::Properties::kNumBlobFiles, "num_blob_files"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlobStats, "blob_stats"); + write_aggregated_int_property(rocksdb::DB::Properties::kTotalBlobFileSize, "total_blob_file_size"); + write_aggregated_int_property(rocksdb::DB::Properties::kLiveBlobFileSize, "live_blob_file_size"); + + write_aggregated_int_property(rocksdb::DB::Properties::kBlobCacheCapacity, "blob_cache_capacity"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlobCacheUsage, "blob_cache_usage"); + 
write_aggregated_int_property(rocksdb::DB::Properties::kBlobCachePinnedUsage, "blob_cache_pinned_usage"); + + // rocksdb ticker + { // memtables num - write_aggregated_int_property(rocksdb::DB::Properties::kNumImmutableMemTable, "num_immutable_mem_table"); - write_aggregated_int_property(rocksdb::DB::Properties::kNumImmutableMemTableFlushed, "num_immutable_mem_table_flushed"); - write_aggregated_int_property(rocksdb::DB::Properties::kMemTableFlushPending, "mem_table_flush_pending"); - write_aggregated_int_property(rocksdb::DB::Properties::kNumRunningFlushes, "num_running_flushes"); + write_ticker_count(rocksdb::Tickers::MEMTABLE_HIT, "memtable_hit"); + write_ticker_count(rocksdb::Tickers::MEMTABLE_MISS, "memtable_miss"); + + write_ticker_count(rocksdb::Tickers::BYTES_WRITTEN, "bytes_written"); + write_ticker_count(rocksdb::Tickers::BYTES_READ, "bytes_read"); + write_ticker_count(rocksdb::Tickers::ITER_BYTES_READ, "iter_bytes_read"); + write_ticker_count(rocksdb::Tickers::GET_HIT_L0, "get_hit_l0"); + write_ticker_count(rocksdb::Tickers::GET_HIT_L1, "get_hit_l1"); + write_ticker_count(rocksdb::Tickers::GET_HIT_L2_AND_UP, "get_hit_l2_and_up"); + + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_USEFUL, "bloom_filter_useful"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_FULL_POSITIVE, "bloom_filter_full_positive"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_FULL_TRUE_POSITIVE, "bloom_filter_full_true_positive"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_PREFIX_CHECKED, "bloom_filter_prefix_checked"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL, "bloom_filter_prefix_useful"); // compaction - write_aggregated_int_property(rocksdb::DB::Properties::kCompactionPending, "compaction_pending"); - write_aggregated_int_property(rocksdb::DB::Properties::kNumRunningCompactions, "num_running_compactions"); - - // background errors - write_aggregated_int_property(rocksdb::DB::Properties::kBackgroundErrors, "background_errors"); - - 
// memtables size - write_aggregated_int_property(rocksdb::DB::Properties::kCurSizeActiveMemTable, "cur_size_active_mem_table"); - write_aggregated_int_property(rocksdb::DB::Properties::kCurSizeAllMemTables, "cur_size_all_mem_tables"); - write_aggregated_int_property(rocksdb::DB::Properties::kSizeAllMemTables, "size_all_mem_tables"); + write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY, "compaction_key_drop_newer_entry"); + write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_OBSOLETE, "compaction_key_drop_obsolete"); + write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_USER, "compaction_key_drop_user"); + write_ticker_count(rocksdb::Tickers::COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE, + "compaction_optimized_del_drop_obsolete"); + write_ticker_count(rocksdb::Tickers::COMPACT_READ_BYTES, "compact_read_bytes"); + write_ticker_count(rocksdb::Tickers::COMPACT_WRITE_BYTES, "compact_write_bytes"); + write_ticker_count(rocksdb::Tickers::FLUSH_WRITE_BYTES, "flush_write_bytes"); // keys - write_aggregated_int_property(rocksdb::DB::Properties::kEstimateNumKeys, "estimate_num_keys"); + write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_READ, "number_keys_read"); + write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_WRITTEN, "number_keys_written"); + write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_UPDATED, "number_keys_updated"); + write_ticker_count(rocksdb::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION, "number_of_reseeks_in_iteration"); + + write_ticker_count(rocksdb::Tickers::NUMBER_DB_SEEK, "number_db_seek"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_NEXT, "number_db_next"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_PREV, "number_db_prev"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_SEEK_FOUND, "number_db_seek_found"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_NEXT_FOUND, "number_db_next_found"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_PREV_FOUND, "number_db_prev_found"); + 
write_ticker_count(rocksdb::Tickers::LAST_LEVEL_READ_BYTES, "last_level_read_bytes"); + write_ticker_count(rocksdb::Tickers::LAST_LEVEL_READ_COUNT, "last_level_read_count"); + write_ticker_count(rocksdb::Tickers::NON_LAST_LEVEL_READ_BYTES, "non_last_level_read_bytes"); + write_ticker_count(rocksdb::Tickers::NON_LAST_LEVEL_READ_COUNT, "non_last_level_read_count"); - // table readers mem - write_aggregated_int_property(rocksdb::DB::Properties::kEstimateTableReadersMem, "estimate_table_readers_mem"); - - // snapshot - write_aggregated_int_property(rocksdb::DB::Properties::kNumSnapshots, "num_snapshots"); - - // version - write_aggregated_int_property(rocksdb::DB::Properties::kNumLiveVersions, "num_live_versions"); - write_aggregated_int_property(rocksdb::DB::Properties::kCurrentSuperVersionNumber, "current_super_version_number"); - - // live data size - write_aggregated_int_property(rocksdb::DB::Properties::kEstimateLiveDataSize, "estimate_live_data_size"); + // background errors + write_ticker_count(rocksdb::Tickers::STALL_MICROS, "stall_micros"); // sst files - write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix+"0", "num_files_at_level0"); - write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix+"1", "num_files_at_level1"); - write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix+"2", "num_files_at_level2"); - write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix+"3", "num_files_at_level3"); - write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix+"4", "num_files_at_level4"); - write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix+"5", "num_files_at_level5"); - write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix+"6", "num_files_at_level6"); - write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"0", "compression_ratio_at_level0"); - write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"1", "compression_ratio_at_level1"); - 
write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"2", "compression_ratio_at_level2"); - write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"3", "compression_ratio_at_level3"); - write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"4", "compression_ratio_at_level4"); - write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"5", "compression_ratio_at_level5"); - write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"6", "compression_ratio_at_level6"); - write_aggregated_int_property(rocksdb::DB::Properties::kTotalSstFilesSize, "total_sst_files_size"); - write_aggregated_int_property(rocksdb::DB::Properties::kLiveSstFilesSize, "live_sst_files_size"); - - // pending compaction bytes - write_aggregated_int_property(rocksdb::DB::Properties::kEstimatePendingCompactionBytes, "estimate_pending_compaction_bytes"); + write_ticker_count(rocksdb::Tickers::NO_FILE_OPENS, "no_file_opens"); + write_ticker_count(rocksdb::Tickers::NO_FILE_ERRORS, "no_file_errors"); // block cache - write_aggregated_int_property(rocksdb::DB::Properties::kBlockCacheCapacity, "block_cache_capacity"); - write_aggregated_int_property(rocksdb::DB::Properties::kBlockCacheUsage, "block_cache_usage"); - write_aggregated_int_property(rocksdb::DB::Properties::kBlockCachePinnedUsage, "block_cache_pinned_usage"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_INDEX_HIT, "block_cache_index_hit"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_INDEX_MISS, "block_cache_index_miss"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_FILTER_HIT, "block_cache_filter_hit"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_FILTER_MISS, "block_cache_filter_miss"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_DATA_HIT, "block_cache_data_hit"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_DATA_MISS, "block_cache_data_miss"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_BYTES_READ, 
"block_cache_bytes_read"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_BYTES_WRITE, "block_cache_bytes_write"); // blob files - write_aggregated_int_property(rocksdb::DB::Properties::kNumBlobFiles, "num_blob_files"); - write_aggregated_int_property(rocksdb::DB::Properties::kBlobStats, "blob_stats"); - write_aggregated_int_property(rocksdb::DB::Properties::kTotalBlobFileSize, "total_blob_file_size"); - write_aggregated_int_property(rocksdb::DB::Properties::kLiveBlobFileSize, "live_blob_file_size"); - - write_aggregated_int_property(rocksdb::DB::Properties::kBlobCacheCapacity, "blob_cache_capacity"); - write_aggregated_int_property(rocksdb::DB::Properties::kBlobCacheUsage, "blob_cache_usage"); - write_aggregated_int_property(rocksdb::DB::Properties::kBlobCachePinnedUsage, "blob_cache_pinned_usage"); - - //rocksdb ticker - { - // memtables num - write_ticker_count(rocksdb::Tickers::MEMTABLE_HIT, "memtable_hit"); - write_ticker_count(rocksdb::Tickers::MEMTABLE_MISS, "memtable_miss"); - - write_ticker_count(rocksdb::Tickers::BYTES_WRITTEN, "bytes_written"); - write_ticker_count(rocksdb::Tickers::BYTES_READ, "bytes_read"); - write_ticker_count(rocksdb::Tickers::ITER_BYTES_READ, "iter_bytes_read"); - write_ticker_count(rocksdb::Tickers::GET_HIT_L0, "get_hit_l0"); - write_ticker_count(rocksdb::Tickers::GET_HIT_L1, "get_hit_l1"); - write_ticker_count(rocksdb::Tickers::GET_HIT_L2_AND_UP, "get_hit_l2_and_up"); - - write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_USEFUL, "bloom_filter_useful"); - write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_FULL_POSITIVE, "bloom_filter_full_positive"); - write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_FULL_TRUE_POSITIVE, "bloom_filter_full_true_positive"); - write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_PREFIX_CHECKED, "bloom_filter_prefix_checked"); - write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL, "bloom_filter_prefix_useful"); - - // compaction - 
write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY, "compaction_key_drop_newer_entry"); - write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_OBSOLETE, "compaction_key_drop_obsolete"); - write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_USER, "compaction_key_drop_user"); - write_ticker_count(rocksdb::Tickers::COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE, "compaction_optimized_del_drop_obsolete"); - write_ticker_count(rocksdb::Tickers::COMPACT_READ_BYTES, "compact_read_bytes"); - write_ticker_count(rocksdb::Tickers::COMPACT_WRITE_BYTES, "compact_write_bytes"); - write_ticker_count(rocksdb::Tickers::FLUSH_WRITE_BYTES, "flush_write_bytes"); - - // keys - write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_READ, "number_keys_read"); - write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_WRITTEN, "number_keys_written"); - write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_UPDATED, "number_keys_updated"); - write_ticker_count(rocksdb::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION, "number_of_reseeks_in_iteration"); - - write_ticker_count(rocksdb::Tickers::NUMBER_DB_SEEK, "number_db_seek"); - write_ticker_count(rocksdb::Tickers::NUMBER_DB_NEXT, "number_db_next"); - write_ticker_count(rocksdb::Tickers::NUMBER_DB_PREV, "number_db_prev"); - write_ticker_count(rocksdb::Tickers::NUMBER_DB_SEEK_FOUND, "number_db_seek_found"); - write_ticker_count(rocksdb::Tickers::NUMBER_DB_NEXT_FOUND, "number_db_next_found"); - write_ticker_count(rocksdb::Tickers::NUMBER_DB_PREV_FOUND, "number_db_prev_found"); - write_ticker_count(rocksdb::Tickers::LAST_LEVEL_READ_BYTES, "last_level_read_bytes"); - write_ticker_count(rocksdb::Tickers::LAST_LEVEL_READ_COUNT, "last_level_read_count"); - write_ticker_count(rocksdb::Tickers::NON_LAST_LEVEL_READ_BYTES, "non_last_level_read_bytes"); - write_ticker_count(rocksdb::Tickers::NON_LAST_LEVEL_READ_COUNT, "non_last_level_read_count"); - - // background errors - write_ticker_count(rocksdb::Tickers::STALL_MICROS, "stall_micros"); - - // sst files - 
write_ticker_count(rocksdb::Tickers::NO_FILE_OPENS, "no_file_opens"); - write_ticker_count(rocksdb::Tickers::NO_FILE_ERRORS, "no_file_errors"); - - // block cache - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_INDEX_HIT, "block_cache_index_hit"); - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_INDEX_MISS, "block_cache_index_miss"); - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_FILTER_HIT, "block_cache_filter_hit"); - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_FILTER_MISS, "block_cache_filter_miss"); - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_DATA_HIT, "block_cache_data_hit"); - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_DATA_MISS, "block_cache_data_miss"); - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_BYTES_READ, "block_cache_bytes_read"); - write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_BYTES_WRITE, "block_cache_bytes_write"); - - // blob files - write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_KEYS_WRITTEN, "blob_db_num_keys_written"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_KEYS_READ, "blob_db_num_keys_read"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_BYTES_WRITTEN, "blob_db_bytes_written"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_BYTES_READ, "blob_db_bytes_read"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_SEEK, "blob_db_num_seek"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_NEXT, "blob_db_num_next"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_PREV, "blob_db_num_prev"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_BLOB_FILE_BYTES_WRITTEN, "blob_db_blob_file_bytes_written"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_BLOB_FILE_BYTES_READ, "blob_db_blob_file_bytes_read"); - - write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_FILES, "blob_db_gc_num_files"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_NEW_FILES, "blob_db_gc_num_new_files"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_KEYS_RELOCATED, "blob_db_gc_num_keys_relocated"); - 
write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_BYTES_RELOCATED, "blob_db_gc_bytes_relocated"); - - write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_MISS, "blob_db_cache_miss"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_HIT, "blob_db_cache_hit"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_BYTES_READ, "blob_db_cache_bytes_read"); - write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_BYTES_WRITE, "blob_db_cache_bytes_write"); - } - // column family stats - std::map mapvalues; - db_->rocksdb::DB::GetMapProperty(rocksdb::DB::Properties::kCFStats,&mapvalues); - mapToString(mapvalues,prefix); - info.append(string_stream.str()); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_KEYS_WRITTEN, "blob_db_num_keys_written"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_KEYS_READ, "blob_db_num_keys_read"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BYTES_WRITTEN, "blob_db_bytes_written"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BYTES_READ, "blob_db_bytes_read"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_SEEK, "blob_db_num_seek"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_NEXT, "blob_db_num_next"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_PREV, "blob_db_num_prev"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BLOB_FILE_BYTES_WRITTEN, "blob_db_blob_file_bytes_written"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BLOB_FILE_BYTES_READ, "blob_db_blob_file_bytes_read"); + + write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_FILES, "blob_db_gc_num_files"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_NEW_FILES, "blob_db_gc_num_new_files"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_KEYS_RELOCATED, "blob_db_gc_num_keys_relocated"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_BYTES_RELOCATED, "blob_db_gc_bytes_relocated"); + + write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_MISS, "blob_db_cache_miss"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_HIT, "blob_db_cache_hit"); + 
write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_BYTES_READ, "blob_db_cache_bytes_read"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_BYTES_WRITE, "blob_db_cache_bytes_write"); + } + // column family stats + std::map mapvalues; + db_->rocksdb::DB::GetMapProperty(rocksdb::DB::Properties::kCFStats, &mapvalues); + mapToString(mapvalues, prefix); + info.append(string_stream.str()); } -void Redis::SetWriteWalOptions(const bool is_wal_disable) { - default_write_options_.disableWAL = is_wal_disable; -} +void Redis::SetWriteWalOptions(const bool is_wal_disable) { default_write_options_.disableWAL = is_wal_disable; } void Redis::SetCompactRangeOptions(const bool is_canceled) { if (!default_compact_range_options_.canceled) { diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index 54c6e10d46..df17b9fb7b 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -15,17 +15,17 @@ #include "rocksdb/slice.h" #include "rocksdb/status.h" +#include "pstd/include/env.h" +#include "pstd/include/pika_codis_slot.h" +#include "src/custom_comparator.h" #include "src/debug.h" #include "src/lock_mgr.h" #include "src/lru_cache.h" #include "src/mutex_impl.h" +#include "src/redis_streams.h" #include "src/type_iterator.h" -#include "src/custom_comparator.h" #include "storage/storage.h" #include "storage/storage_define.h" -#include "pstd/include/env.h" -#include "src/redis_streams.h" -#include "pstd/include/pika_codis_slot.h" #define SPOP_COMPACT_THRESHOLD_COUNT 500 #define SPOP_COMPACT_THRESHOLD_DURATION (1000 * 1000) // 1000ms @@ -58,7 +58,7 @@ class Redis { } } uint64_t AvgDuration() { - if (durations.size () < window_size) { + if (durations.size() < window_size) { return 0; } uint64_t min = durations[0]; @@ -75,12 +75,8 @@ class Redis { } return (sum - max - min) / (durations.size() - 2); } - void AddModifyCount(uint64_t count) { - modify_count += count; - } - uint64_t ModifyCount() { - return modify_count; - } + void AddModifyCount(uint64_t count) { 
modify_count += count; } + uint64_t ModifyCount() { return modify_count; } }; struct KeyStatisticsDurationGuard { @@ -88,15 +84,15 @@ class Redis { std::string key; uint64_t start_us; DataType dtype; - KeyStatisticsDurationGuard(Redis* that, const DataType type, const std::string& key): ctx(that), key(key), start_us(pstd::NowMicros()), dtype(type) { - } + KeyStatisticsDurationGuard(Redis* that, const DataType type, const std::string& key) + : ctx(that), key(key), start_us(pstd::NowMicros()), dtype(type) {} ~KeyStatisticsDurationGuard() { uint64_t end_us = pstd::NowMicros(); uint64_t duration = end_us > start_us ? end_us - start_us : 0; ctx->UpdateSpecificKeyDuration(dtype, key, duration); } }; - int GetIndex() const {return index_;} + int GetIndex() const { return index_; } Status SetOptions(const OptionType& option_type, const std::unordered_map& options); void SetWriteWalOptions(const bool is_wal_disable); @@ -126,6 +122,7 @@ class Redis { virtual Status ListsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {}); virtual Status ZsetsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {}); virtual Status SetsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {}); + virtual Status PKHashesExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {}); virtual Status StringsDel(const Slice& key, std::string&& prefetch_meta = {}); virtual Status HashesDel(const Slice& key, std::string&& prefetch_meta = {}); @@ -155,7 +152,8 @@ class Redis { // Strings Commands Status Append(const Slice& key, const Slice& value, int32_t* ret, int64_t* expired_timestamp_millsec, std::string& out_new_value); Status BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range); - Status BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, std::string &value_to_dest, int64_t* ret); + Status BitOp(BitOpType op, const std::string& 
dest_key, const std::vector& src_keys, + std::string& value_to_dest, int64_t* ret); Status Decrby(const Slice& key, int64_t value, int64_t* ret); Status Get(const Slice& key, std::string* value); Status HyperloglogGet(const Slice& key, std::string* value); @@ -226,7 +224,6 @@ class Redis { Status SetSmallCompactionThreshold(uint64_t small_compaction_threshold); Status SetSmallCompactionDurationThreshold(uint64_t small_compaction_duration_threshold); - std::vector GetStringCFHandles() { return {handles_[kMetaCF]}; } std::vector GetHashCFHandles() { @@ -248,15 +245,64 @@ class Redis { std::vector GetStreamCFHandles() { return {handles_.begin() + kMetaCF, handles_.end()}; } - void GetRocksDBInfo(std::string &info, const char *prefix); + + std::vector GetPKHashCFHandles() { + return {handles_.begin() + kMetaCF, handles_.begin() + kPKHashDataCF + 1}; + } + + void GetRocksDBInfo(std::string& info, const char* prefix); + + // PK Hash Commands + Status PKHExpire(const Slice& key, int32_t ttl, int32_t numfields, const std::vector& fields, + std::vector* rets); + Status PKHExpireat(const Slice& key, int64_t timestamp, int32_t numfields, const std::vector& fields, + std::vector* rets); + Status PKHExpiretime(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* timestamps); + Status PKHTTL(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* ttls); + Status PKHPersist(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* rets); + Status PKHGet(const Slice& key, const Slice& field, std::string* value); + + Status PKHSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res); + + Status PKHSetex(const Slice& key, const Slice& field, const Slice& value, int32_t ttl, int32_t* ret); + + Status PKHExists(const Slice& key, const Slice& field); + + Status PKHDel(const Slice& key, const std::vector& fields, int32_t* ret); + + Status PKHLen(const Slice& key, int32_t* ret, std::string&& 
prefetch_meta = {}); + + Status PKHStrlen(const Slice& key, const Slice& field, int32_t* len); + + Status PKHIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret, int32_t ttl = 0); + + Status PKHMSet(const Slice& key, const std::vector& fvs); + + Status PKHMSetex(const Slice& key, const std::vector& fvts); + + Status PKHMGet(const Slice& key, const std::vector& fields, std::vector* vss); + + Status PKHKeys(const Slice& key, std::vector* fields); + + Status PKHVals(const Slice& key, std::vector* values); + + Status PKHGetall(const Slice& key, std::vector* fvts); + + Status PKHScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* fvts, int64_t* next_cursor); // Sets Commands Status SAdd(const Slice& key, const std::vector& members, int32_t* ret); Status SCard(const Slice& key, int32_t* ret, std::string&& prefetch_meta = {}); Status SDiff(const std::vector& keys, std::vector* members); - Status SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SDiffstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret); Status SInter(const std::vector& keys, std::vector* members); - Status SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SInterstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret); Status SIsmember(const Slice& key, const Slice& member, int32_t* ret); Status SMembers(const Slice& key, std::vector* members); Status SMembersWithTTL(const Slice& key, std::vector* members, int64_t* ttl_millsec); @@ -265,7 +311,8 @@ class Redis { Status SRandmember(const Slice& key, int32_t count, std::vector* members); Status SRem(const Slice& key, const std::vector& members, int32_t* ret); Status SUnion(const std::vector& keys, std::vector* members); - Status SUnionstore(const Slice& destination, const 
std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SUnionstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret); Status SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, std::vector* members, int64_t* next_cursor); Status AddAndGetSpopCount(const std::string& key, uint64_t* count); @@ -329,7 +376,8 @@ class Redis { Status XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args); Status XDel(const Slice& key, const std::vector& ids, int32_t& count); Status XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count); - Status XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages, std::string&& prefetch_meta = {}); + Status XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages, + std::string&& prefetch_meta = {}); Status XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); Status XLen(const Slice& key, int32_t& len); Status XRead(const StreamReadGroupReadArgs& args, std::vector>& results, @@ -339,7 +387,8 @@ class Redis { rocksdb::ReadOptions& read_options); // get and parse the stream meta if found // @return ok only when the stream meta exists - Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options, std::string&& prefetch_meta = {}); + Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options, + std::string&& prefetch_meta = {}); // Before calling this function, the caller should ensure that the ids are valid Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, @@ -359,11 +408,13 @@ class Redis { void ScanZsets(); void ScanSets(); - TypeIterator* CreateIterator(const DataType& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) { + TypeIterator* CreateIterator(const DataType& 
type, const std::string& pattern, const Slice* lower_bound, + const Slice* upper_bound) { return CreateIterator(DataTypeTag[static_cast(type)], pattern, lower_bound, upper_bound); } - TypeIterator* CreateIterator(const char& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) { + TypeIterator* CreateIterator(const char& type, const std::string& pattern, const Slice* lower_bound, + const Slice* upper_bound) { rocksdb::ReadOptions options; options.fill_cache = false; options.iterate_lower_bound = lower_bound; @@ -396,12 +447,12 @@ class Redis { return nullptr; } - enum DataType GetMetaValueType(const std::string &meta_value) { + enum DataType GetMetaValueType(const std::string& meta_value) { DataType meta_type = static_cast(static_cast(meta_value[0])); return meta_type; } - inline bool ExpectedMetaValue(enum DataType type, const std::string &meta_value) { + inline bool ExpectedMetaValue(enum DataType type, const std::string& meta_value) { auto meta_type = static_cast(static_cast(meta_value[0])); if (type == meta_type) { return true; @@ -409,12 +460,13 @@ class Redis { return false; } - inline bool ExpectedStale(const std::string &meta_value) { + inline bool ExpectedStale(const std::string& meta_value) { auto meta_type = static_cast(static_cast(meta_value[0])); switch (meta_type) { case DataType::kZSets: case DataType::kSets: - case DataType::kHashes: { + case DataType::kHashes: + case DataType::kPKHashes: { ParsedBaseMetaValue parsed_meta_value(meta_value); return (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0); } @@ -436,7 +488,7 @@ class Redis { } } -private: + private: Status GenerateStreamID(const StreamMetaValue& stream_meta, StreamAddTrimArgs& args); Status StreamScanRange(const Slice& key, const uint64_t version, const Slice& id_start, const std::string& id_end, @@ -513,8 +565,8 @@ class Redis { std::shared_ptr lock_mgr_; rocksdb::DB* db_ = nullptr; std::shared_ptr db_statistics_ = nullptr; - 
//TODO(wangshaoyi): seperate env for each rocksdb instance - // rocksdb::Env* env_ = nullptr; + // TODO(wangshaoyi): seperate env for each rocksdb instance + // rocksdb::Env* env_ = nullptr; std::vector handles_; rocksdb::WriteOptions default_write_options_; @@ -527,8 +579,10 @@ class Redis { std::unique_ptr> scan_cursors_store_; std::unique_ptr> spop_counts_store_; - Status GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point); - Status StoreScanNextPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, const std::string& next_point); + Status GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, + std::string* start_point); + Status StoreScanNextPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, + const std::string& next_point); // For Statistics std::atomic_uint64_t small_compaction_threshold_; diff --git a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index 1a947c07e7..e66503f0a1 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -11,11 +11,11 @@ #include #include "pstd/include/pika_codis_slot.h" +#include "src/base_data_key_format.h" +#include "src/base_data_value_format.h" #include "src/base_filter.h" #include "src/scope_record_lock.h" #include "src/scope_snapshot.h" -#include "src/base_data_key_format.h" -#include "src/base_data_value_format.h" #include "storage/util.h" namespace storage { @@ -62,7 +62,7 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int uint32_t statistic = 0; std::vector filtered_fields; std::unordered_set field_set; - for (const auto & iter : fields) { + for (const auto& iter : fields) { const std::string& field = iter; if (field_set.find(field) == field_set.end()) { field_set.insert(field); @@ -87,10 +87,9 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int if 
(ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -151,10 +150,9 @@ Status Redis::HGet(const Slice& key, const Slice& field, std::string* value) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -191,10 +189,9 @@ Status Redis::HGetall(const Slice& key, std::vector* fvs) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -234,10 +231,9 @@ Status Redis::HGetallWithTTL(const Slice& key, std::vector* fvs, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + 
key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -282,7 +278,6 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64 std::string old_value; std::string meta_value; - BaseMetaKey base_meta_key(key); Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); char value_buf[32] = {0}; @@ -291,10 +286,9 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64 if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -376,7 +370,6 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by return Status::Corruption("value is not a vaild float"); } - BaseMetaKey base_meta_key(key); Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; @@ -384,10 +377,9 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get 
type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -468,10 +460,9 @@ Status Redis::HKeys(const Slice& key, std::vector* fields) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -510,10 +501,9 @@ Status Redis::HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta) if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } } @@ -550,10 +540,9 @@ Status Redis::HMGet(const Slice& key, const std::vector& fields, st if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + 
key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -614,10 +603,9 @@ Status Redis::HMSet(const Slice& key, const std::vector& fvs) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -689,10 +677,9 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -764,10 +751,9 @@ Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, i if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -827,10 +813,9 @@ Status Redis::HVals(const Slice& key, std::vector* values) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -867,7 +852,7 @@ Status Redis::HStrlen(const Slice& key, const Slice& field, int32_t* len) { } Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, - std::vector* field_values, int64_t* next_cursor) { + std::vector* field_values, int64_t* next_cursor) { *next_cursor = 0; field_values->clear(); if (cursor < 0) { @@ -890,10 +875,9 @@ Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -950,7 +934,7 @@ Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern } Status Redis::HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count, - std::vector* field_values, std::string* next_field) { + std::vector* field_values, 
std::string* next_field) { next_field->clear(); field_values->clear(); @@ -967,10 +951,9 @@ Status Redis::HScanx(const Slice& key, const std::string& start_field, const std if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1012,8 +995,8 @@ Status Redis::HScanx(const Slice& key, const std::string& start_field, const std } Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, - const Slice& pattern, int32_t limit, std::vector* field_values, - std::string* next_field) { + const Slice& pattern, int32_t limit, std::vector* field_values, + std::string* next_field) { next_field->clear(); field_values->clear(); @@ -1037,10 +1020,9 @@ Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1083,8 +1065,8 @@ Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std } Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, - const 
Slice& pattern, int32_t limit, std::vector* field_values, - std::string* next_field) { + const Slice& pattern, int32_t limit, std::vector* field_values, + std::string* next_field) { next_field->clear(); field_values->clear(); @@ -1108,10 +1090,9 @@ Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const st if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1169,10 +1150,9 @@ Status Redis::HashesExpire(const Slice& key, int64_t ttl_millsec, std::string&& if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } } @@ -1209,10 +1189,9 @@ Status Redis::HashesDel(const Slice& key, std::string&& prefetch_meta) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + 
DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } } @@ -1246,10 +1225,9 @@ Status Redis::HashesExpireat(const Slice& key, int64_t timestamp_millsec, std::s if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } } @@ -1285,10 +1263,9 @@ Status Redis::HashesPersist(const Slice& key, std::string&& prefetch_meta) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } } @@ -1324,10 +1301,9 @@ Status Redis::HashesTTL(const Slice& key, int64_t* ttl_millsec, std::string&& pr if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } } @@ -1371,7 +1347,8 @@ void Redis::ScanHashes() { ParsedHashesMetaValue parsed_hashes_meta_value(meta_iter->value()); int32_t survival_time = 0; if (parsed_hashes_meta_value.Etime() != 0) { - survival_time = parsed_hashes_meta_value.Etime() > current_time ? parsed_hashes_meta_value.Etime() - current_time : -1; + survival_time = + parsed_hashes_meta_value.Etime() > current_time ? parsed_hashes_meta_value.Etime() - current_time : -1; } ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); @@ -1384,7 +1361,6 @@ void Redis::ScanHashes() { LOG(INFO) << "***************Hashes Field Data***************"; auto field_iter = db_->NewIterator(iterator_options, handles_[kHashesDataCF]); for (field_iter->SeekToFirst(); field_iter->Valid(); field_iter->Next()) { - ParsedHashesDataKey parsed_hashes_data_key(field_iter->key()); ParsedBaseDataValue parsed_internal_value(field_iter->value()); diff --git a/src/storage/src/redis_pkhashes.cc b/src/storage/src/redis_pkhashes.cc new file mode 100644 index 0000000000..d96934a829 --- /dev/null +++ b/src/storage/src/redis_pkhashes.cc @@ -0,0 +1,1296 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#include "src/redis.h" + +#include + +#include +#include + +#include "pstd/include/pika_codis_slot.h" +#include "src/base_data_key_format.h" +#include "src/base_filter.h" +#include "src/pkhash_data_value_format.h" +#include "src/scope_record_lock.h" +#include "src/scope_snapshot.h" +#include "storage/util.h" + +namespace storage { + +Status Redis::PKHGet(const Slice& key, const Slice& field, std::string* value) { + std::string meta_value; + uint64_t version = 0; + rocksdb::ReadOptions read_options; + + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + + HashesDataKey data_key(key, version, field); + s = db_->Get(read_options, handles_[kPKHashDataCF], data_key.Encode(), value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(value); + if (parsed_internal_value.IsStale()) { + return Status::NotFound("Stale"); + } + parsed_internal_value.StripSuffix(); + } + } + } + return s; +} + +Status Redis::PKHSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res) { + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + + std::string meta_value; + uint32_t 
statistic = 0; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.Count() == 0 || parsed_hashes_meta_value.IsStale()) { + version = parsed_hashes_meta_value.InitialMetaValue(); + parsed_hashes_meta_value.SetCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + HashesDataKey data_key(key, version, field); + PKHashDataValue ehashes_value(value); + batch.Put(handles_[kPKHashDataCF], data_key.Encode(), ehashes_value.Encode()); + *res = 1; + } else { + version = parsed_hashes_meta_value.Version(); + std::string data_value; + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(data_value); + *res = 0; + // if [field:value] already expire and then the [field:value] should be updated + if (parsed_internal_value.IsStale()) { + *res = 1; + PKHashDataValue internal_value(value); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), internal_value.Encode()); + statistic++; + } else { + if (data_value == value.ToString()) { + return Status::OK(); + } else { + PKHashDataValue internal_value(value); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), internal_value.Encode()); + statistic++; + } + } + } else if (s.IsNotFound()) { + if 
(!parsed_hashes_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("hash size overflow"); + } + + parsed_hashes_meta_value.ModifyCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + PKHashDataValue ehashes_value(value); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), ehashes_value.Encode()); + *res = 1; + } else { + return s; + } + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, 1); + HashesMetaValue hashes_meta_value(DataType::kPKHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + HashesDataKey data_key(key, version, field); + PKHashDataValue ehashes_value(value); + batch.Put(handles_[kPKHashDataCF], data_key.Encode(), ehashes_value.Encode()); + *res = 1; + } else { + return s; + } + + s = db_->Write(default_write_options_, &batch); + + UpdateSpecificKeyStatistics(DataType::kPKHashes, key.ToString(), statistic); + return s; +} + +// Pika Hash Commands +Status Redis::PKHExpire(const Slice& key, int32_t ttl_millsec, int32_t numfields, + const std::vector& fields, std::vector* rets) { + if (ttl_millsec <= 0) { + return Status::InvalidArgument("invalid expire time, must be >= 0"); + } + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + bool is_stale = false; + uint64_t version = 0; + + std::string meta_value; + + // const rocksdb::Snapshot* snapshot; + // ScopeSnapshot ss(db_, &snapshot); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + + for (const auto& field : fields) { + HashesDataKey data_key(key, version, field); + std::string data_value; + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], data_key.Encode(), &data_value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(&data_value); + if (parsed_internal_value.IsStale()) { + rets->push_back(-2); + } else { + rets->push_back(1); + parsed_internal_value.SetRelativeTimestamp(ttl_millsec); + batch.Put(handles_[kPKHashDataCF], data_key.Encode(), data_value); + } + } + } + s = db_->Write(default_write_options_, &batch); + + return s; + } + } else if (s.IsNotFound()) { + return Status::NotFound(is_stale ? 
"Stale" : "NotFound"); + } + return s; +} + +Status Redis::PKHExpireat(const Slice& key, int64_t timestamp, int32_t numfields, + const std::vector& fields, std::vector* rets) { + if (timestamp <= 0) { + rets->assign(numfields, 2); + return Status::InvalidArgument("invalid expire time, must be >= 0"); + } + + // int64_t unix_time; + // rocksdb::Env::Default()->GetCurrentTime(&unix_time); + pstd::TimeType curtime = pstd::NowMillis(); + + if (timestamp < curtime) { + rets->assign(numfields, 2); + return Status::InvalidArgument("invalid expire time, called with a past Unix time in seconds or milliseconds."); + } + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + bool is_stale = false; + uint64_t version = 0; + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + rets->assign(numfields, -2); + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + rets->assign(numfields, -2); + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + + for (const auto& field : fields) { + HashesDataKey data_key(key, version, field); + std::string data_value; + + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], data_key.Encode(), &data_value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(&data_value); + if (parsed_internal_value.IsStale()) { + rets->push_back(-2); + } 
else { + parsed_internal_value.SetTimestamp(timestamp); + batch.Put(handles_[kPKHashDataCF], data_key.Encode(), data_value); + rets->push_back(1); + } + } + } + s = db_->Write(default_write_options_, &batch); + return s; + } + } else if (s.IsNotFound()) { + return Status::NotFound(is_stale ? "Stale" : "NotFound"); + } + return s; +} + +Status Redis::PKHExpiretime(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* timestamps) { + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + bool is_stale = false; + uint64_t version = 0; + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + timestamps->assign(numfields, -2); + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + timestamps->assign(numfields, -2); + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + + for (const auto& field : fields) { + HashesDataKey data_key(key, version, field); + std::string data_value; + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], data_key.Encode(), &data_value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(&data_value); + if (parsed_internal_value.IsStale()) { + timestamps->push_back(-2); + } else { + int64_t etime = parsed_internal_value.Etime(); + if (etime == 0) { + timestamps->push_back(-1); + } else { + 
timestamps->push_back(etime); + } + } + } + } + return s; + } + } else if (s.IsNotFound()) { + timestamps->assign(numfields, -2); + return Status::NotFound(is_stale ? "Stale" : "NotFound"); + } + return s; +} + +Status Redis::PKHTTL(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* ttls) { + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + bool is_stale = false; + uint64_t version = 0; + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + ttls->assign(numfields, -2); + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + ttls->assign(numfields, -2); + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + + for (const auto& field : fields) { + HashesDataKey data_key(key, version, field); + std::string data_value; + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], data_key.Encode(), &data_value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(&data_value); + if (parsed_internal_value.IsStale()) { + ttls->push_back(-2); + } else { + int64_t etime = parsed_internal_value.Etime(); + if (etime == 0) { + ttls->push_back(-1); + } else { + // int64_t unix_time; + // rocksdb::Env::Default()->GetCurrentTime(&unix_time); + pstd::TimeType curtime = pstd::NowMillis(); + int64_t ttl_millsec = etime - curtime; + 
ttls->push_back(ttl_millsec); + } + } + } + } + + return s; + } + } else if (s.IsNotFound()) { + return Status::NotFound(is_stale ? "Stale" : "NotFound"); + } + return s; +} + +Status Redis::PKHPersist(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* rets) { + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + bool is_stale = false; + uint64_t version = 0; + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + rets->assign(numfields, -2); + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + rets->assign(numfields, -2); + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + + for (const auto& field : fields) { + HashesDataKey data_key(key, version, field); + std::string data_value; + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], data_key.Encode(), &data_value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(&data_value); + if (parsed_internal_value.IsStale()) { + rets->push_back(-1); + } else { + rets->push_back(1); + parsed_internal_value.SetEtime(0); + batch.Put(handles_[kPKHashDataCF], data_key.Encode(), data_value); + } + } + } + s = db_->Write(default_write_options_, &batch); + + return s; + } + } else if (s.IsNotFound()) { + return Status::NotFound(is_stale ? 
"Stale" : "NotFound"); + } + return s; +} + +Status Redis::PKHSetex(const Slice& key, const Slice& field, const Slice& value, int32_t ttl_millsec, int32_t* ret) { + if (ttl_millsec <= 0) { + return Status::InvalidArgument("invalid expire time"); + } + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + + std::string meta_value; + uint32_t statistic = 0; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + // 1. 判断类型是否匹配和key是否过期。 + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.Count() == 0 || parsed_hashes_meta_value.IsStale()) { + version = parsed_hashes_meta_value.InitialMetaValue(); + parsed_hashes_meta_value.SetCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + HashesDataKey data_key(key, version, field); + PKHashDataValue ehashes_value(value); + ehashes_value.SetRelativeTimeInMillsec(ttl_millsec); + batch.Put(handles_[kPKHashDataCF], data_key.Encode(), ehashes_value.Encode()); + *ret = 1; + } else { + version = parsed_hashes_meta_value.Version(); + std::string data_value; + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + *ret = 1; + if (s.ok()) { + PKHashDataValue ehashes_value(value); + ehashes_value.SetRelativeTimeInMillsec(ttl_millsec); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), 
ehashes_value.Encode()); + statistic++; + } else if (s.IsNotFound()) { + parsed_hashes_meta_value.ModifyCount(1); + batch.Put(handles_[kMetaCF], key, meta_value); + PKHashDataValue ehashes_value(value); + ehashes_value.SetRelativeTimeInMillsec(ttl_millsec); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), ehashes_value.Encode()); + statistic++; + } else { + return s; + } + + } else if (s.IsNotFound()) { + if (!parsed_hashes_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.ModifyCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + PKHashDataValue ehashes_value(value); + ehashes_value.SetRelativeTimeInMillsec(ttl_millsec); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), ehashes_value.Encode()); + *ret = 1; + } else { + return s; + } + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, 1); + HashesMetaValue hashes_meta_value(DataType::kPKHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + HashesDataKey data_key(key, version, field); + PKHashDataValue ehashes_value(value); + ehashes_value.SetRelativeTimeInMillsec(ttl_millsec); + batch.Put(handles_[kPKHashDataCF], data_key.Encode(), ehashes_value.Encode()); + *ret = 1; + } else { + return s; + } + + return db_->Write(default_write_options_, &batch); +} + +Status Redis::PKHExists(const Slice& key, const Slice& field) { + std::string value; + return PKHGet(key, field, &value); +} + +Status Redis::PKHDel(const Slice& key, const std::vector& fields, int32_t* ret) { + uint32_t statistic = 0; + std::vector filtered_fields; + std::unordered_set field_set; + for (const auto& iter : fields) { + const std::string& field = iter; + if (field_set.find(field) == field_set.end()) { + field_set.insert(field); + filtered_fields.push_back(iter); + } + } + + rocksdb::WriteBatch batch; 
+ rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + int32_t del_cnt = 0; + uint64_t version = 0; + ScopeRecordLock l(lock_mgr_, key); + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + *ret = 0; + return Status::OK(); + } else { + std::string data_value; + version = parsed_hashes_meta_value.Version(); + for (const auto& field : filtered_fields) { + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(read_options, handles_[kPKHashDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + del_cnt++; + statistic++; + batch.Delete(handles_[kPKHashDataCF], hashes_data_key.Encode()); + } else if (s.IsNotFound()) { + continue; + } else { + return s; + } + } + *ret = del_cnt; + if (!parsed_hashes_meta_value.CheckModifyCount(-del_cnt)) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.ModifyCount(-del_cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else if (s.IsNotFound()) { + *ret = 0; + return Status::OK(); + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kPKHashes, key.ToString(), statistic); + return s; +} + +Status Redis::PKHLen(const Slice& key, int32_t* ret, std::string&& 
prefetch_meta) { + *ret = 0; + Status s; + std::string meta_value(std::move(prefetch_meta)); + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kPKHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + *ret = 0; + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + *ret = parsed_hashes_meta_value.Count(); + } + } else if (s.IsNotFound()) { + *ret = 0; + } + return s; +} + +// Status Redis::PKHLenForce(const Slice& key, int32_t* ret) {} + +Status Redis::PKHStrlen(const Slice& key, const Slice& field, int32_t* len) { + std::string value; + Status s = PKHGet(key, field, &value); + if (s.ok()) { + *len = static_cast(value.size()); + } else { + *len = 0; + } + return s; +} + +Status Redis::PKHIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret, int32_t ttl_millsec) { + *ret = 0; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + uint32_t statistic = 0; + std::string old_value; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char value_buf[32] = {0}; + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { 
+ s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + version = parsed_hashes_meta_value.UpdateVersion(); + parsed_hashes_meta_value.SetCount(1); + parsed_hashes_meta_value.SetEtime(0); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + HashesDataKey hashes_data_key(key, version, field); + Int64ToStr(value_buf, 32, value); + PKHashDataValue internal_value(value_buf); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = value; + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], hashes_data_key.Encode(), &old_value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(&old_value); + parsed_internal_value.StripSuffix(); + int64_t ival = 0; + if (StrToInt64(old_value.data(), old_value.size(), &ival) == 0) { + return Status::Corruption("hash value is not an integer"); + } + if ((value >= 0 && LLONG_MAX - value < ival) || (value < 0 && LLONG_MIN - value > ival)) { + return Status::InvalidArgument("Overflow"); + } + *ret = ival + value; + Int64ToStr(value_buf, 32, *ret); + PKHashDataValue internal_value(value_buf); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), internal_value.Encode()); + statistic++; + } else if (s.IsNotFound()) { + Int64ToStr(value_buf, 32, value); + if (!parsed_hashes_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("hash size overflow"); + } + PKHashDataValue internal_value(value_buf); + parsed_hashes_meta_value.ModifyCount(1); + 
batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = value; + } else { + return s; + } + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, 1); + HashesMetaValue hashes_meta_value(DataType::kPKHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + HashesDataKey hashes_data_key(key, version, field); + + Int64ToStr(value_buf, 32, value); + PKHashDataValue internal_value(value_buf); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = value; + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kPKHashes, key.ToString(), statistic); + return s; +} + +Status Redis::PKHMSet(const Slice& key, const std::vector& fvs) { + uint32_t statistic = 0; + std::unordered_set fields; + std::vector filtered_fvs; + for (auto iter = fvs.rbegin(); iter != fvs.rend(); ++iter) { + std::string field = iter->field; + if (fields.find(field) == fields.end()) { + fields.insert(field); + filtered_fvs.push_back(*iter); + } + } + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue 
parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + version = parsed_hashes_meta_value.InitialMetaValue(); + if (!parsed_hashes_meta_value.check_set_count(static_cast(filtered_fvs.size()))) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.SetCount(static_cast(filtered_fvs.size())); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + PKHashDataValue inter_value(fv.value); + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } + } else { + int32_t count = 0; + std::string data_value; + version = parsed_hashes_meta_value.Version(); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + PKHashDataValue inter_value(fv.value); + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + statistic++; + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } else if (s.IsNotFound()) { + count++; + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } else { + return s; + } + } + if (!parsed_hashes_meta_value.CheckModifyCount(count)) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.ModifyCount(count); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, filtered_fvs.size()); + HashesMetaValue hashes_meta_value(DataType::kPKHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + PKHashDataValue inter_value(fv.value); + 
batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kPKHashes, key.ToString(), statistic); + return s; +} +Status Redis::PKHMSetex(const Slice& key, const std::vector& fvts) { + uint32_t statistic = 0; + std::unordered_set fields; + std::vector filtered_fvs; + for (auto iter = fvts.rbegin(); iter != fvts.rend(); ++iter) { + std::string field = iter->field; + if (fields.find(field) == fields.end()) { + fields.insert(field); + filtered_fvs.push_back(*iter); + } + } + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.Count() == 0 || parsed_hashes_meta_value.IsStale()) { + version = parsed_hashes_meta_value.InitialMetaValue(); + if (!parsed_hashes_meta_value.check_set_count(static_cast(filtered_fvs.size()))) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.SetCount(static_cast(filtered_fvs.size())); + // parsed_hashes_meta_value.set_timestamp(0); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + PKHashDataValue ehashes_value(fv.value); + if (fv.ttl_millsec > 0) { + 
ehashes_value.SetRelativeTimeInMillsec(fv.ttl_millsec); + } + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), ehashes_value.Encode()); + } + } else { + int32_t count = 0; + std::string data_value; + version = parsed_hashes_meta_value.Version(); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + s = db_->Get(default_read_options_, handles_[kPKHashDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + statistic++; + PKHashDataValue ehashes_value(fv.value); + if (fv.ttl_millsec > 0) { + ehashes_value.SetRelativeTimeInMillsec(fv.ttl_millsec); + } + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), ehashes_value.Encode()); + } else if (s.IsNotFound()) { + count++; + PKHashDataValue ehashes_value(fv.value); + if (fv.ttl_millsec > 0) { + ehashes_value.SetRelativeTimeInMillsec(fv.ttl_millsec); + } + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), ehashes_value.Encode()); + } else { + return s; + } + } + + if (!parsed_hashes_meta_value.CheckModifyCount(count)) { + return Status::InvalidArgument("hash size overflow"); + } + + parsed_hashes_meta_value.ModifyCount(count); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else if (s.IsNotFound()) { + // char str[4]; + EncodeFixed32(meta_value_buf, filtered_fvs.size()); + HashesMetaValue hashes_meta_value(DataType::kPKHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + PKHashDataValue ehashes_value(fv.value); + if (fv.ttl_millsec > 0) { + ehashes_value.SetRelativeTimeInMillsec(fv.ttl_millsec); + } + batch.Put(handles_[kPKHashDataCF], hashes_data_key.Encode(), ehashes_value.Encode()); + } + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kPKHashes, 
key.ToString(), statistic); + return s; +} + +Status Redis::PKHMGet(const Slice& key, const std::vector& fields, std::vector* vss) { + vss->clear(); + + uint64_t version = 0; + bool is_stale = false; + std::string value; + std::string meta_value; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if ((is_stale = parsed_hashes_meta_value.IsStale()) || parsed_hashes_meta_value.Count() == 0) { + for (size_t idx = 0; idx < fields.size(); ++idx) { + vss->push_back({std::string(), Status::NotFound()}); + } + return Status::NotFound(is_stale ? 
"Stale" : ""); + } else { + version = parsed_hashes_meta_value.Version(); + for (const auto& field : fields) { + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(read_options, handles_[kPKHashDataCF], hashes_data_key.Encode(), &value); + if (s.ok()) { + ParsedPKHashDataValue parsed_internal_value(&value); + parsed_internal_value.StripSuffix(); + vss->push_back({value, Status::OK()}); + } else if (s.IsNotFound()) { + vss->push_back({std::string(), Status::NotFound()}); + } else { + vss->clear(); + return s; + } + } + } + return Status::OK(); + } else if (s.IsNotFound()) { + for (size_t idx = 0; idx < fields.size(); ++idx) { + vss->push_back({std::string(), Status::NotFound()}); + } + } + return s; +} + +Status Redis::PKHKeys(const Slice& key, std::vector* fields) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, ""); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kPKHashes, key.ToString()); + auto iter = 
db_->NewIterator(read_options, handles_[kPKHashDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + fields->push_back(parsed_hashes_data_key.field().ToString()); + } + delete iter; + } + } + return s; +} + +Status Redis::PKHVals(const Slice& key, std::vector* values) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, ""); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kPKHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kPKHashDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedPKHashDataValue parsed_internal_value(iter->value()); + values->push_back(parsed_internal_value.UserValue().ToString()); + } + delete iter; + } + } + return s; +} + +Status Redis::PKHGetall(const Slice& key, std::vector* fvts) { + rocksdb::ReadOptions 
read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, ""); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kPKHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kPKHashDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + ParsedPKHashDataValue parsed_internal_value(iter->value()); + + if (!parsed_internal_value.IsStale()) { + int64_t ttl_millsec = 0; + int64_t etime = parsed_internal_value.Etime(); + if (etime == 0) { + ttl_millsec = -1; + } else { + // int64_t curtime; + // rocksdb::Env::Default()->GetCurrentTime(&curtime); + pstd::TimeType curtime = pstd::NowMillis(); + ttl_millsec = (etime - curtime >= 0) ? 
etime - curtime : -2; + } + + fvts->push_back({parsed_hashes_data_key.field().ToString(), parsed_internal_value.UserValue().ToString(), + static_cast(ttl_millsec)}); + } + } + delete iter; + } + } + return s; +} + +Status Redis::PKHScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* fvts, int64_t* next_cursor) { + *next_cursor = 0; + fvts->clear(); + if (cursor < 0) { + *next_cursor = 0; + return Status::OK(); + } + + int64_t rest = count; + int64_t step_length = count; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kPKHashes)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + *next_cursor = 0; + return Status::NotFound(); + } else { + std::string sub_field; + std::string start_point; + uint64_t version = parsed_hashes_meta_value.Version(); + s = GetScanStartPoint(DataType::kPKHashes, key, pattern, cursor, &start_point); + if (s.IsNotFound()) { + cursor = 0; + if (isTailWildcard(pattern)) { + start_point = pattern.substr(0, pattern.size() - 1); + } + } + if (isTailWildcard(pattern)) { + sub_field = pattern.substr(0, pattern.size() - 1); + } + + HashesDataKey hashes_data_prefix(key, version, sub_field); + HashesDataKey hashes_start_data_key(key, version, start_point); + std::string prefix = 
hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kPKHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kPKHashDataCF]); + for (iter->Seek(hashes_start_data_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); + iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + std::string field = parsed_hashes_data_key.field().ToString(); + if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { + ParsedPKHashDataValue parsed_internal_value(iter->value()); + + if (!parsed_internal_value.IsStale()) { + int64_t ttl_millsec; + int64_t timestamp = parsed_internal_value.Etime(); + if (timestamp == 0) { + ttl_millsec = -1; + } else { + // int64_t curtime; + // rocksdb::Env::Default()->GetCurrentTime(&curtime); + pstd::TimeType curtime = pstd::NowMillis(); + ttl_millsec = (timestamp - curtime >= 0) ? timestamp - curtime : -2; + } + fvts->push_back({field, parsed_internal_value.UserValue().ToString(), static_cast(ttl_millsec)}); + } + } + rest--; + } + + if (iter->Valid() && (iter->key().compare(prefix) <= 0 || iter->key().starts_with(prefix))) { + *next_cursor = cursor + step_length; + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + std::string next_field = parsed_hashes_data_key.field().ToString(); + StoreScanNextPoint(DataType::kPKHashes, key, pattern, *next_cursor, next_field); + } else { + *next_cursor = 0; + } + delete iter; + } + } else { + *next_cursor = 0; + return s; + } + return Status::OK(); +} + +Status Redis::PKHashesExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], 
base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kPKHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kPKHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } + + if (ttl_millsec > 0) { + parsed_hashes_meta_value.SetRelativeTimestamp(ttl_millsec); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } else { + parsed_hashes_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + return s; +} + +} // namespace storage diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index e402f175ca..0297586ba8 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -3,9 +3,9 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
-#include #include #include +#include #include #include @@ -14,10 +14,10 @@ #include "pstd/include/pika_codis_slot.h" #include "src/base_key_format.h" +#include "src/redis.h" #include "src/scope_record_lock.h" #include "src/scope_snapshot.h" #include "src/strings_filter.h" -#include "src/redis.h" #include "storage/util.h" namespace storage { @@ -74,10 +74,9 @@ Status Redis::Append(const Slice& key, const Slice& value, int32_t* ret, int64_t if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -122,8 +121,7 @@ int GetBitCount(const unsigned char* value, int64_t bytes) { return bit_num; } -Status Redis::BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, - bool have_range) { +Status Redis::BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range) { *ret = 0; std::string value; @@ -133,10 +131,9 @@ Status Redis::BitCount(const Slice& key, int64_t start_offset, int64_t end_offse if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -220,7 +217,8 @@ std::string 
BitOpOperate(BitOpType op, const std::vector& src_value return dest_str; } -Status Redis::BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, std::string& value_to_dest, int64_t* ret) { +Status Redis::BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, + std::string& value_to_dest, int64_t* ret) { Status s; if (op == kBitOpNot && src_keys.size() != 1) { return Status::InvalidArgument("the number of source keys is not right"); @@ -231,7 +229,7 @@ Status Redis::BitOp(BitOpType op, const std::string& dest_key, const std::vector int64_t max_len = 0; int64_t value_len = 0; std::vector src_values; - for (const auto & src_key : src_keys) { + for (const auto& src_key : src_keys) { std::string value; BaseKey base_key(src_key); s = db_->Get(default_read_options_, base_key.Encode(), &value); @@ -239,10 +237,9 @@ Status Redis::BitOp(BitOpType op, const std::string& dest_key, const std::vector if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + dest_key + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + dest_key + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -285,10 +282,9 @@ Status Redis::Decrby(const Slice& key, int64_t value, int64_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -336,10 +332,9 @@ Status Redis::Get(const Slice& key, std::string* value) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -407,10 +402,9 @@ Status Redis::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl_mill if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + " get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + " get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } @@ -455,10 +449,9 @@ Status Redis::GetBit(const Slice& key, int64_t offset, int32_t* ret) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -493,10 +486,9 @@ Status Redis::Getrange(const Slice& 
key, int64_t start_offset, int64_t end_offse if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -538,10 +530,9 @@ Status Redis::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -564,14 +555,11 @@ Status Redis::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t int64_t size = value->size(); int64_t start_t = start_offset >= 0 ? start_offset : size + start_offset; int64_t end_t = end_offset >= 0 ? 
end_offset : size + end_offset; - if (start_t > size - 1 || - (start_t != 0 && start_t > end_t) || - (start_t != 0 && end_t < 0) - ) { + if (start_t > size - 1 || (start_t != 0 && start_t > end_t) || (start_t != 0 && end_t < 0)) { return Status::OK(); } if (start_t < 0) { - start_t = 0; + start_t = 0; } if (end_t >= size) { end_t = size - 1; @@ -579,7 +567,7 @@ Status Redis::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t if (start_t == 0 && end_t < 0) { end_t = 0; } - *ret = value->substr(start_t, end_t-start_t+1); + *ret = value->substr(start_t, end_t - start_t + 1); return Status::OK(); } } else if (s.IsNotFound()) { @@ -599,10 +587,9 @@ Status Redis::GetSet(const Slice& key, const Slice& value, std::string* old_valu if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -686,10 +673,9 @@ Status Redis::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -749,7 +735,7 @@ Status Redis::MSetnx(const std::vector& kvs, int32_t* ret) { 
bool exists = false; *ret = 0; std::string value; - for (const auto & kv : kvs) { + for (const auto& kv : kvs) { BaseKey base_key(kv.key); s = db_->Get(default_read_options_, base_key.Encode(), &value); if (!s.ok() && !s.IsNotFound()) { @@ -790,10 +776,9 @@ Status Redis::Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -830,10 +815,9 @@ Status Redis::SetBit(const Slice& key, int64_t offset, int32_t on, int32_t* ret) if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok() || s.IsNotFound()) { @@ -930,10 +914,9 @@ Status Redis::Setvx(const Slice& key, const Slice& value, const Slice& new_value if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", 
expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -974,10 +957,9 @@ Status Redis::Delvx(const Slice& key, const Slice& value, int32_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -1013,10 +995,9 @@ Status Redis::Setrange(const Slice& key, int64_t start_offset, const Slice& valu if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -1122,10 +1103,9 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t* ret) { if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1169,10 +1149,9 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_ if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1229,10 +1208,9 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_ if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1314,10 +1292,9 @@ Status Redis::StringsExpire(const Slice& key, int64_t ttl_millsec, std::string&& if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } } @@ -1350,10 +1327,9 @@ Status Redis::StringsDel(const Slice& key, 
std::string&& prefetch_meta) { if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } } @@ -1381,10 +1357,9 @@ Status Redis::StringsExpireat(const Slice& key, int64_t timestamp_millsec, std:: if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } } @@ -1418,10 +1393,9 @@ Status Redis::StringsPersist(const Slice& key, std::string&& prefetch_meta) { if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + ", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } } @@ -1456,10 +1430,9 @@ Status Redis::StringsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& p if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument( - "WRONGTYPE, key: " + key.ToString() + 
", expect type: " + - DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + - DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } } @@ -1504,10 +1477,9 @@ void Redis::ScanStrings() { survival_time = parsed_strings_value.Etime() - current_time > 0 ? parsed_strings_value.Etime() - current_time : -1; } - LOG(INFO) << fmt::format("[key : {:<30}] [value : {:<30}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", parsed_strings_key.Key().ToString(), - parsed_strings_value.UserValue().ToString(), parsed_strings_value.Etime(), parsed_strings_value.Version(), - survival_time); - + LOG(INFO) << fmt::format("[key : {:<30}] [value : {:<30}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", + parsed_strings_key.Key().ToString(), parsed_strings_value.UserValue().ToString(), + parsed_strings_value.Etime(), parsed_strings_value.Version(), survival_time); } delete iter; } @@ -1583,6 +1555,8 @@ rocksdb::Status Redis::Expire(const Slice& key, int64_t ttl_millsec) { return ZsetsExpire(key, ttl_millsec, std::move(meta_value)); case DataType::kHashes: return HashesExpire(key, ttl_millsec, std::move(meta_value)); + case DataType::kPKHashes: + return PKHashesExpire(key, ttl_millsec, std::move(meta_value)); case DataType::kLists: return ListsExpire(key, ttl_millsec, std::move(meta_value)); case DataType::kStrings: @@ -1692,7 +1666,9 @@ rocksdb::Status Redis::IsExist(const storage::Slice& key) { /* * Example Delete the specified prefix key */ -rocksdb::Status Redis::PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, std::vector* remove_keys, const int64_t& max_count) { +rocksdb::Status Redis::PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, + std::vector* remove_keys, + const int64_t& 
max_count) { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -1714,8 +1690,8 @@ rocksdb::Status Redis::PKPatternMatchDelWithRemoveKeys(const std::string& patter if (meta_type == DataType::kStrings) { ParsedStringsValue parsed_strings_value(&meta_value); - if (!parsed_strings_value.IsStale() && - (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { + if (!parsed_strings_value.IsStale() && (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), + parsed_meta_key.Key().size(), 0) != 0)) { batch.Delete(key); remove_keys->push_back(parsed_meta_key.Key().data()); } @@ -1732,7 +1708,8 @@ rocksdb::Status Redis::PKPatternMatchDelWithRemoveKeys(const std::string& patter StreamMetaValue stream_meta_value; stream_meta_value.ParseFrom(meta_value); if ((stream_meta_value.length() != 0) && - (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { + (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != + 0)) { stream_meta_value.InitMetaValue(); batch.Put(handles_[kMetaCF], key, stream_meta_value.value()); remove_keys->push_back(parsed_meta_key.Key().data()); diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index cc7ca864f0..ab523384fd 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -3,22 +3,22 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
-#include #include +#include #include -#include "storage/util.h" -#include "storage/storage.h" +#include "include/pika_conf.h" +#include "pstd/include/pika_codis_slot.h" #include "scope_snapshot.h" #include "src/lru_cache.h" #include "src/mutex_impl.h" #include "src/options_helper.h" +#include "src/redis.h" #include "src/redis_hyperloglog.h" #include "src/type_iterator.h" -#include "src/redis.h" -#include "include/pika_conf.h" -#include "pstd/include/pika_codis_slot.h" +#include "storage/storage.h" +#include "storage/util.h" namespace storage { extern std::string BitOpOperate(BitOpType op, const std::vector& src_values, int64_t max_len); @@ -197,7 +197,7 @@ Status Storage::MSet(const std::vector& kvs) { Status Storage::MGet(const std::vector& keys, std::vector* vss) { vss->clear(); Status s; - for(const auto& key : keys) { + for (const auto& key : keys) { auto& inst = GetDBInstance(key); std::string value; s = inst->MGet(key, &value); @@ -216,7 +216,7 @@ Status Storage::MGet(const std::vector& keys, std::vector& keys, std::vector* vss) { vss->clear(); Status s; - for(const auto& key : keys) { + for (const auto& key : keys) { auto& inst = GetDBInstance(key); std::string value; int64_t ttl_millsec; @@ -302,9 +302,11 @@ Status Storage::BitCount(const Slice& key, int64_t start_offset, int64_t end_off // disallowed in codis proxy, only runs in classic mode Status Storage::BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, - std::string &value_to_dest, int64_t* ret) { + std::string& value_to_dest, int64_t* ret) { assert(is_classic_mode_); - if (op == storage::BitOpType::kBitOpNot && src_keys.size() >= 2) { return Status::InvalidArgument(); } + if (op == storage::BitOpType::kBitOpNot && src_keys.size() >= 2) { + return Status::InvalidArgument(); + } Status s; int64_t max_len = 0; int64_t value_len = 0; @@ -484,6 +486,114 @@ Status Storage::PKHRScanRange(const Slice& key, const Slice& field_start, const return inst->PKHRScanRange(key, 
field_start, field_end, pattern, limit, field_values, next_field); } +// Pika Hash Commands + +Status Storage::PKHExpire(const Slice& key, int32_t ttl, int32_t numfields, const std::vector& fields, + std::vector* rets) { + auto& inst = GetDBInstance(key); + return inst->PKHExpire(key, ttl, numfields, fields, rets); +} + +Status Storage::PKHExpireat(const Slice& key, int64_t timestamp, int32_t numfields, + const std::vector& fields, std::vector* rets) { + auto& inst = GetDBInstance(key); + return inst->PKHExpireat(key, timestamp, numfields, fields, rets); +} + +Status Storage::PKHExpiretime(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* timestamps) { + auto& inst = GetDBInstance(key); + return inst->PKHExpiretime(key, numfields, fields, timestamps); +} + +Status Storage::PKHPersist(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* rets) { + auto& inst = GetDBInstance(key); + return inst->PKHPersist(key, numfields, fields, rets); +} + +Status Storage::PKHTTL(const Slice& key, int32_t numfields, const std::vector& fields, + std::vector* ttls) { + auto& inst = GetDBInstance(key); + return inst->PKHTTL(key, numfields, fields, ttls); +} + +Status Storage::PKHGet(const Slice& key, const Slice& field, std::string* value) { + auto& inst = GetDBInstance(key); + return inst->PKHGet(key, field, value); +} + +Status Storage::PKHSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res) { + auto& inst = GetDBInstance(key); + return inst->PKHSet(key, field, value, res); +} + +Status Storage::PKHSetex(const Slice& key, const Slice& field, const Slice& value, int32_t ttl, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->PKHSetex(key, field, value, ttl, ret); +} + +Status Storage::PKHExists(const Slice& key, const Slice& field) { + auto& inst = GetDBInstance(key); + return inst->PKHExists(key, field); +} + +Status Storage::PKHDel(const Slice& key, const std::vector& fields, int32_t* ret) { 
+ auto& inst = GetDBInstance(key); + return inst->PKHDel(key, fields, ret); +} + +Status Storage::PKHLen(const Slice& key, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->PKHLen(key, ret); +} + +Status Storage::PKHStrlen(const Slice& key, const Slice& field, int32_t* len) { + auto& inst = GetDBInstance(key); + return inst->PKHStrlen(key, field, len); +} + +Status Storage::PKHIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret, int32_t ttl) { + auto& inst = GetDBInstance(key); + return inst->PKHIncrby(key, field, value, ret, ttl); +} + +Status Storage::PKHMSet(const Slice& key, const std::vector& fvs) { + auto& inst = GetDBInstance(key); + return inst->PKHMSet(key, fvs); +} + +Status Storage::PKHMSetex(const Slice& key, const std::vector& fvts) { + auto& inst = GetDBInstance(key); + return inst->PKHMSetex(key, fvts); +} + +Status Storage::PKHMGet(const Slice& key, const std::vector& fields, std::vector* vss) { + auto& inst = GetDBInstance(key); + return inst->PKHMGet(key, fields, vss); +} + +Status Storage::PKHKeys(const Slice& key, std::vector* fields) { + auto& inst = GetDBInstance(key); + return inst->PKHKeys(key, fields); +} + +Status Storage::PKHVals(const Slice& key, std::vector* values) { + auto& inst = GetDBInstance(key); + return inst->PKHVals(key, values); +} + +Status Storage::PKHGetall(const Slice& key, std::vector* fvts) { + auto& inst = GetDBInstance(key); + return inst->PKHGetall(key, fvts); +} + +Status Storage::PKHScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* fvts, int64_t* next_cursor) { + auto& inst = GetDBInstance(key); + return inst->PKHScan(key, cursor, pattern, count, fvts, next_cursor); +} + // Sets Commands Status Storage::SAdd(const Slice& key, const std::vector& members, int32_t* ret) { auto& inst = GetDBInstance(key); @@ -534,7 +644,8 @@ Status Storage::SDiff(const std::vector& keys, std::vector& keys, std::vector& value_to_dest, int32_t* ret) { 
+Status Storage::SDiffstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret) { Status s; // in codis mode, users should garentee keys will be hashed to same slot @@ -601,7 +712,8 @@ Status Storage::SInter(const std::vector& keys, std::vector& keys, std::vector& value_to_dest, int32_t* ret) { +Status Storage::SInterstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret) { Status s; // in codis mode, users should garentee keys will be hashed to same slot @@ -707,8 +819,7 @@ Status Storage::SUnion(const std::vector& keys, std::vector(vec.begin()), - std::move_iterator(vec.end()), + std::copy(std::move_iterator(vec.begin()), std::move_iterator(vec.end()), std::insert_iterator(member_set, member_set.begin())); } @@ -716,7 +827,8 @@ Status Storage::SUnion(const std::vector& keys, std::vector& keys, std::vector& value_to_dest, int32_t* ret) { +Status Storage::SUnionstore(const Slice& destination, const std::vector& keys, + std::vector& value_to_dest, int32_t* ret) { Status s; value_to_dest.clear(); @@ -907,7 +1019,7 @@ Status Storage::ZRangebyscore(const Slice& key, double min, double max, bool lef score_members->clear(); auto& inst = GetDBInstance(key); return inst->ZRangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), 0, - score_members); + score_members); } Status Storage::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, @@ -956,8 +1068,8 @@ Status Storage::ZRevrangebyscore(const Slice& key, double min, double max, bool // maximum number of zset is std::numeric_limits::max() score_members->clear(); auto& inst = GetDBInstance(key); - return inst->ZRevrangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), - 0, score_members); + return inst->ZRevrangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), 0, + score_members); } Status Storage::ZRevrank(const Slice& 
key, const Slice& member, int32_t* rank) { @@ -1021,9 +1133,8 @@ Status Storage::ZUnionstore(const Slice& destination, const std::vector score_members; - std::for_each(value_to_dest.begin(), value_to_dest.end(), [&score_members](auto kv) { - score_members.emplace_back(kv.second, kv.first); - }); + std::for_each(value_to_dest.begin(), value_to_dest.end(), + [&score_members](auto kv) { score_members.emplace_back(kv.second, kv.first); }); *ret = score_members.size(); int unused_ret; return inst->ZAdd(destination, score_members, &unused_ret); @@ -1097,21 +1208,21 @@ Status Storage::ZInterstore(const Slice& destination, const std::vectorZAdd(destination, value_to_dest, &unused_ret); } -Status Storage::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, - bool right_close, std::vector* members) { +Status Storage::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, + std::vector* members) { members->clear(); auto& inst = GetDBInstance(key); return inst->ZRangebylex(key, min, max, left_close, right_close, members); } -Status Storage::ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, - bool right_close, int32_t* ret) { +Status Storage::ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, + int32_t* ret) { auto& inst = GetDBInstance(key); return inst->ZLexcount(key, min, max, left_close, right_close, ret); } -Status Storage::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, - bool left_close, bool right_close, int32_t* ret) { +Status Storage::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, + int32_t* ret) { auto& inst = GetDBInstance(key); return inst->ZRemrangebylex(key, min, max, left_close, right_close, ret); } @@ -1154,7 +1265,7 @@ Status Storage::XLen(const Slice& key, int32_t& len) { } Status Storage::XRead(const StreamReadGroupReadArgs& 
args, std::vector>& results, - std::vector& reserved_keys) { + std::vector& reserved_keys) { Status s; for (int i = 0; i < args.unparsed_ids.size(); i++) { StreamReadGroupReadArgs single_args; @@ -1174,7 +1285,7 @@ Status Storage::XRead(const StreamReadGroupReadArgs& args, std::vectorXInfo(key, result); } @@ -1192,7 +1303,6 @@ int32_t Storage::Expire(const Slice& key, int64_t ttl_millsec) { return ret; } - int64_t Storage::Del(const std::vector& keys) { Status s; int64_t count = 0; @@ -1244,7 +1354,8 @@ int64_t Storage::Scan(const DataType& dtype, int64_t cursor, const std::string& Status s = LoadCursorStartKey(dtype, cursor, &key_type, &start_key); if (!s.ok()) { // If want to scan all the databases, we start with the strings database - key_type = dtype == DataType::kAll ? DataTypeTag[static_cast(DataType::kStrings)] : DataTypeTag[static_cast(dtype)]; + key_type = dtype == DataType::kAll ? DataTypeTag[static_cast(DataType::kStrings)] + : DataTypeTag[static_cast(dtype)]; start_key = prefix; cursor = 0; } @@ -1271,8 +1382,7 @@ int64_t Storage::Scan(const DataType& dtype, int64_t cursor, const std::string& std::vector inst_iters; for (const auto& inst : insts_) { IterSptr iter_sptr; - iter_sptr.reset(inst->CreateIterator(type, pattern, - nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + iter_sptr.reset(inst->CreateIterator(type, pattern, nullptr /*lower_bound*/, nullptr /*upper_bound*/)); inst_iters.push_back(iter_sptr); } @@ -1286,9 +1396,7 @@ int64_t Storage::Scan(const DataType& dtype, int64_t cursor, const std::string& } bool is_finish = !miter.Valid(); - if (miter.Valid() && - (miter.Key().compare(prefix) <= 0 || - miter.Key().substr(0, prefix.size()) == prefix)) { + if (miter.Valid() && (miter.Key().compare(prefix) <= 0 || miter.Key().substr(0, prefix.size()) == prefix)) { is_finish = false; } @@ -1332,8 +1440,8 @@ Status Storage::PKScanRange(const DataType& data_type, const Slice& key_start, c std::vector inst_iters; for (const auto& inst : insts_) { 
IterSptr iter_sptr; - iter_sptr.reset(inst->CreateIterator(data_type, pattern.ToString(), - nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + iter_sptr.reset( + inst->CreateIterator(data_type, pattern.ToString(), nullptr /*lower_bound*/, nullptr /*upper_bound*/)); inst_iters.push_back(iter_sptr); } @@ -1345,8 +1453,7 @@ Status Storage::PKScanRange(const DataType& data_type, const Slice& key_start, c miter.Seek(temp); } - while (miter.Valid() && limit > 0 && - (end_no_limit || miter.Key().compare(key_end.ToString()) <= 0)) { + while (miter.Valid() && limit > 0 && (end_no_limit || miter.Key().compare(key_end.ToString()) <= 0)) { if (data_type == DataType::kStrings) { kvs->push_back({miter.Key(), miter.Value()}); } else { @@ -1381,8 +1488,8 @@ Status Storage::PKRScanRange(const DataType& data_type, const Slice& key_start, std::vector inst_iters; for (const auto& inst : insts_) { IterSptr iter_sptr; - iter_sptr.reset(inst->CreateIterator(data_type, pattern.ToString(), - nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + iter_sptr.reset( + inst->CreateIterator(data_type, pattern.ToString(), nullptr /*lower_bound*/, nullptr /*upper_bound*/)); inst_iters.push_back(iter_sptr); } MergingIterator miter(inst_iters); @@ -1392,8 +1499,7 @@ Status Storage::PKRScanRange(const DataType& data_type, const Slice& key_start, miter.SeekForPrev(base_key_start.Encode().ToString()); } - while (miter.Valid() && limit > 0 && - (end_no_limit || miter.Key().compare(key_end.ToString()) >= 0)) { + while (miter.Valid() && limit > 0 && (end_no_limit || miter.Key().compare(key_end.ToString()) >= 0)) { if (data_type == DataType::kStrings) { kvs->push_back({miter.Key(), miter.Value()}); } else { @@ -1409,7 +1515,7 @@ Status Storage::PKRScanRange(const DataType& data_type, const Slice& key_start, return Status::OK(); } -Status Storage::PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, +Status Storage::PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, 
std::vector* remove_keys, const int64_t& max_count) { Status s; *ret = 0; @@ -1418,7 +1524,7 @@ Status Storage::PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int6 s = inst->PKPatternMatchDelWithRemoveKeys(pattern, &tmp_ret, remove_keys, max_count - *ret); if (!s.ok()) { return s; - } + } *ret += tmp_ret; if (*ret == max_count) { return s; @@ -1436,8 +1542,7 @@ Status Storage::Scanx(const DataType& data_type, const std::string& start_key, c std::vector inst_iters; for (const auto& inst : insts_) { IterSptr iter_sptr; - iter_sptr.reset(inst->CreateIterator(data_type, pattern, - nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + iter_sptr.reset(inst->CreateIterator(data_type, pattern, nullptr /*lower_bound*/, nullptr /*upper_bound*/)); inst_iters.push_back(iter_sptr); } @@ -1892,8 +1997,7 @@ Status Storage::GetKeyNum(std::vector* key_infos) { if (!s.ok()) { return s; } - std::transform(db_key_infos.begin(), db_key_infos.end(), - key_infos->begin(), key_infos->begin(), std::plus<>{}); + std::transform(db_key_infos.begin(), db_key_infos.end(), key_infos->begin(), key_infos->begin(), std::plus<>{}); } if (scan_keynum_exit_) { scan_keynum_exit_ = false; @@ -1909,15 +2013,14 @@ Status Storage::StopScanKeyNum() { rocksdb::DB* Storage::GetDBByIndex(int index) { if (index < 0 || index >= db_instance_num_) { - LOG(WARNING) << "Invalid DB Index: " << index << "total: " - << db_instance_num_; + LOG(WARNING) << "Invalid DB Index: " << index << "total: " << db_instance_num_; return nullptr; } return insts_[index]->GetDB(); } Status Storage::SetOptions(const OptionType& option_type, const std::string& db_type, - const std::unordered_map& options) { + const std::unordered_map& options) { Status s; for (const auto& inst : insts_) { s = inst->SetOptions(option_type, options); @@ -1935,8 +2038,8 @@ void Storage::SetCompactRangeOptions(const bool is_canceled) { } } -Status Storage::EnableDymayticOptions(const OptionType& option_type, - const std::string& db_type, const 
std::unordered_map& options) { +Status Storage::EnableDymayticOptions(const OptionType& option_type, const std::string& db_type, + const std::unordered_map& options) { Status s; auto it = options.find("disable_auto_compactions"); if (it != options.end() && it->second == "false") { @@ -1947,19 +2050,21 @@ Status Storage::EnableDymayticOptions(const OptionType& option_type, return s; } -Status Storage::EnableAutoCompaction(const OptionType& option_type, - const std::string& db_type, const std::unordered_map& options) { +Status Storage::EnableAutoCompaction(const OptionType& option_type, const std::string& db_type, + const std::unordered_map& options) { Status s; for (const auto& inst : insts_) { std::vector cfhds; auto string_cfhds = inst->GetStringCFHandles(); auto hash_cfhds = inst->GetHashCFHandles(); + auto pk_hash_cfhds = inst->GetPKHashCFHandles(); auto list_cfhds = inst->GetListCFHandles(); auto set_cfhds = inst->GetSetCFHandles(); auto zset_cfhds = inst->GetZsetCFHandles(); cfhds.insert(cfhds.end(), string_cfhds.begin(), string_cfhds.end()); cfhds.insert(cfhds.end(), hash_cfhds.begin(), hash_cfhds.end()); + cfhds.insert(cfhds.end(), pk_hash_cfhds.begin(), pk_hash_cfhds.end()); cfhds.insert(cfhds.end(), list_cfhds.begin(), list_cfhds.end()); cfhds.insert(cfhds.end(), set_cfhds.begin(), set_cfhds.end()); cfhds.insert(cfhds.end(), zset_cfhds.begin(), zset_cfhds.end()); @@ -1993,7 +2098,6 @@ int64_t Storage::IsExist(const Slice& key, std::map* type_stat return type_count; } - void Storage::DisableWal(const bool is_wal_disable) { for (const auto& inst : insts_) { inst->SetWriteWalOptions(is_wal_disable); diff --git a/src/storage/tests/hashes_test.cc b/src/storage/tests/hashes_test.cc index 8ee0f0490a..c21324c542 100644 --- a/src/storage/tests/hashes_test.cc +++ b/src/storage/tests/hashes_test.cc @@ -12,8 +12,8 @@ #include "glog/logging.h" -#include "pstd/include/pika_codis_slot.h" #include "pstd/include/env.h" +#include "pstd/include/pika_codis_slot.h" #include 
"storage/storage.h" #include "storage/util.h" @@ -354,7 +354,7 @@ TEST_F(HashesTest, HIncrbyfloat) { s = db.HIncrbyfloat("GP1_HINCRBYFLOAT_KEY", "GP1_HINCRBYFLOAT_FIELD", "1.234", &new_value); ASSERT_TRUE(s.ok()); - //ASSERT_EQ(new_value, "2.468"); + // ASSERT_EQ(new_value, "2.468"); // ***************** Group 2 Test ***************** s = db.HSet("GP2_HINCRBYFLOAT_KEY", "GP2_HINCRBYFLOAT_FIELD", " 1.234", &ret); @@ -388,7 +388,7 @@ TEST_F(HashesTest, HIncrbyfloat) { ASSERT_EQ(new_value, "12.3456"); s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_FIELD", &new_value); ASSERT_TRUE(s.ok()); - //ASSERT_EQ(new_value, "12.3456"); + // ASSERT_EQ(new_value, "12.3456"); s = db.HLen("HINCRBYFLOAT_KEY", &ret); ASSERT_TRUE(s.ok()); ASSERT_EQ(ret, 1); diff --git a/src/storage/tests/pkhashes_test.cc b/src/storage/tests/pkhashes_test.cc new file mode 100644 index 0000000000..3125666117 --- /dev/null +++ b/src/storage/tests/pkhashes_test.cc @@ -0,0 +1,1499 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#include +#include +#include +#include +#include +#include + +#include "glog/logging.h" + +#include "pstd/include/env.h" +#include "pstd/include/pika_codis_slot.h" +#include "storage/storage.h" +#include "storage/util.h" + +using namespace storage; + +class PKHashesTest : public ::testing::Test { + public: + PKHashesTest() = default; + ~PKHashesTest() override = default; + + void SetUp() override { + std::string path = "./db/pkhashes"; + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); + storage_options.options.create_if_missing = true; + s = db.Open(storage_options, path); + } + + void TearDown() override { + std::string path = "./db/pkhashes"; + DeleteFiles(path.c_str()); + } + + static void SetUpTestSuite() {} + static void TearDownTestSuite() {} + + StorageOptions storage_options; + storage::Storage db; + storage::Status s; +}; + +static bool field_value_match(storage::Storage* const db, const Slice& key, + const std::vector& expect_field_value) { + std::vector field_value_out; + Status s = db->HGetall(key, &field_value_out); + if (!s.ok() && !s.IsNotFound()) { + return false; + } + if (field_value_out.size() != expect_field_value.size()) { + return false; + } + if (s.IsNotFound() && expect_field_value.empty()) { + return true; + } + for (const auto& field_value : expect_field_value) { + if (find(field_value_out.begin(), field_value_out.end(), field_value) == field_value_out.end()) { + return false; + } + } + return true; +} + +static bool field_value_match(const std::vector& field_value_out, + const std::vector& expect_field_value) { + if (field_value_out.size() != expect_field_value.size()) { + return false; + } + for (const auto& field_value : expect_field_value) { + if (find(field_value_out.begin(), field_value_out.end(), FieldValueTTL{field_value.field, field_value.value, -1}) == + field_value_out.end()) { + return false; + } + } + return true; +} + +static bool size_match(storage::Storage* const db, const Slice& key, int32_t expect_size) { + 
int32_t size = 0; + Status s = db->PKHLen(key, &size); + if (!s.ok() && !s.IsNotFound()) { + return false; + } + if (s.IsNotFound() && (expect_size == 0)) { + return true; + } + return size == expect_size; +} + +static bool make_expired(storage::Storage* const db, const Slice& key) { + std::map type_status; + int ret = db->Expire(key, 1); + if ((ret == 0) || !type_status[storage::DataType::kHashes].ok()) { + return false; + } + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + return true; +} + +// PKHExpireTest +TEST_F(PKHashesTest, PKHExpireTest) { // NOLINT + + int32_t ret = 0; + std::vector rets; + + std::string value; + + // ***************** Group 1 Test ***************** + // If a field with expired time(sec) in the hash and the field should expire after ttl sec. + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + uint64_t ttl = 2; + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + s = db.PKHExpire("GP1_HSET_KEY", ttl * 1000, 1, {"HSET_TEST_FIELD"}, &rets); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + + // ASSERT_TRUE(s.ok()); + // ASSERT_EQ(value, "HSET_TEST_VALUE"); + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_FALSE(s.ok()); + + // if [field:value] already expire and then the [field:value] should be updated + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + // ***************** Group 2 Test ***************** + // If multi fields with expired timestamp(sec) in the hash and the fields should expire after timestamp. 
+ // TODO(DDD: cmd basic test cases fisrt) +} + +// PKHExpireatTest +TEST_F(PKHashesTest, PKHExpireatTest) { // NOLINT + + int32_t ret = 0; + std::vector rets; + + std::string value; + + // ***************** Group 1 Test ***************** + // If a field with expired time(sec) in the hash and the field should expire after ttl sec. + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + int64_t unix_time; + rocksdb::Env::Default()->GetCurrentTime(&unix_time); + int64_t timestamp = unix_time + 2; + + // It will expire in 2 seconds + s = db.PKHExpireat("GP1_HSET_KEY", timestamp * 1000, 1, {"HSET_TEST_FIELD"}, &rets); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_FALSE(s.ok()); // the field has expired + + // if [field:value] already expire and then the [field:value] should be updated + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); +} + +// PKHExpiretimeTest +TEST_F(PKHashesTest, PKHExpiretimeTest) { // NOLINT + + int32_t ret = 0; + std::vector rets; + + std::string value; + + // ***************** Group 1 Test ***************** + // If a field with expired time(sec) in the hash and the field should expire after ttl sec. 
+ s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + int64_t unix_time; + rocksdb::Env::Default()->GetCurrentTime(&unix_time); + int64_t timestamp = unix_time + 3; + std::cout << " unix_time: " << unix_time << " timestamp: " << timestamp << std::endl; + + // It will expire in 3 seconds + s = db.PKHExpireat("GP1_HSET_KEY", timestamp * 1000, 1, {"HSET_TEST_FIELD"}, &rets); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + std::vector timestamps; + s = db.PKHExpiretime("GP1_HSET_KEY", 1, {"HSET_TEST_FIELD"}, ×tamps); + + std::cout << " timestamps[0]: " << timestamps[0] << " timestamp: " << timestamp * 1000 << std::endl; + + ASSERT_EQ(timestamps[0], timestamp * 1000); + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + // the field has not expired and should wait for 4 sec + ASSERT_FALSE(s.ok()); // the field has ex/pired + + std::this_thread::sleep_for(std::chrono::milliseconds(4100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_FALSE(s.ok()); // the field has expired + + // if [field:value] already expire and then the [field:value] should be updated + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); +} + +// PKHTTLTest +TEST_F(PKHashesTest, PKHTTLTest) { // NOLINT + int32_t ret = 0; + std::vector rets; + + std::string value; + + // ***************** Group 1 Test ***************** + // If a field with expired time(sec) in the hash and the field should expire after ttl sec. 
+ s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + int64_t ttl = 2 * 1000; + s = db.PKHExpire("GP1_HSET_KEY", ttl, 1, {"HSET_TEST_FIELD"}, &rets); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + std::vector ttls; + + s = db.PKHTTL("GP1_HSET_KEY", 1, {"HSET_TEST_FIELD"}, &ttls); + + std::cout << " ttls[0]: " << ttls[0] << " ttl: " << ttl << std::endl; + + ASSERT_EQ(ttls[0], ttl); + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_FALSE(s.ok()); + + // if [field:value] already expire and then the [field:value] should be updated + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); +} + +// PKHPersistTest +TEST_F(PKHashesTest, PKHPersistTest) { // NOLINT + + int32_t ret = 0; + std::vector rets; + + std::string value; + + // ***************** Group 1 Test ***************** + // If a field with expired time(sec) in the hash and the field should expire after ttl sec. 
+ s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + int64_t ttl = 2 * 1000; + s = db.PKHExpire("GP1_HSET_KEY", ttl, 1, {"HSET_TEST_FIELD"}, &rets); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + std::vector ttls; + + s = db.PKHTTL("GP1_HSET_KEY", 1, {"HSET_TEST_FIELD"}, &ttls); + + std::cout << " ttls[0]: " << ttls[0] << " ttl: " << ttl << std::endl; + + ASSERT_EQ(ttls[0], ttl); + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_FALSE(s.ok()); + + // if [field:value] already expire and then the [field:value] should be updated + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + rets.clear(); + s = db.PKHExpire("GP1_HSET_KEY", ttl, 1, {"HSET_TEST_FIELD"}, &rets); + + rets.clear(); + s = db.PKHPersist("GP1_HSET_KEY", 1, {"HSET_TEST_FIELD"}, &rets); + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); +} + +// PKHSetexTest +TEST_F(PKHashesTest, PKHSetexTest) { // NOLINT + + int32_t ret = 0; + std::vector rets; + + std::string value; + + // ***************** Group 1 Test ***************** + // If a field with expired time(sec) in the hash and the field should expire after ttl sec. 
+ int64_t ttl = 2 * 1000; + s = db.PKHSetex("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", ttl, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + std::vector ttls; + + s = db.PKHTTL("GP1_HSET_KEY", 1, {"HSET_TEST_FIELD"}, &ttls); + + std::cout << " ttls[0]: " << ttls[0] << " ttl: " << ttl << std::endl; + + // ttls[0]: 1999 ttl: 2000 + // ttls[0]: 1978 ttl: 2000 + // ASSERT_EQ(ttls[0], ttl); + EXPECT_NEAR(ttls[0], ttl, 50); // 50ms tolerance + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_FALSE(s.ok()); + + // if [field:value] already expire and then the [field:value] should be updated + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + // ***************** Group 2 Test ***************** + // If a field with expired time(sec) in the hash and persist the field. + s = db.PKHSetex("GP1_HSET_KEY_1", "HSET_TEST_FIELD_1", "HSET_TEST_VALUE_1", 2 * 1000, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + rets.clear(); + s = db.PKHPersist("GP1_HSET_KEY_1", 1, {"HSET_TEST_FIELD_1"}, &rets); + + std::this_thread::sleep_for(std::chrono::milliseconds(3100)); + + s = db.PKHGet("GP1_HSET_KEY_1", "HSET_TEST_FIELD_1", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE_1"); +} + +// PKHGet +TEST_F(PKHashesTest, PKHGetTest) { + int32_t ret = 0; + std::string value; + s = db.PKHSet("HGET_KEY", "HGET_TEST_FIELD", "HGET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHGet("HGET_KEY", "HGET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HGET_TEST_VALUE"); + + // If key does not exist. 
+ s = db.PKHGet("HGET_NOT_EXIST_KEY", "HGET_TEST_FIELD", &value); + ASSERT_TRUE(s.IsNotFound()); + + // If field is not present in the hash + s = db.PKHGet("HGET_KEY", "HGET_NOT_EXIST_FIELD", &value); + ASSERT_TRUE(s.IsNotFound()); +} + +// PKHMSet +TEST_F(PKHashesTest, PKHMSetTest) { + int32_t ret = 0; + std::vector fvs1; + fvs1.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs1.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + + // If field already exists in the hash, it is overwritten + std::vector fvs2; + fvs2.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + fvs2.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + fvs2.push_back({"TEST_FIELD4", "TEST_VALUE4"}); + fvs2.push_back({"TEST_FIELD3", "TEST_VALUE5"}); + + s = db.PKHMSet("HMSET_KEY", fvs1); + ASSERT_TRUE(s.ok()); + s = db.PKHLen("HMSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.PKHMSet("HMSET_KEY", fvs2); + ASSERT_TRUE(s.ok()); + + s = db.PKHLen("HMSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + std::vector vss1; + std::vector fields1{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3", "TEST_FIELD4"}; + s = db.PKHMGet("HMSET_KEY", fields1, &vss1); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(vss1.size(), 4); + + ASSERT_EQ(vss1[0].value, "TEST_VALUE1"); + ASSERT_EQ(vss1[1].value, "TEST_VALUE2"); + ASSERT_EQ(vss1[2].value, "TEST_VALUE5"); + ASSERT_EQ(vss1[3].value, "TEST_VALUE4"); + + std::map type_status; + db.Expire("HMSET_KEY", 1); + ASSERT_TRUE(type_status[storage::DataType::kHashes].ok()); + + // The key has timeout + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + std::vector fvs3; + fvs3.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + fvs3.push_back({"TEST_FIELD4", "TEST_VALUE4"}); + fvs3.push_back({"TEST_FIELD5", "TEST_VALUE5"}); + s = db.PKHMSet("HMSET_KEY", fvs3); + ASSERT_TRUE(s.ok()); + + s = db.PKHLen("HMSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector vss2; + std::vector fields2{"TEST_FIELD3", "TEST_FIELD4", "TEST_FIELD5"}; + s = 
db.PKHMGet("HMSET_KEY", fields2, &vss2); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(vss2.size(), 3); + + ASSERT_EQ(vss2[0].value, "TEST_VALUE3"); + ASSERT_EQ(vss2[1].value, "TEST_VALUE4"); + ASSERT_EQ(vss2[2].value, "TEST_VALUE5"); +} + +// PKHExists +TEST_F(PKHashesTest, PKHExistsTest) { + int32_t ret; + s = db.PKHSet("HEXIST_KEY", "HEXIST_FIELD", "HEXIST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + + s = db.PKHExists("HEXIST_KEY", "HEXIST_FIELD"); + ASSERT_TRUE(s.ok()); + + // If key does not exist. + s = db.PKHExists("HEXIST_NOT_EXIST_KEY", "HEXIST_FIELD"); + ASSERT_TRUE(s.IsNotFound()); + + // If field is not present in the hash + s = db.PKHExists("HEXIST_KEY", "HEXIST_NOT_EXIST_FIELD"); + ASSERT_TRUE(s.IsNotFound()); +} + +// PKHDel +TEST_F(PKHashesTest, PKHDel) { + int32_t ret = 0; + std::vector fvs; + fvs.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + fvs.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + fvs.push_back({"TEST_FIELD4", "TEST_VALUE4"}); + + s = db.PKHMSet("HDEL_KEY", fvs); + ASSERT_TRUE(s.ok()); + + std::vector fields{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3", "TEST_FIElD2", "TEST_NOT_EXIST_FIELD"}; + s = db.PKHDel("HDEL_KEY", fields, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.PKHLen("HDEL_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + // Delete not exist hash table + s = db.PKHDel("HDEL_NOT_EXIST_KEY", fields, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + // Delete timeout hash table + s = db.PKHMSet("HDEL_TIMEOUT_KEY", fvs); + ASSERT_TRUE(s.ok()); + + std::map type_status; + db.Expire("HDEL_TIMEOUT_KEY", 1); + ASSERT_TRUE(type_status[storage::DataType::kHashes].ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.PKHDel("HDEL_TIMEOUT_KEY", fields, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); +} + +// PKHSet +TEST_F(PKHashesTest, PKHSetTest) { + int32_t ret = 0; + std::string value; + + // ***************** Group 1 Test ***************** + 
// If field is a new field in the hash and value was set. + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + // If field already exists in the hash and the value was updated. + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + // If field already exists in the hash and the value was equal. + s = db.PKHSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + s = db.PKHLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + // ***************** Group 2 Test ***************** + s = db.PKHSet("GP2_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP2_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + // 从这里开始有问题。 + s = db.PKHGet("GP2_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + ASSERT_TRUE(make_expired(&db, "GP2_HSET_KEY")); + + s = db.PKHSet("GP2_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.PKHGet("GP2_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + s = db.PKHLen("GP2_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = 
db.PKHGet("GP2_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + // ***************** Group 3 Test ***************** + s = db.PKHSet("GP3_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP3_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHGet("GP3_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + ASSERT_TRUE(make_expired(&db, "GP3_HSET_KEY")); + + s = db.PKHSet("GP3_HSET_KEY", "HSET_TEST_NEW_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHLen("GP3_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.PKHGet("GP3_HSET_KEY", "HSET_TEST_NEW_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + // ***************** Group 4 Test ***************** + // hset after string type key expires, should success + s = db.Setex("GP4_HSET_KEY", "STRING_VALUE_WITH_TTL", 1); + ASSERT_TRUE(s.ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2100)); + s = db.PKHSet("GP4_HSET_KEY", "HSET_TEST_NEW_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); +} + +// PKHMGet +TEST_F(PKHashesTest, PKHMGetTest) { + int32_t ret = 0; + std::vector vss; + + // ***************** Group 1 Test ***************** + std::vector fvs1; + fvs1.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs1.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + fvs1.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + s = db.PKHMSet("GP1_HMGET_KEY", fvs1); + ASSERT_TRUE(s.ok()); + + s = db.PKHLen("GP1_HMGET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector fields1{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3", "TEST_NOT_EXIST_FIELD"}; + s = db.PKHMGet("GP1_HMGET_KEY", fields1, &vss); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(vss.size(), 4); + + ASSERT_TRUE(vss[0].status.ok()); + 
// PKHMGet: per-field statuses — ok with value for present fields (including
// empty-string values), NotFound for missing fields/keys/expired keys.
TEST_F(PKHashesTest, PKHMGetTest) {
  int32_t ret = 0;
  std::vector<ValueStatus> vss;

  // ***************** Group 1 Test *****************
  std::vector<FieldValue> fvs1;
  fvs1.push_back({"TEST_FIELD1", "TEST_VALUE1"});
  fvs1.push_back({"TEST_FIELD2", "TEST_VALUE2"});
  fvs1.push_back({"TEST_FIELD3", "TEST_VALUE3"});
  s = db.PKHMSet("GP1_HMGET_KEY", fvs1);
  ASSERT_TRUE(s.ok());

  s = db.PKHLen("GP1_HMGET_KEY", &ret);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(ret, 3);

  std::vector<std::string> fields1{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3",
                                   "TEST_NOT_EXIST_FIELD"};
  s = db.PKHMGet("GP1_HMGET_KEY", fields1, &vss);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(vss.size(), 4);

  ASSERT_TRUE(vss[0].status.ok());
  ASSERT_EQ(vss[0].value, "TEST_VALUE1");
  ASSERT_TRUE(vss[1].status.ok());
  ASSERT_EQ(vss[1].value, "TEST_VALUE2");
  ASSERT_TRUE(vss[2].status.ok());
  ASSERT_EQ(vss[2].value, "TEST_VALUE3");
  ASSERT_TRUE(vss[3].status.IsNotFound());
  ASSERT_EQ(vss[3].value, "");

  // ***************** Group 2 Test *****************
  // An empty-string value is a present field, not NotFound.
  std::vector<FieldValue> fvs2;
  fvs2.push_back({"TEST_FIELD1", "TEST_VALUE1"});
  fvs2.push_back({"TEST_FIELD2", ""});
  s = db.PKHMSet("GP2_HMGET_KEY", fvs2);
  ASSERT_TRUE(s.ok());

  s = db.PKHLen("GP2_HMGET_KEY", &ret);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(ret, 2);

  vss.clear();
  std::vector<std::string> fields2{"TEST_FIELD1", "TEST_FIELD2", "TEST_NOT_EXIST_FIELD"};
  s = db.PKHMGet("GP2_HMGET_KEY", fields2, &vss);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(vss.size(), 3);

  ASSERT_TRUE(vss[0].status.ok());
  ASSERT_EQ(vss[0].value, "TEST_VALUE1");
  ASSERT_TRUE(vss[1].status.ok());
  ASSERT_EQ(vss[1].value, "");
  ASSERT_TRUE(vss[2].status.IsNotFound());
  ASSERT_EQ(vss[2].value, "");

  // ***************** Group 3 Test *****************
  // A missing key yields NotFound overall and per field.
  vss.clear();
  std::vector<std::string> fields3{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3"};
  s = db.PKHMGet("GP3_HMGET_KEY", fields3, &vss);
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_EQ(vss.size(), 3);

  ASSERT_TRUE(vss[0].status.IsNotFound());
  ASSERT_EQ(vss[0].value, "");
  ASSERT_TRUE(vss[1].status.IsNotFound());
  ASSERT_EQ(vss[1].value, "");
  ASSERT_TRUE(vss[2].status.IsNotFound());
  ASSERT_EQ(vss[2].value, "");

  // ***************** Group 4 Test *****************
  // An expired key behaves like a missing key.
  std::vector<FieldValue> fvs4;
  fvs4.push_back({"TEST_FIELD1", "TEST_VALUE1"});
  fvs4.push_back({"TEST_FIELD2", "TEST_VALUE2"});
  fvs4.push_back({"TEST_FIELD3", "TEST_VALUE3"});

  s = db.PKHMSet("GP4_HMGET_KEY", fvs4);
  ASSERT_TRUE(s.ok());

  ASSERT_TRUE(make_expired(&db, "GP4_HMGET_KEY"));

  vss.clear();
  std::vector<std::string> fields4{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3"};
  s = db.PKHMGet("GP4_HMGET_KEY", fields4, &vss);
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_EQ(vss.size(), 3);

  ASSERT_TRUE(vss[0].status.IsNotFound());
  ASSERT_EQ(vss[0].value, "");
  ASSERT_TRUE(vss[1].status.IsNotFound());
  ASSERT_EQ(vss[1].value, "");
  ASSERT_TRUE(vss[2].status.IsNotFound());
  ASSERT_EQ(vss[2].value, "");
}

// PKHLen: field count tracks inserts/deletes; an emptied hash is NotFound.
TEST_F(PKHashesTest, PKHLenTest) {
  int32_t ret = 0;

  // ***************** Group 1 Test *****************
  std::vector<FieldValue> fvs1;
  fvs1.push_back({"GP1_TEST_FIELD1", "GP1_TEST_VALUE1"});
  fvs1.push_back({"GP1_TEST_FIELD2", "GP1_TEST_VALUE2"});
  fvs1.push_back({"GP1_TEST_FIELD3", "GP1_TEST_VALUE3"});
  s = db.PKHMSet("GP1_HLEN_KEY", fvs1);
  ASSERT_TRUE(s.ok());

  s = db.PKHLen("GP1_HLEN_KEY", &ret);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(ret, 3);

  // ***************** Group 2 Test *****************
  std::vector<FieldValue> fvs2;
  fvs2.push_back({"GP2_TEST_FIELD1", "GP2_TEST_VALUE1"});
  fvs2.push_back({"GP2_TEST_FIELD2", "GP2_TEST_VALUE2"});
  fvs2.push_back({"GP2_TEST_FIELD3", "GP2_TEST_VALUE3"});
  s = db.PKHMSet("GP2_HLEN_KEY", fvs2);
  ASSERT_TRUE(s.ok());

  s = db.PKHDel("GP2_HLEN_KEY", {"GP2_TEST_FIELD1", "GP2_TEST_FIELD2"}, &ret);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(ret, 2);

  s = db.PKHLen("GP2_HLEN_KEY", &ret);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(ret, 1);

  s = db.PKHDel("GP2_HLEN_KEY", {"GP2_TEST_FIELD3"}, &ret);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(ret, 1);

  // Deleting the last field removes the hash entirely.
  s = db.PKHLen("GP2_HLEN_KEY", &ret);
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_EQ(ret, 0);
}
// PKHGetall: returns all field/value pairs of one hash, unaffected by
// neighbouring keys that sort before or after it; NotFound for expired or
// missing keys.
// NOTE(review): output element type reconstructed as FieldValueTTL (the
// PKHash getall carries per-field ttl) — confirm against storage.h.
TEST_F(PKHashesTest, PKHGetall) {
  std::vector<FieldValue> mid_fvs_in;
  mid_fvs_in.push_back({"MID_TEST_FIELD1", "MID_TEST_VALUE1"});
  mid_fvs_in.push_back({"MID_TEST_FIELD2", "MID_TEST_VALUE2"});
  mid_fvs_in.push_back({"MID_TEST_FIELD3", "MID_TEST_VALUE3"});
  s = db.PKHMSet("B_HGETALL_KEY", mid_fvs_in);
  ASSERT_TRUE(s.ok());

  std::vector<FieldValueTTL> fvs_out;
  s = db.PKHGetall("B_HGETALL_KEY", &fvs_out);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(fvs_out.size(), 3);
  ASSERT_EQ(fvs_out[0].field, "MID_TEST_FIELD1");
  ASSERT_EQ(fvs_out[0].value, "MID_TEST_VALUE1");
  ASSERT_EQ(fvs_out[1].field, "MID_TEST_FIELD2");
  ASSERT_EQ(fvs_out[1].value, "MID_TEST_VALUE2");
  ASSERT_EQ(fvs_out[2].field, "MID_TEST_FIELD3");
  ASSERT_EQ(fvs_out[2].value, "MID_TEST_VALUE3");

  // Insert some kv whose position is above the "mid kv".
  std::vector<FieldValue> pre_fvs_in;
  pre_fvs_in.push_back({"PRE_TEST_FIELD1", "PRE_TEST_VALUE1"});
  pre_fvs_in.push_back({"PRE_TEST_FIELD2", "PRE_TEST_VALUE2"});
  pre_fvs_in.push_back({"PRE_TEST_FIELD3", "PRE_TEST_VALUE3"});
  s = db.PKHMSet("A_HGETALL_KEY", pre_fvs_in);
  ASSERT_TRUE(s.ok());
  fvs_out.clear();
  s = db.PKHGetall("B_HGETALL_KEY", &fvs_out);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(fvs_out.size(), 3);
  ASSERT_EQ(fvs_out[0].field, "MID_TEST_FIELD1");
  ASSERT_EQ(fvs_out[0].value, "MID_TEST_VALUE1");
  ASSERT_EQ(fvs_out[1].field, "MID_TEST_FIELD2");
  ASSERT_EQ(fvs_out[1].value, "MID_TEST_VALUE2");
  ASSERT_EQ(fvs_out[2].field, "MID_TEST_FIELD3");
  ASSERT_EQ(fvs_out[2].value, "MID_TEST_VALUE3");

  // Insert some kv whose position is below the "mid kv".
  std::vector<FieldValue> suf_fvs_in;
  suf_fvs_in.push_back({"SUF_TEST_FIELD1", "SUF_TEST_VALUE1"});
  suf_fvs_in.push_back({"SUF_TEST_FIELD2", "SUF_TEST_VALUE2"});
  suf_fvs_in.push_back({"SUF_TEST_FIELD3", "SUF_TEST_VALUE3"});
  s = db.PKHMSet("C_HGETALL_KEY", suf_fvs_in);
  ASSERT_TRUE(s.ok());
  fvs_out.clear();
  s = db.PKHGetall("B_HGETALL_KEY", &fvs_out);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(fvs_out.size(), 3);
  ASSERT_EQ(fvs_out[0].field, "MID_TEST_FIELD1");
  ASSERT_EQ(fvs_out[0].value, "MID_TEST_VALUE1");
  ASSERT_EQ(fvs_out[1].field, "MID_TEST_FIELD2");
  ASSERT_EQ(fvs_out[1].value, "MID_TEST_VALUE2");
  ASSERT_EQ(fvs_out[2].field, "MID_TEST_FIELD3");
  ASSERT_EQ(fvs_out[2].value, "MID_TEST_VALUE3");

  // PKHGetall on a timed-out hash.
  // NOTE(review): |type_status| is never passed to Expire — vacuous check.
  fvs_out.clear();
  std::map<storage::DataType, rocksdb::Status> type_status;
  db.Expire("B_HGETALL_KEY", 1);
  ASSERT_TRUE(type_status[storage::DataType::kHashes].ok());
  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
  s = db.PKHGetall("B_HGETALL_KEY", &fvs_out);
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_EQ(fvs_out.size(), 0);

  // PKHGetall on a hash that never existed.
  fvs_out.clear();
  s = db.PKHGetall("HGETALL_NOT_EXIST_KEY", &fvs_out);
  ASSERT_TRUE(s.IsNotFound());
  ASSERT_EQ(fvs_out.size(), 0);
}
does not exist the value is set to 0 before the + // operation is performed + s = db.PKHIncrby("HINCRBY_KEY", "HINCRBY_NOT_EXIST_FIELD", 100, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 100); + s = db.PKHGet("HINCRBY_KEY", "HINCRBY_NOT_EXIST_FIELD", &str_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(atoll(str_value.data()), 100); + + s = db.PKHSet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", "100", &ret); + ASSERT_TRUE(s.ok()); + + // Positive test + s = db.PKHIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", 100, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 200); + s = db.PKHGet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", &str_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(atoll(str_value.data()), 200); + + // Negative test + s = db.PKHIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", -100, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 100); + s = db.PKHGet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", &str_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(atoll(str_value.data()), 100); + + // Larger than the maximum number 9223372036854775807 + s = db.PKHSet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", "10", &ret); + ASSERT_TRUE(s.ok()); + s = db.PKHIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", 9223372036854775807, &value); + ASSERT_TRUE(s.IsInvalidArgument()); + + // Less than the minimum number -9223372036854775808 + s = db.PKHSet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", "-10", &ret); + ASSERT_TRUE(s.ok()); + s = db.PKHIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", -9223372036854775807, &value); + ASSERT_TRUE(s.IsInvalidArgument()); +} + +// PKHKeys +TEST_F(PKHashesTest, PKHKeys) { + int32_t ret = 0; + std::vector mid_fvs_in; + mid_fvs_in.push_back({"MID_TEST_FIELD1", "MID_TEST_VALUE1"}); + mid_fvs_in.push_back({"MID_TEST_FIELD2", "MID_TEST_VALUE2"}); + mid_fvs_in.push_back({"MID_TEST_FIELD3", "MID_TEST_VALUE3"}); + s = db.PKHMSet("B_HKEYS_KEY", mid_fvs_in); + ASSERT_TRUE(s.ok()); + + std::vector fields; + s = db.PKHKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(fields.size(), 3); + 
ASSERT_EQ(fields[0], "MID_TEST_FIELD1"); + ASSERT_EQ(fields[1], "MID_TEST_FIELD2"); + ASSERT_EQ(fields[2], "MID_TEST_FIELD3"); + + // Insert some kv who's position above "mid kv" + std::vector pre_fvs_in; + pre_fvs_in.push_back({"PRE_TEST_FIELD1", "PRE_TEST_VALUE1"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD2", "PRE_TEST_VALUE2"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD3", "PRE_TEST_VALUE3"}); + s = db.PKHMSet("A_HKEYS_KEY", pre_fvs_in); + ASSERT_TRUE(s.ok()); + fields.clear(); + s = db.PKHKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(fields.size(), 3); + ASSERT_EQ(fields[0], "MID_TEST_FIELD1"); + ASSERT_EQ(fields[1], "MID_TEST_FIELD2"); + ASSERT_EQ(fields[2], "MID_TEST_FIELD3"); + + // Insert some kv who's position below "mid kv" + std::vector suf_fvs_in; + suf_fvs_in.push_back({"SUF_TEST_FIELD1", "SUF_TEST_VALUE1"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD2", "SUF_TEST_VALUE2"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD3", "SUF_TEST_VALUE3"}); + s = db.PKHMSet("A_HKEYS_KEY", suf_fvs_in); + ASSERT_TRUE(s.ok()); + fields.clear(); + s = db.PKHKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(fields.size(), 3); + ASSERT_EQ(fields[0], "MID_TEST_FIELD1"); + ASSERT_EQ(fields[1], "MID_TEST_FIELD2"); + ASSERT_EQ(fields[2], "MID_TEST_FIELD3"); + + // PKHKeys timeout hash table + fields.clear(); + std::map type_status; + db.Expire("B_HKEYS_KEY", 1); + ASSERT_TRUE(type_status[storage::DataType::kHashes].ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.PKHKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(fields.size(), 0); + + // PKHKeys not exist hash table + fields.clear(); + s = db.PKHKeys("HKEYS_NOT_EXIST_KEY", &fields); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(fields.size(), 0); +} + +// PKHVals +TEST_F(PKHashesTest, PKHVals) { + int32_t ret = 0; + std::vector mid_fvs_in; + mid_fvs_in.push_back({"MID_TEST_FIELD1", "MID_TEST_VALUE1"}); + mid_fvs_in.push_back({"MID_TEST_FIELD2", 
"MID_TEST_VALUE2"}); + mid_fvs_in.push_back({"MID_TEST_FIELD3", "MID_TEST_VALUE3"}); + s = db.PKHMSet("B_HVALS_KEY", mid_fvs_in); + ASSERT_TRUE(s.ok()); + + std::vector values; + s = db.PKHVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(values.size(), 3); + ASSERT_EQ(values[0], "MID_TEST_VALUE1"); + ASSERT_EQ(values[1], "MID_TEST_VALUE2"); + ASSERT_EQ(values[2], "MID_TEST_VALUE3"); + + // Insert some kv who's position above "mid kv" + std::vector pre_fvs_in; + pre_fvs_in.push_back({"PRE_TEST_FIELD1", "PRE_TEST_VALUE1"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD2", "PRE_TEST_VALUE2"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD3", "PRE_TEST_VALUE3"}); + s = db.PKHMSet("A_HVALS_KEY", pre_fvs_in); + ASSERT_TRUE(s.ok()); + values.clear(); + s = db.PKHVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(values.size(), 3); + ASSERT_EQ(values[0], "MID_TEST_VALUE1"); + ASSERT_EQ(values[1], "MID_TEST_VALUE2"); + ASSERT_EQ(values[2], "MID_TEST_VALUE3"); + + // Insert some kv who's position below "mid kv" + std::vector suf_fvs_in; + suf_fvs_in.push_back({"SUF_TEST_FIELD1", "SUF_TEST_VALUE1"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD2", "SUF_TEST_VALUE2"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD3", "SUF_TEST_VALUE3"}); + s = db.PKHMSet("C_HVALS_KEY", suf_fvs_in); + ASSERT_TRUE(s.ok()); + values.clear(); + s = db.PKHVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(values.size(), 3); + ASSERT_EQ(values[0], "MID_TEST_VALUE1"); + ASSERT_EQ(values[1], "MID_TEST_VALUE2"); + ASSERT_EQ(values[2], "MID_TEST_VALUE3"); + + // PKHVals timeout hash table + values.clear(); + std::map type_status; + db.Expire("B_HVALS_KEY", 1); + ASSERT_TRUE(type_status[storage::DataType::kHashes].ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.PKHVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(values.size(), 0); + + // PKHVals not exist hash table + values.clear(); + s = db.PKHVals("HVALS_NOT_EXIST_KEY", 
&values); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(values.size(), 0); +} + +// PKHStrlen +TEST_F(PKHashesTest, PKHStrlenTest) { + int32_t ret = 0; + int32_t len = 0; + s = db.PKHSet("HSTRLEN_KEY", "HSTRLEN_TEST_FIELD", "HSTRLEN_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.PKHStrlen("HSTRLEN_KEY", "HSTRLEN_TEST_FIELD", &len); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(len, 18); + + // If the key or the field do not exist, 0 is returned + s = db.PKHStrlen("HSTRLEN_KEY", "HSTRLEN_NOT_EXIST_FIELD", &len); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(len, 0); +} + +// PKHScan +TEST_F(PKHashesTest, PKHScanTest) { // NOLINT + int64_t cursor = 0; + int64_t next_cursor = 0; + std::vector field_value_out; + + // ***************** Group 1 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp1_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.PKHMSet("GP1_HSCAN_KEY", gp1_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_HSCAN_KEY", 8)); + + s = db.PKHScan("GP1_HSCAN_KEY", 0, "*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}, {"b", "v"}, {"c", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP1_HSCAN_KEY", cursor, "*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(field_value_match(field_value_out, {{"d", "v"}, {"e", "v"}, {"f", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP1_HSCAN_KEY", cursor, "*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 0); + 
ASSERT_TRUE(field_value_match(field_value_out, {{"g", "v"}, {"h", "v"}})); + + // ***************** Group 2 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp2_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.PKHMSet("GP2_HSCAN_KEY", gp2_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP2_HSCAN_KEY", 8)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"b", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(field_value_match(field_value_out, {{"c", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 4); + ASSERT_TRUE(field_value_match(field_value_out, {{"d", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(field_value_match(field_value_out, {{"e", 
"v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(field_value_match(field_value_out, {{"f", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 7); + ASSERT_TRUE(field_value_match(field_value_out, {{"g", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"h", "v"}})); + + // ***************** Group 3 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp3_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.PKHMSet("GP3_HSCAN_KEY", gp3_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP3_HSCAN_KEY", 8)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP3_HSCAN_KEY", cursor, "*", 5, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 5); + ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, {"e", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP3_HSCAN_KEY", cursor, "*", 5, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"f", "v"}, {"g", "v"}, {"h", 
"v"}})); + + // ***************** Group 4 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp4_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.PKHMSet("GP4_HSCAN_KEY", gp4_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP4_HSCAN_KEY", 8)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP4_HSCAN_KEY", cursor, "*", 10, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 8); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match( + field_value_out, + {{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}})); + + // ***************** Group 5 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp5_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.PKHMSet("GP5_HSCAN_KEY", gp5_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP5_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP5_HSCAN_KEY", cursor, "*1*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP5_HSCAN_KEY", cursor, "*1*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP5_HSCAN_KEY", cursor, "*1*", 3, 
&field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}})); + + // ***************** Group 6 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp6_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.PKHMSet("GP6_HSCAN_KEY", gp6_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP6_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP6_HSCAN_KEY", cursor, "a*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP6_HSCAN_KEY", cursor, "a*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}, {"a_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP6_HSCAN_KEY", cursor, "a*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP6_HSCAN_KEY", cursor, "a*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = 
db.PKHScan("GP6_HSCAN_KEY", cursor, "a*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP6_HSCAN_KEY", cursor, "a*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_3_", "v"}})); + + // ***************** Group 7 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp7_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.PKHMSet("GP7_HSCAN_KEY", gp7_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP7_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP7_HSCAN_KEY", cursor, "b*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}, {"b_2_", "v"}, {"b_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP7_HSCAN_KEY", cursor, "b*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}, {"b_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP7_HSCAN_KEY", cursor, "b*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_3_", "v"}})); + + 
field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP7_HSCAN_KEY", cursor, "b*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP7_HSCAN_KEY", cursor, "b*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP7_HSCAN_KEY", cursor, "b*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_3_", "v"}})); + + // ***************** Group 8 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp8_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.PKHMSet("GP8_HSCAN_KEY", gp8_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP8_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP8_HSCAN_KEY", cursor, "c*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP8_HSCAN_KEY", cursor, "c*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + 
ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}, {"c_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP8_HSCAN_KEY", cursor, "c*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP8_HSCAN_KEY", cursor, "c*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP8_HSCAN_KEY", cursor, "c*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.PKHScan("GP8_HSCAN_KEY", cursor, "c*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_3_", "v"}})); + + // ***************** Group 9 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp9_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.PKHMSet("GP9_HSCAN_KEY", gp9_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP9_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP9_HSCAN_KEY", cursor, "d*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + 
ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 10 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp10_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.PKHMSet("GP10_HSCAN_KEY", gp10_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP10_HSCAN_KEY", 9)); + + ASSERT_TRUE(make_expired(&db, "GP10_HSCAN_KEY")); + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP10_HSCAN_KEY", cursor, "*", 10, &field_value_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 11 Test ***************** + // PKHScan Not Exist Key + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.PKHScan("GP11_HSCAN_KEY", cursor, "*", 10, &field_value_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {})); +} + +int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("pkhashes_test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +}