From e149ba3971aa9aacccceab058e355c2660ba8151 Mon Sep 17 00:00:00 2001 From: Philippe Wooding Date: Fri, 30 Mar 2018 21:57:57 +0200 Subject: [PATCH] Allow preferred nodes to be configured If a cluster is distributed geographically, it can be preferable to query the local nodes. For readonly commands, we have the possibility to choose a subset of the cluster nodes that will be prioritized when choosing a node. A specific local slave can therefore be used. --- corvus.conf | 17 +++++++ src/config.c | 77 +++++++++++++++++++++++++++++-- src/config.h | 5 ++ src/connection.c | 11 ++++- src/slot.c | 109 ++++++++++++++++++++++++++++++++++++++++++-- src/slot.h | 11 ++++- src/socket.c | 5 ++ src/socket.h | 1 + tests/test_config.c | 8 ++-- tests/test_slot.c | 70 ++++++++++++++++++++++++++-- tests/test_socket.c | 15 ++++++ 11 files changed, 310 insertions(+), 19 deletions(-) diff --git a/corvus.conf b/corvus.conf index a61dfcc..f8032a1 100644 --- a/corvus.conf +++ b/corvus.conf @@ -57,6 +57,7 @@ syslog 0 # # * `master`, forward all reading commands to master, the default # * `read-slave-only`, forward all reading commands to slaves +# * `read-preferred`, forward all reading commands to a slave or master based on the preferred_nodes list # * `both`, forward reading commands to both master and slaves # # If new slaves are added to the cluster, `PROXY UPDATESLOTMAP` should be emmited # # read-strategy master +# +# When the `read-preferred` strategy is configured, the nodes from the `preferred_nodes` list +# will be preferred to any other node for a given slot. +# This is useful if a cluster is distributed geographically. It can be preferable to query the local nodes. +# If the cluster shows a preferred node as disconnected, it won't be used but the cluster status will be +# polled until all preferred nodes are available again. 
+# +# preferred_nodes localhost:8003,localhost:8004 + +# +# When `read-preferred` read-strategy is used and the cluster shows a preferred node to be disconnected, +# an active polling of the cluster configuration is started to determine as soon as possible when the +# preferred node is back online so that it can be used again. +# +polling_interval 30 + # Slowlog # The following two configs are almost the same with redis. # Every command whose lantency exceeds `slowlog-log-slower-than` will be considered a slow command, diff --git a/src/config.c b/src/config.c index e779e48..a871123 100644 --- a/src/config.c +++ b/src/config.c @@ -31,6 +31,8 @@ const char * CONFIG_OPTIONS[] = { "metric_interval", "stats", "read-strategy", + "preferred_nodes", + "polling_interval", "requirepass", "client_timeout", "server_timeout", @@ -48,6 +50,9 @@ void config_init() config.bind = 12345; config.node = cv_calloc(1, sizeof(struct node_conf)); config.node->refcount = 1; + config.preferred_node = cv_calloc(1, sizeof(struct node_conf)); + config.preferred_node->refcount = 1; + config.polling_interval = 10; config.thread = DEFAULT_THREAD; config.loglevel = INFO; config.syslog = 0; @@ -56,7 +61,7 @@ void config_init() config.server_timeout = 0; config.bufsize = DEFAULT_BUFSIZE; config.requirepass = NULL; - config.readslave = config.readmasterslave = false; + config.readslave = config.readmasterslave = config.readpreferred = false; config.slowlog_max_len = 1024; config.slowlog_log_slower_than = -1; config.slowlog_statsd_enabled = 0; @@ -68,6 +73,7 @@ void config_init() void config_free() { config_node_dec_ref(config.node); + config_node_dec_ref(config.preferred_node); pthread_mutex_destroy(&lock_conf_node); pthread_mutex_destroy(&lock_config_rewrite); } @@ -106,6 +112,25 @@ void config_set_node(struct node_conf *node) config_node_dec_ref(oldnode); } +struct node_conf *config_get_preferred_node() +{ + pthread_mutex_lock(&lock_conf_node); + struct node_conf *node = config.preferred_node; + 
int refcount = ATOMIC_INC(node->refcount, 1); + pthread_mutex_unlock(&lock_conf_node); + assert(refcount >= 1); + return node; +} + +void config_set_preferred_node(struct node_conf *node) +{ + pthread_mutex_lock(&lock_conf_node); + struct node_conf *oldnode = config.preferred_node; + config.preferred_node = node; + pthread_mutex_unlock(&lock_conf_node); + config_node_dec_ref(oldnode); +} + void config_node_dec_ref(struct node_conf *node) { int refcount = ATOMIC_DEC(node->refcount, 1); @@ -134,6 +159,24 @@ void config_node_to_str(char *str, size_t max_len) config_node_dec_ref(nodes); } +void config_preferred_node_to_str(char *str, size_t max_len) +{ + struct node_conf *nodes = config_get_preferred_node(); + char buf[ADDRESS_LEN + 1]; + for (size_t i = 0; i != nodes->len; i++) { + struct address *addr = &nodes->addr[i]; + size_t len = snprintf(buf, max_len, "%s:%u", + addr->ip, addr->port); + size_t comma_len = i > 0 ? 1 : 0; + if (len + comma_len > max_len) break; + if (comma_len) *str++ = ','; + strcpy(str, buf); + str += len; + max_len -= len + comma_len; + } + config_node_dec_ref(nodes); +} + int parse_int(char *s, int *result) { const int max = 100000000; @@ -188,11 +231,15 @@ int config_add(char *name, char *value) config.readslave = true; } } else if (strcmp(name, "read-strategy") == 0) { + config.readpreferred = false; if (strcmp(value, "read-slave-only") == 0) { config.readmasterslave = false; config.readslave = true; } else if (strcmp(value, "both") == 0) { config.readmasterslave = config.readslave = true; + } else if (strcmp(value, "read-preferred") == 0) { + config.readmasterslave = config.readslave = false; + config.readpreferred = true; } else { config.readmasterslave = config.readslave = false; } @@ -224,6 +271,9 @@ int config_add(char *name, char *value) } else if (strcmp(name, "metric_interval") == 0) { TRY_PARSE_INT(); config.metric_interval = val > 0 ? 
val : 10; + } else if (strcmp(name, "polling_interval") == 0) { + TRY_PARSE_INT(); + config.polling_interval = val > 0 ? val : 10; } else if (strcmp(name, "loglevel") == 0) { if (strcasecmp(value, "debug") == 0) { ATOMIC_SET(config.loglevel, DEBUG); @@ -247,7 +297,7 @@ int config_add(char *name, char *value) config.requirepass = cv_calloc(strlen(value) + 1, sizeof(char)); memcpy(config.requirepass, value, strlen(value)); } - } else if (strcmp(name, "node") == 0) { + } else if (strcmp(name, "node") == 0 || strcmp(name, "preferred_nodes") == 0) { // strtok will modify `value` to tokenize it. // Copy it first in case value is a string literal char buf[strlen(value) + 1]; @@ -266,14 +316,18 @@ int config_add(char *name, char *value) p = strtok(NULL, ","); } if (addr_cnt == 0) { - LOG(WARN, "received empty node value in config set"); + LOG(WARN, "received empty %s value in config set", name); return CORVUS_ERR; } struct node_conf *newnode = cv_malloc(sizeof(struct node_conf)); newnode->addr = addr; newnode->len = addr_cnt; newnode->refcount = 1; - config_set_node(newnode); + if (strcmp(name, "node") == 0) { + config_set_node(newnode); + } else { + config_set_preferred_node(newnode); + } } else if (strcmp(name, "slowlog-log-slower-than") == 0) { TRY_PARSE_INT(); ATOMIC_SET(config.slowlog_log_slower_than, val); @@ -297,6 +351,8 @@ int config_get(const char *name, char *value, size_t max_len) snprintf(value, max_len, "%u", config.bind); } else if (strcmp(name, "node") == 0) { config_node_to_str(value, max_len); + } else if (strcmp(name, "preferred_nodes") == 0) { + config_preferred_node_to_str(value, max_len); } else if (strcmp(name, "thread") == 0) { snprintf(value, max_len, "%d", config.thread); } else if (strcmp(name, "loglevel") == 0) { @@ -335,6 +391,8 @@ int config_get(const char *name, char *value, size_t max_len) snprintf(value, max_len, "%d", config.slowlog_max_len); } else if (strcmp(name, "slowlog-statsd-enabled") == 0) { strncpy(value, 
BOOL_STR(config.slowlog_statsd_enabled), max_len); + } else if (strcmp(name, "polling_interval") == 0) { + snprintf(value, max_len, "%d", config.polling_interval); + } else { return CORVUS_ERR; } @@ -612,3 +670,14 @@ bool config_option_changable(const char *option) } return false; } + +bool config_is_preferred_node(struct address *node) +{ + struct node_conf *preferred_node = config_get_preferred_node(); + for (size_t i = 0; i < preferred_node->len; i++) { + if (socket_cmp(node, &preferred_node->addr[i]) == 0) { + return true; + } + } + return false; +} diff --git a/src/config.h b/src/config.h index ccb4228..b9c27dc 100644 --- a/src/config.h +++ b/src/config.h @@ -17,6 +17,7 @@ struct corvus_config { char cluster[CLUSTER_NAME_SIZE + 1]; uint16_t bind; struct node_conf *node; + struct node_conf *preferred_node; /* List of nodes that should be given a higher priority */ int thread; int loglevel; bool syslog; @@ -25,6 +26,8 @@ struct corvus_config { bool stats; bool readslave; bool readmasterslave; + bool readpreferred; + uint16_t polling_interval; /* Interval to use when polling for cluster configuration */ char *requirepass; int64_t client_timeout; int64_t server_timeout; @@ -46,5 +49,7 @@ void config_set_node(struct node_conf *node); void config_node_dec_ref(struct node_conf *node); int config_add(char *name, char *value); bool config_option_changable(const char *option); +struct node_conf *config_get_preferred_node(); +bool config_is_preferred_node(struct address *node); #endif /* end of include guard: CONFIG_H */ \ No newline at end of file diff --git a/src/connection.c b/src/connection.c index 94ae413..d74cf0e 100644 --- a/src/connection.c +++ b/src/connection.c @@ -325,14 +325,21 @@ struct connection *conn_get_server(struct context *ctx, uint16_t slot, bool readonly = false; if (slot_get_node_addr(slot, &info)) { - addr = &info.nodes[0]; + addr = &info.nodes[0].addr; if (access != CMD_ACCESS_WRITE && config.readslave && info.index > 1) { int r = 
rand_r(&ctx->seed); if (!config.readmasterslave || r % info.index != 0) { int i = r % (info.index - 1); - addr = &info.nodes[++i]; + addr = &info.nodes[++i].addr; readonly = true; } + } else if (access != CMD_ACCESS_WRITE && config.readpreferred) { + /* + * If readpreferred is set and we're not running a 'write' command + * use the first node available that's in the preferred list. + */ + addr = &info.preferred_nodes[0].addr; + readonly = true; } if (addr->port > 0) { return conn_get_server_from_pool(ctx, addr, readonly); diff --git a/src/slot.c b/src/slot.c index 208a6db..621ad34 100644 --- a/src/slot.c +++ b/src/slot.c @@ -16,6 +16,11 @@ static const char SLOTS_CMD[] = "*2\r\n$7\r\nCLUSTER\r\n$5\r\nNODES\r\n"; static int8_t in_progress = 0; +/* + * Polling is required when a preferred node is not available. + * We need to poll cluster config to find out when node can be used. + */ +static int8_t polling_required = 0; static pthread_mutex_t job_mutex; static pthread_cond_t signal_cond; @@ -82,6 +87,79 @@ static inline void node_map_free(struct dict *map) } } +/* + * For every slot, set the preferred_nodes list + * The nodes available for that slot are sorted such that the preferred nodes + * come first in the list. 
+ */ +static void sort_nodes() +{ + bool invalid_found = false; + bool polling_changed = false; + struct node_conf *preferred_nodes = config_get_preferred_node(); + struct dict_iter iter = DICT_ITER_INITIALIZER; + DICT_FOREACH(&slot_map.node_map, &iter) { + struct node_info *n = iter.value; + struct node sorted_nodes[MAX_SLAVE_NODES + 1]; + struct node remaining_nodes[MAX_SLAVE_NODES + 1]; + int sorted_size = 0; + int remaining_size = 0; + for (int i = 0; i < preferred_nodes->len; i++) { + for (int j = 0; j < n->index; j++) { + // Build list of nodes that are in the preferred list + if (socket_cmp(&n->nodes[j].addr, &preferred_nodes->addr[i]) == 0) { + if (n->nodes[j].available) { + memcpy(&sorted_nodes[sorted_size], &n->nodes[j], sizeof(struct address)); + sorted_size++; + } else { + // A preferred node is not valid, set polling_required flag so that the cluster config + // gets retrieved regulary in case preferred node becomes available again. + invalid_found = true; + } + } + } + } + + // Add remaining nodes to list + for (int i = 0; i < n->index; i++) { + bool found = false; + if (!n->nodes[i].available) continue; + + for (int j = 0; j < sorted_size; j++) { + if (socket_cmp(&n->nodes[i].addr, &sorted_nodes[j].addr) == 0) { + found = true; + break; + } + } + if (!found) { + memcpy(&remaining_nodes[remaining_size], &n->nodes[i], sizeof(struct address)); + remaining_size++; + } + } + + // Copy sorted_nodes back to node_info structure + memcpy(&n->preferred_nodes[0], &sorted_nodes[0], sizeof(struct address) * sorted_size); + // Add remaining nodes to the end of list + memcpy(&n->preferred_nodes[sorted_size], &remaining_nodes[0], sizeof(struct address) * remaining_size); + } + + if (invalid_found) { + LOG(INFO, "Unreachable preferred node found, polling required."); + // A preferred node has been found to be invalid, start polling for cluster config + // Only lock mutex if required + if (!polling_required) polling_changed = true; + } else if (polling_required) { + 
LOG(INFO, "Preferred nodes ok, polling stopped."); + polling_changed = true; + } + + if (polling_changed) { + pthread_mutex_lock(&job_mutex); + polling_required = !polling_required; + pthread_mutex_unlock(&job_mutex); + } +} + void node_desc_add(struct node_desc *b, uint8_t *start, uint8_t *end, bool partial) { if (b->len <= b->index) { @@ -203,6 +281,11 @@ int parse_cluster_nodes(struct redis_data *data) goto end; } bool is_master = d->parts[3].data[0] == '-'; + /* + * A node is considered available (can be sent commands) if it is connected to the cluster + * and it's replying to PINGs + */ + bool is_available = strcmp(d->parts[7].data, "connected") == 0 && d->parts[4].data[0] == '0'; char *name = is_master ? d->parts[0].data : d->parts[3].data; struct node_info *node = dict_get(&slot_map.node_map, name); if (node == NULL) { @@ -212,16 +295,24 @@ int parse_cluster_nodes(struct redis_data *data) dict_set(&slot_map.node_map, node->name, node); } if (is_master) { - socket_parse_addr(d->parts[1].data, &node->nodes[0]); + socket_parse_addr(d->parts[1].data, &node->nodes[0].addr); + node->nodes[0].available = is_available; node->slot_spec = d->parts; node->spec_length = d->index - 8; } else if (node->index <= MAX_SLAVE_NODES && ( strcasecmp(d->parts[2].data, "slave") == 0 || strcasecmp(d->parts[2].data, "myself,slave") == 0)) { - socket_parse_addr(d->parts[1].data, &node->nodes[node->index++]); + socket_parse_addr(d->parts[1].data, &node->nodes[node->index].addr); + node->nodes[node->index].available = is_available; + node->index++; } } + /* + * If readpreferred is set, nodes need to be sorted by preferrence. 
+ */ + if (config.readpreferred) sort_nodes(); + slot_count = parse_slots(); end: @@ -371,15 +462,25 @@ void do_job(struct context *ctx, int job) void *slot_manager(void *data) { - int job; + int job, ret; struct context *ctx = data; + struct timespec timeToWait; pthread_mutex_lock(&job_mutex); in_progress = 0; while (ctx->state != CTX_QUIT) { if (slot_job == SLOT_UPDATE_UNKNOWN) { - pthread_cond_wait(&signal_cond, &job_mutex); + if (polling_required) { + timeToWait.tv_sec = time(NULL) + config.polling_interval; + ret = pthread_cond_timedwait(&signal_cond, &job_mutex, &timeToWait); + /* If polling interval is over, retrieve cluster config */ + if (ret == ETIMEDOUT) { + slot_job = SLOT_UPDATE; + } + } else { + pthread_cond_wait(&signal_cond, &job_mutex); + } continue; } diff --git a/src/slot.h b/src/slot.h index 305834a..d9e7ba8 100644 --- a/src/slot.h +++ b/src/slot.h @@ -30,11 +30,18 @@ struct node_desc { uint16_t index, len; }; +struct node { + struct address addr; + bool available; +}; + struct node_info { char name[64]; - // contains master and slaves of one shard - struct address nodes[MAX_SLAVE_NODES + 1]; + // contains preferred node (if exists), master and slaves of one shard + struct node nodes[MAX_SLAVE_NODES + 1]; size_t index; // length of `nodes` above + struct node preferred_nodes[MAX_SLAVE_NODES + 1]; + size_t preferred_index; // length of `preferred_nodes` above int refcount; // for parsing slots of a master node struct desc_part *slot_spec; diff --git a/src/socket.c b/src/socket.c index 4350116..e95ed4f 100644 --- a/src/socket.c +++ b/src/socket.c @@ -391,3 +391,8 @@ int socket_trigger_event(int evfd) } return CORVUS_OK; } + +int socket_cmp(struct address *a, struct address *b) +{ + return strcmp(a->ip, b->ip) || a->port != b->port; +} diff --git a/src/socket.h b/src/socket.h index 24de40d..391b391 100644 --- a/src/socket.h +++ b/src/socket.h @@ -33,5 +33,6 @@ int socket_parse_addr(char *addr, struct address *address); int socket_parse_ip(char 
*addr, struct address *address); int socket_create_eventfd(); int socket_trigger_event(int evfd); +int socket_cmp(struct address *a, struct address *b); #endif /* end of include guard: SOCKET_H */ diff --git a/tests/test_config.c b/tests/test_config.c index 003256b..aae6738 100644 --- a/tests/test_config.c +++ b/tests/test_config.c @@ -45,11 +45,13 @@ TEST(test_config_read_strategy) { char n[] = "read-strategy"; ASSERT(config_add(n, "read-slave-only") == 0); - ASSERT(config.readslave && !config.readmasterslave); + ASSERT(config.readslave && !config.readmasterslave && !config.readpreferred); ASSERT(config_add(n, "both") == 0); - ASSERT(config.readslave && config.readmasterslave); + ASSERT(config.readslave && config.readmasterslave && !config.readpreferred); ASSERT(config_add(n, "master") == 0); - ASSERT(!config.readslave && !config.readmasterslave); + ASSERT(!config.readslave && !config.readmasterslave && !config.readpreferred); + ASSERT(config_add(n, "read-preferred") == 0); + ASSERT(!config.readslave && !config.readmasterslave && config.readpreferred); PASS(NULL); } diff --git a/tests/test_slot.c b/tests/test_slot.c index 97158fd..27906b5 100644 --- a/tests/test_slot.c +++ b/tests/test_slot.c @@ -12,6 +12,10 @@ extern int split_node_description(struct node_desc *desc, struct pos_array *pos_array); extern int parse_cluster_nodes(struct redis_data *data); +extern void config_init(); +extern void config_free(); +extern int config_add(char *name, char *value); +extern void config_set_preferred_node(struct node_conf *node); TEST(test_slot_get1) { struct pos p[] = { @@ -122,12 +126,33 @@ TEST(test_parse_cluster_nodes) { struct node_info info; ASSERT(slot_get_node_addr(5499, &info)); - ASSERT(strcmp(info.nodes[0].ip, "127.0.0.1") == 0 && info.nodes[0].port == 8001); + ASSERT(strcmp(info.nodes[0].addr.ip, "127.0.0.1") == 0 && info.nodes[0].addr.port == 8001); ASSERT(!slot_get_node_addr(9, &info)); PASS(NULL); } +TEST(test_parse_cluster_nodes_disconnected_master) { + char 
data[] = "4f6d838441c4f652f970cd7570c0cf16bbd0f3a9 127.0.0.1:8001 " + "master - 0 1464764873814 9 disconnected 0-5460\n" + "67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:8002 " + "master - 0 1426238316232 2 connected 5461-10922\n"; + struct pos p[] = {{(uint8_t*)data, strlen(data)}}; + struct pos_array pos = {p, strlen(data), 1, 0}; + struct redis_data redis_data; + redis_data.type = REP_STRING; + memcpy(&redis_data.pos, &pos, sizeof(pos)); + int count = parse_cluster_nodes(&redis_data); + + struct node_info info; + ASSERT(slot_get_node_addr(0, &info)); + ASSERT(!info.nodes[0].available); + ASSERT(slot_get_node_addr(10000, &info)); + ASSERT(info.nodes[0].available); + + PASS(NULL); +} + TEST(test_parse_cluster_nodes_slave) { char data[] = "4f6d838441c4f652f970cd7570c0cf16bbd0f3a9 127.0.0.1:8001 " "master - 0 1464764873814 9 connected 0\n" @@ -144,9 +169,9 @@ TEST(test_parse_cluster_nodes_slave) { struct node_info info; ASSERT(slot_get_node_addr(0, &info)); - ASSERT(strcmp(info.nodes[0].ip, "127.0.0.1") == 0 && info.nodes[0].port == 8001); + ASSERT(strcmp(info.nodes[0].addr.ip, "127.0.0.1") == 0 && info.nodes[0].addr.port == 8001); ASSERT(info.index == 2); - ASSERT(strcmp(info.nodes[1].ip, "127.0.0.1") == 0 && info.nodes[1].port == 8003); + ASSERT(strcmp(info.nodes[1].addr.ip, "127.0.0.1") == 0 && info.nodes[1].addr.port == 8003); PASS(NULL); } @@ -167,18 +192,55 @@ TEST(test_parse_cluster_nodes_fail_slave) { struct node_info info; ASSERT(slot_get_node_addr(0, &info)); - ASSERT(strcmp(info.nodes[0].ip, "127.0.0.1") == 0 && info.nodes[0].port == 8001); + ASSERT(strcmp(info.nodes[0].addr.ip, "127.0.0.1") == 0 && info.nodes[0].addr.port == 8001); ASSERT(info.index == 1); PASS(NULL); } +TEST(test_parse_cluster_nodes_preferred_nodes) { + char data[] = "07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 " + "slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected\n" + "67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 " + "master - 0 
1426238316232 2 connected 5461-10922\n" + "292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 " + "master - 0 1426238318243 3 connected 10923-16383\n" + "6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 " + "slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected\n" + "824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 " + "slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected\n" + "e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 " + "myself,master - 0 0 1 disconnected 0-5460\n"; + struct pos p[] = {{(uint8_t*)data, strlen(data)}}; + struct pos_array pos = {p, strlen(data), 1, 0}; + struct redis_data redis_data; + redis_data.type = REP_STRING; + memcpy(&redis_data.pos, &pos, sizeof(pos)); + + config.preferred_node = cv_calloc(1, sizeof(struct node_conf)); + config.preferred_node->refcount = 1; + config.readpreferred = true; + config_add("preferred_nodes", "127.0.0.1:30004"); + + parse_cluster_nodes(&redis_data); + + struct node_info info; + ASSERT(slot_get_node_addr(0, &info)); + ASSERT(strcmp(info.preferred_nodes[0].addr.ip, "127.0.0.1") == 0 && info.preferred_nodes[0].addr.port == 30004); + + config_node_dec_ref(config.preferred_node); + + PASS(NULL); +} + TEST_CASE(test_slot) { RUN_TEST(test_slot_get1); RUN_TEST(test_slot_get2); RUN_TEST(test_slot_get3); RUN_TEST(test_split_node_description); RUN_TEST(test_parse_cluster_nodes); + RUN_TEST(test_parse_cluster_nodes_disconnected_master); RUN_TEST(test_parse_cluster_nodes_slave); RUN_TEST(test_parse_cluster_nodes_fail_slave); + RUN_TEST(test_parse_cluster_nodes_preferred_nodes); } diff --git a/tests/test_socket.c b/tests/test_socket.c index a81426a..752ac0e 100644 --- a/tests/test_socket.c +++ b/tests/test_socket.c @@ -65,9 +65,24 @@ TEST(test_socket_parse_addr_wrong) { PASS(NULL); } +TEST(test_socket_compare) { + struct address addr1; + struct address addr2; + socket_parse_addr("127.0.0.1:12345", &addr1); + socket_parse_addr("127.0.0.1:12345", 
&addr2); + ASSERT(socket_cmp(&addr1, &addr2) == 0); + + socket_parse_addr("127.0.0.1:1234", &addr2); + ASSERT(socket_cmp(&addr1, &addr2) == 1); + + socket_parse_addr("127.0.0.2:12345", &addr2); + ASSERT(socket_cmp(&addr1, &addr2) == 1); +} + TEST_CASE(test_socket) { RUN_TEST(test_socket_address_init); RUN_TEST(test_parse_port); RUN_TEST(test_socket_parse_addr); RUN_TEST(test_socket_parse_addr_wrong); + RUN_TEST(test_socket_compare); }