Skip to content
This repository has been archived by the owner on Dec 23, 2024. It is now read-only.

implement new test framework with real tarantools #54

Merged
merged 1 commit into from
Sep 2, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ FEATURES:
* Added etcd v2 topology provider implementation (#16)
* Add TopologyController mock to improve testing
* Add linter job (#33)
* New test framework with real tarantools

REFACTOR:

Expand Down
3 changes: 3 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,6 @@ testrace: BUILD_TAGS+=testonly
testrace:
@CGO_ENABLED=1 \
$(GO_CMD) test -tags='$(BUILD_TAGS)' -race -timeout=$(EXTENDED_TEST_TIMEOUT) -parallel=20

# Run the integration test suite against real tarantool instances.
test/tnt:
	@$(MAKE) -C ./tests/tnt
57 changes: 57 additions & 0 deletions tests/tnt/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
NREPLICASETS?=5
START_PORT?=33000

# The recipes below use bash-only shell constructs ("[[ ]]" and "(( ))").
# GNU Make executes recipes with /bin/sh by default, which is NOT bash on
# many systems (e.g. dash on Debian/Ubuntu), so pin the shell explicitly.
SHELL := /bin/bash

# ANSI color codes for stage banners.
RED=\033[0;31m
GREEN=\033[0;32m
YELLOW=\033[0;33m
NC=\033[0m # No Color

default: run

all: default

# Full pipeline. cluster-down still runs after failing tests because the
# gotest recipe tolerates failures (leading "-").
run: | clean cluster-up bootstrap gotest cluster-down

# Remove the temporary working directory left over from previous runs.
clean:
	@echo "${GREEN}STAGE: CLEANUP${NC}"
	rm -rf ./tmp

# Start ${NREPLICASETS} vshard-storage replicasets, each consisting of one
# master and one follower instance. Every instance runs in background mode
# and discards its log output (/dev/null).
cluster-up:
	@echo "${GREEN}STAGE: CLUSTER UP${NC}"
	mkdir -p tmp
	rs=1 ; while [[ $$rs -le ${NREPLICASETS} ]] ; do \
		mkdir -p tmp/$${rs}/master tmp/$${rs}/follower; \
		ln -sf $$(pwd)/storage.lua tmp/$${rs}/master/storage_$${rs}_master.lua; \
		ln -sf $$(pwd)/cfgmaker.lua tmp/$${rs}/master/cfgmaker.lua; \
		ln -sf $$(pwd)/storage.lua tmp/$${rs}/follower/storage_$${rs}_follower.lua; \
		ln -sf $$(pwd)/cfgmaker.lua tmp/$${rs}/follower/cfgmaker.lua; \
		TT_WORK_DIR=tmp/$${rs}/master/ TT_PID_FILE="tarantool.pid" TT_BACKGROUND=true START_PORT=${START_PORT} TT_LOG=/dev/null NREPLICASETS=${NREPLICASETS} tarantool tmp/$${rs}/master/storage_$${rs}_master.lua; \
		TT_WORK_DIR=tmp/$${rs}/follower/ TT_PID_FILE="tarantool.pid" TT_BACKGROUND=true START_PORT=${START_PORT} TT_LOG=/dev/null NREPLICASETS=${NREPLICASETS} tarantool tmp/$${rs}/follower/storage_$${rs}_follower.lua; \
		((rs = rs + 1)) ; \
	done

# Bootstrap the vshard cluster with a one-shot lua vshard.router script.
bootstrap:
	@echo "${GREEN}STAGE: BOOTSTRAP CLUSTER${NC}"
	mkdir -p tmp/router_work_dir
	NREPLICASETS=${NREPLICASETS} START_PORT=${START_PORT} TT_WORK_DIR=tmp/router_work_dir/ tarantool router.lua

# Stop every storage instance via the pid file each one wrote at startup.
cluster-down:
	@echo "${GREEN}STAGE: CLUSTER DOWN${NC}"
	rs=1 ; while [[ $$rs -le ${NREPLICASETS} ]] ; do \
		kill -9 $$(cat tmp/$${rs}/master/tarantool.pid); \
		kill -9 $$(cat tmp/$${rs}/follower/tarantool.pid); \
		((rs = rs + 1)) ; \
	done

# Run the Go test suite. The leading "-" tolerates test failures so that
# the cluster-down stage still executes afterwards.
gotest:
	@echo "${GREEN}STAGE: RUN GOTESTS${NC}"
	-START_PORT=${START_PORT} NREPLICASETS=${NREPLICASETS} go test -race -parallel=20 -coverpkg="../../" -coverprofile cover.out -timeout=90s
# Uncomment to inspect coverage in a browser:
# go tool cover -html=cover.out
12 changes: 12 additions & 0 deletions tests/tnt/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# A framework to test go-vshard-router module using real tarantools

## Requirements

Your system must have:
- an installed tarantool that supports vshard (1.9+)
- an installed tnt vshard library (`local vshard = require('vshard')` must work)

The following command runs all Go tests in this directory:
```bash
make run
```
73 changes: 73 additions & 0 deletions tests/tnt/cfgmaker.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
package tnt_test

import (
"fmt"

vshardrouter "github.com/KaymeKaydex/go-vshard-router"
"github.com/google/uuid"
)

// cfgmaker builds deterministic cluster configurations for the test
// harness: instance addresses are derived from startPort and UUIDs from a
// fixed template, so a config is reproducible for a given
// (startPort, nreplicasets) pair.
type cfgmaker struct {
	startPort    int // first TCP port; each replicaset occupies two consecutive ports
	nreplicasets int // number of replicasets (one master + one follower each)
}

// getUUID returns a deterministic UUID that encodes the replicaset id and
// the instance ordinal n via the template
// "00000000-0000-%04d-%04d-000000000000". It panics if the formatted
// string does not parse, which cannot happen for small non-negative ids.
func (c cfgmaker) getUUID(rsID int, n int) uuid.UUID {
	const uuidTemplate = "00000000-0000-%04d-%04d-000000000000"

	uuidStr := fmt.Sprintf(uuidTemplate, rsID, n)

	// Named "u" rather than "uuid" so the local does not shadow the
	// imported uuid package (the original declaration did).
	u, err := uuid.Parse(uuidStr)
	if err != nil {
		panic(err)
	}

	return u
}

// replicasetUUID returns the deterministic UUID of replicaset rsID
// (instance ordinal 0 in the UUID template).
func (c cfgmaker) replicasetUUID(rsID int) uuid.UUID {
	return c.getUUID(rsID, 0)
}

// masterUUID returns the deterministic UUID of the master instance of
// replicaset rsID (instance ordinal 1).
func (c cfgmaker) masterUUID(rsID int) uuid.UUID {
	return c.getUUID(rsID, 1)
}

// followerUUID returns the deterministic UUID of the follower instance of
// replicaset rsID (instance ordinal 2).
func (c cfgmaker) followerUUID(rsID int) uuid.UUID {
	return c.getUUID(rsID, 2)
}

// getInstanceAddr formats a loopback address for the given port.
func (c cfgmaker) getInstanceAddr(port int) string {
	return fmt.Sprintf("127.0.0.1:%d", port)
}

// masterAddr returns the address of the master of replicaset rsID.
// Each replicaset occupies two consecutive ports starting at startPort;
// the master takes the even offset.
func (c cfgmaker) masterAddr(rsID int) string {
	return c.getInstanceAddr(c.startPort + 2*(rsID-1))
}

// followerAddr returns the address of the follower of replicaset rsID —
// the port immediately after the replicaset's master port.
func (c cfgmaker) followerAddr(rsID int) string {
	return c.getInstanceAddr(c.startPort + 2*(rsID-1) + 1)
}

// clusterCfg assembles the topology map consumed by the router: every
// replicaset (deterministic name and UUID) maps to its two instances,
// master first.
func (c cfgmaker) clusterCfg() map[vshardrouter.ReplicasetInfo][]vshardrouter.InstanceInfo {
	cfg := map[vshardrouter.ReplicasetInfo][]vshardrouter.InstanceInfo{}

	for id := 1; id <= c.nreplicasets; id++ {
		rs := vshardrouter.ReplicasetInfo{
			Name: fmt.Sprintf("replicaset_%d", id),
			UUID: c.replicasetUUID(id),
		}
		instances := []vshardrouter.InstanceInfo{
			{Addr: c.masterAddr(id), UUID: c.masterUUID(id)},
			{Addr: c.followerAddr(id), UUID: c.followerUUID(id)},
		}
		cfg[rs] = instances
	}

	return cfg
}
118 changes: 118 additions & 0 deletions tests/tnt/cfgmaker.lua
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
#!/usr/bin/env tarantool

require('strict').on()

-- NOTE(review): config_example is never referenced in this file; it is
-- kept purely as documentation of the config shape that clustercfg()
-- below generates (two replicasets, one master and one follower each).
-- Consider renaming it to _config_example so linters ignore it.
local config_example = {
    sharding = {
        ['cbf06940-0790-498b-948d-042b62cf3d29'] = { -- replicaset #1
            replicas = {
                ['8a274925-a26d-47fc-9e1b-af88ce939412'] = {
                    uri = 'storage:storage@127.0.0.1:3301',
                    name = 'storage_1_a',
                    master = true
                },
                ['3de2e3e1-9ebe-4d0d-abb1-26d301b84633'] = {
                    uri = 'storage:storage@127.0.0.1:3302',
                    name = 'storage_1_b'
                }
            },
        }, -- replicaset #1
        ['ac522f65-aa94-4134-9f64-51ee384f1a54'] = { -- replicaset #2
            replicas = {
                ['1e02ae8a-afc0-4e91-ba34-843a356b8ed7'] = {
                    uri = 'storage:storage@127.0.0.1:3303',
                    name = 'storage_2_a',
                    master = true
                },
                ['001688c3-66f8-4a31-8e19-036c17d489c2'] = {
                    uri = 'storage:storage@127.0.0.1:3304',
                    name = 'storage_2_b'
                }
            },
        }, -- replicaset #2
    }, -- sharding
    replication_connect_quorum = 0,
}

--- Build a deterministic UUID string from a replicaset id and an
-- instance ordinal, e.g. get_uid(1, 2) -> "00000000-0000-0001-0002-000000000000".
-- Must stay in sync with cfgmaker.getUUID on the Go side.
local function get_uid(rs_id, instance_id)
    return ("00000000-0000-%04d-%04d-000000000000"):format(rs_id, instance_id)
end

-- UUID of the replicaset itself (instance ordinal 0).
local function replicaset_uuid(rs_id)
    return get_uid(rs_id, 0)
end

-- UUID of the replicaset's master instance (instance ordinal 1).
local function master_replica_uuid(rs_id)
    return get_uid(rs_id, 1)
end

-- UUID of the replicaset's follower instance (instance ordinal 2).
local function follower_replica_uuid(rs_id)
    return get_uid(rs_id, 2)
end

--- Human-readable instance name for the master of replicaset rs_id.
local function master_replica_name(rs_id)
    return ("storage_%d_master"):format(rs_id)
end

--- Human-readable instance name for the follower of replicaset rs_id.
local function follower_replica_name(rs_id)
    return ("storage_%d_follower"):format(rs_id)
end

--- Build the vshard replica subtable for one instance.
-- Every replicaset occupies two consecutive ports (hence the factor 2):
-- the master on the even offset, the follower right after it.
local function replica_cfg(start_port, rs_id, is_master)
    local base_port = start_port + 2 * (rs_id - 1)
    local cfg = { master = is_master }
    if is_master then
        cfg.uri = ('storage:storage@127.0.0.1:%d'):format(base_port)
        cfg.name = master_replica_name(rs_id)
    else
        cfg.uri = ('storage:storage@127.0.0.1:%d'):format(base_port + 1)
        cfg.name = follower_replica_name(rs_id)
    end
    return cfg
end

-- Convenience wrapper: replica subtable for the master instance.
local function master_replica_cfg(start_port, rs_id)
    return replica_cfg(start_port, rs_id, true)
end

-- Convenience wrapper: replica subtable for the follower instance.
local function follower_replica_cfg(start_port, rs_id)
    return replica_cfg(start_port, rs_id, false)
end

--- Generate a full vshard cluster config: nreplicasets replicasets, each
-- holding one master and one follower (see config_example above for the
-- resulting shape). Ports start at start_port, two per replicaset.
local function clustercfg(start_port, nreplicasets)
    local sharding = {}

    for rs_id = 1, nreplicasets do
        sharding[replicaset_uuid(rs_id)] = {
            replicas = {
                [master_replica_uuid(rs_id)] = master_replica_cfg(start_port, rs_id),
                [follower_replica_uuid(rs_id)] = follower_replica_cfg(start_port, rs_id),
            },
        }
    end

    return {
        sharding = sharding,
        replication_connect_quorum = 0,
    }
end

-- Public module API: the cluster config generator plus the instance UUID
-- helpers the storage/router scripts need. replicaset_uuid and the name
-- helpers stay private.
return {
    clustercfg = clustercfg,
    master_uuid = master_replica_uuid,
    follower_uuid = follower_replica_uuid,
}
Loading
Loading