diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 792c8cc..aded7c7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,4 +1,4 @@ -name: Create and push rockspec for moonlibs/config +name: Run unit tests on: push: @@ -7,13 +7,40 @@ env: ROCK_NAME: config jobs: - test-matrix: + run-luacheck: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - uses: tarantool/setup-tarantool@v2 + with: + tarantool-version: '2.10.7' + - name: install luacheck 0.26.0 + run: tarantoolctl rocks install luacheck 0.26.0 + - name: run luacheck + run: .rocks/bin/luacheck . + run-unit-tests: runs-on: ubuntu-latest strategy: matrix: - version: ["1.10.15", "2.8.4", "2.10.6", "2.11.0", "2.11.1"] + version: ["1.10.15", "2.8.4", "2.10.6", "2.10.7-gc64-amd64", "2.11.0", "2.11.1"] steps: - uses: actions/checkout@master - uses: docker/setup-buildx-action@v2 - name: run test suite for ${{matrix.version}} - run: make test-${{matrix.version}} \ No newline at end of file + run: make test-${{matrix.version}} + run-coverage-report: + runs-on: ubuntu-latest + steps: + - uses: tarantool/setup-tarantool@v2 + with: + tarantool-version: '2.10.7' + - name: install luacov-console 1.1.0 + run: tarantoolctl rocks install luacov-console 1.1.0 + - name: install luacov-coveralls 0.2.3 + run: tarantoolctl rocks install --server=http://luarocks.org luacov-coveralls 0.2.3 + - name: prepare coverage report + run: .rocks/bin/luacov-console "$(pwd)" && .rocks/bin/luacov-console -s + # - name: publish coveralls report + # env: + # COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} + # run: .rocks/bin/luacov-coveralls -v diff --git a/.luacov b/.luacov new file mode 100644 index 0000000..c03ef6c --- /dev/null +++ b/.luacov @@ -0,0 +1,19 @@ +runreport = false +deletestats = false + +exclude = { + "spec/", + "test/", + "test_peek", + "%.rocks/", + "builtin/", +} + +coveralls = { + root = "/", + debug = true, + pathcorrect = { + { "^/home/runner/work/config/config/", "" }, + { "^/source/config", "" }, + }, +} \ No newline at end of file diff --git a/config.lua b/config.lua index 85259ba..d6a9862 100644 --- a/config.lua +++ b/config.lua @@ -1,4 +1,8 @@ +---@diagnostic disable: inject-field local log = require 'log' +if log.new then + log = log.new('moonlibs.config') +end local fio = require 'fio' local json = require 'json'.new() local yaml = require 'yaml'.new() @@ -8,6 +12,9 @@ local clock = require 'clock' json.cfg{ encode_invalid_as_nil = true } yaml.cfg{ encode_use_tostring = true } +---Retrieves all upvalues of given function and returns them as kv-map +---@param fun fun() +---@return table variables local function lookaround(fun) local vars = {} local i = 1 @@ -17,14 +24,26 @@ local function lookaround(fun) vars[n] = v i = i + 1 end - i = 1 - return vars, i - 1 + return vars end +---@private +---@class moonlibs.config.reflect_internals +---@field dynamic_cfg table +---@field default_cfg table +---@field upgrade_cfg? fun(cfg: table, translate_cfg: table): table +---@field template_cfg? table +---@field translate_cfg? table +---@field log? 
table + + +---Unwraps box.cfg and retrieves dynamic_cfg, default_cfg tables +---@return moonlibs.config.reflect_internals local function reflect_internals() local peek = { dynamic_cfg = {}; + default_cfg = {}; upgrade_cfg = true; translate_cfg = true; template_cfg = true; @@ -47,7 +66,12 @@ local function reflect_internals() error(string.format("Neither function nor callable argument %s after steps: %s", peekf, table.concat(steps, ", "))) end - local vars, _ = lookaround(peekf) + local vars = lookaround(peekf) + if type(vars.default_cfg) == 'table' then + for k in pairs(vars.default_cfg) do + peek.default_cfg[k] = vars.default_cfg[k] + end + end if allow_unwrap and (vars.orig_cfg or vars.origin_cfg) then -- It's a wrap of tarantoolctl/tt, unwrap and repeat peekf = (vars.orig_cfg or vars.origin_cfg) @@ -124,7 +148,11 @@ end local load_cfg = reflect_internals() --- TODO: suppress deprecation +---Filters only valid keys from given cfg +--- +---Edits given cfg and returns only clear config +---@param cfg table +---@return table local function prepare_box_cfg(cfg) -- 1. take config, if have upgrade, upgrade it if load_cfg.upgrade_cfg then @@ -311,6 +339,7 @@ local function toboolean(v) return false end +---@type table local master_selection_policies; master_selection_policies = { ['etcd.instance.single'] = function(M, instance_name, common_cfg, instance_cfg, cluster_cfg, local_cfg) @@ -373,8 +402,10 @@ master_selection_policies = { if cluster_cfg.master == instance_name then log.info("Instance is declared as cluster master, set read_only=false") cfg.box.read_only = false - cfg.box.replication_connect_quorum = 1 - cfg.box.replication_connect_timeout = 1 + if cfg.box.bootstrap_strategy ~= 'auto' then + cfg.box.replication_connect_quorum = 1 + cfg.box.replication_connect_timeout = 1 + end else log.info("Cluster has another master %s, not me %s, set read_only=true", cluster_cfg.master, instance_name) cfg.box.read_only = true @@ -472,6 +503,17 @@ local function gen_cluster_uuid(cluster_name) error("Can't generate uuid for cluster "..cluster_name, 2) end +---@class moonlibs.config.opts.etcd:moonlibs.config.etcd.opts +---@field instance_name string Mandatory name of the instance +---@field prefix string Mandatory prefix inside etcd tree +---@field uuid? 'auto' When auto config generates replicaset_uuid and instance_uuid for nodes +---@field fixed? table Optional ETCD tree + +---Loads configuration from etcd and evaluate master_selection_policy +---@param M moonlibs.config +---@param etcd_conf moonlibs.config.opts.etcd +---@param local_cfg table +---@return table local function etcd_load( M, etcd_conf, local_cfg ) local etcd @@ -552,19 +594,13 @@ local function etcd_load( M, etcd_conf, local_cfg ) print("Loaded config from etcd",yaml.encode(all_cfg)) end local common_cfg = all_cfg.common - -- local common_cfg = etcd:get_common() local all_instances_cfg = all_cfg.instances - -- local all_instances_cfg = etcd:get_instances() local instance_cfg = all_instances_cfg[instance_name] assert(instance_cfg,"Instance name "..instance_name.." 
is not known to etcd") - -- local all_clusters_cfg = etcd:get_clusters() local all_clusters_cfg = all_cfg.clusters or all_cfg.shards - -- print(yaml.encode(all_clusters_cfg)) - - local master_selection_policy local cluster_cfg if instance_cfg.cluster or local_cfg.cluster then @@ -628,13 +664,14 @@ local function etcd_load( M, etcd_conf, local_cfg ) "Start instance "..cfg.box.listen, " with replication:"..table.concat(cfg.box.replication,", "), string.format("timeout: %s, quorum: %s, lag: %s", - cfg.box.replication_connect_timeout or 'def:30', + cfg.box.replication_connect_timeout + or ('def:%s'):format(load_cfg.default_cfg.replication_connect_quorum or 30), cfg.box.replication_connect_quorum or 'def:full', - cfg.box.replication_sync_lag or 'def:10' + cfg.box.replication_sync_lag + or ('def:%s'):format(load_cfg.default_cfg.replication_sync_lag or 10) ) ) end - --end end -- print(yaml.encode(cfg)) @@ -675,9 +712,57 @@ local function optimal_rcq(upstreams) return rcq end +local function do_cfg(boxcfg, cfg) + for key, val in pairs(cfg) do + if load_cfg.default_cfg[key] == nil and load_cfg.dynamic_cfg[key] == nil then + local warn = string.format("Dropping non-boxcfg option '%s' given '%s'",key,val) + log.warn("%s",warn) + print(warn) + cfg[key] = nil + end + end + log.info("Just before box.cfg %s", yaml.encode(cfg)) + boxcfg(cfg) +end + + +---@class moonlibs.config.opts +---@field bypass_non_dynamic? boolean (default: true) drops every changed non-dynamic option on reconfiguration +---@field tidy_load? boolean (default: true) recoveries tarantool with read_only=true +---@field mkdir? boolean (default: false) should moonlibs/config create memtx_dir and wal_dir +---@field etcd? moonlibs.config.opts.etcd [legacy] configuration of etcd +---@field default_replication_connect_timeout? number (default: 1.1) default RCT in seconds +---@field default_election_mode? election_mode (default: candidate) option is respected only when etcd.cluster.raft is used +---@field default_synchro_quorum? string|number (default: 'N/2+1') option is respected only when etcd.cluster.raft is used +---@field default_read_only? boolean (default: false) option is respected only when etcd.instance.read_only is used (deprecated) +---@field master_selection_policy? 'etcd.cluster.master'|'etcd.cluster.vshard'|'etcd.cluster.raft'|'etcd.instance.single' master selection policy +---@field strict_mode? boolean (default: false) stricts config retrievals. if key is not found config.get will raise an exception +---@field strict? boolean (default: false) stricts config retrievals. if key is not found config.get will raise an exception +---@field default? table (default: nil) globally default options for config.get +---@field on_load? fun(conf: moonlibs.config, cfg: table) callback which is called every time config is loaded from file and ETCD +---@field load? fun(conf: moonlibs.config, cfg: table): table do not use this callback +---@field on_before_cfg? fun(conf: moonlibs.config, cfg: table) callback is called right before running box.cfg (but after on_load) +---@field boxcfg? fun(cfg: table) [legacy] when provided this function will be called instead box.cfg. tidy_load and everything else will not be used. +---@field wrap_box_cfg? fun(cfg: table) callback is called instead box.cfg. But tidy_load is respected. Use this, if you need to proxy every option to box.cfg on application side +---@field on_after_cfg? 
fun(conf: moonlibs.config, cfg: table) callback which is called after full tarantool configuration + +---@class moonlibs.config: moonlibs.config.opts +---@field etcd moonlibs.config.etcd +---@field public _load_cfg table +---@field public _flat table +---@field public _fencing_f? Fiber +---@operator call(moonlibs.config.opts): moonlibs.config + +---@type moonlibs.config local M M = setmetatable({ console = {}; + ---Retrieves value from config + ---@overload fun(k: string, def: any?): any? + ---@param self moonlibs.config + ---@param k string path inside config + ---@param def? any optional default value + ---@return any? get = function(self,k,def) if self ~= M then def = k @@ -696,6 +781,9 @@ local M end end },{ + ---Reinitiates moonlibs.config + ---@param args moonlibs.config.opts + ---@return moonlibs.config __call = function(_, args) -- args MUST belong to us, because of modification local file @@ -721,6 +809,7 @@ local M M.master_selection_policy = args.master_selection_policy M.default = args.default M.strict_mode = args.strict_mode or args.strict or false + M._load_cfg = load_cfg -- print("config", "loading ",file, json.encode(args)) if not file then file = get_opt() @@ -802,6 +891,10 @@ local M error("No box.* config given", 2) end + if cfg.box.remote_addr then + cfg.box.remote_addr = nil + end + if args.bypass_non_dynamic then cfg.box = prepare_box_cfg(cfg.box) end @@ -812,10 +905,6 @@ local M cfg.sys.boxcfg = nil cfg.sys.on_load = nil - -- if not cfg.box.custom_proc_title and args.instance_name then - -- cfg.box.custom_proc_title = args.instance_name - -- end - -- latest modifications and fixups if args.on_load then args.on_load(M,cfg) @@ -823,7 +912,7 @@ local M return cfg end - local cfg = load_config() + local cfg = load_config() --[[@as table]] M._flat = flatten(cfg) @@ -852,16 +941,10 @@ local M end end - if cfg.box.remote_addr then - cfg.box.remote_addr = nil - end - - - -- print(string.format("Starting app: %s", yaml.encode(cfg.box))) - local boxcfg + local boxcfg = box.cfg if args.boxcfg then - args.boxcfg( cfg.box ) + do_cfg(args.boxcfg, cfg.box) else if args.wrap_box_cfg then boxcfg = args.wrap_box_cfg @@ -877,7 +960,7 @@ local M snap_dir = "." 
end end - local bootstrapped = false + local bootstrapped for _,v in pairs(fio.glob(snap_dir..'/*.snap')) do bootstrapped = v end @@ -886,13 +969,15 @@ local M print("Have etcd, use tidy load") local ro = cfg.box.read_only cfg.box.read_only = true - if not ro then - -- Only if node should be master - cfg.box.replication_connect_quorum = 1 - cfg.box.replication_connect_timeout = M.default_replication_connect_timeout - elseif not cfg.box.replication_connect_quorum then - -- For replica tune up to N/2+1 - cfg.box.replication_connect_quorum = optimal_rcq(cfg.box.replication) + if cfg.box.bootstrap_strategy ~= 'auto' then + if not ro then + -- Only if node should be master + cfg.box.replication_connect_quorum = 1 + cfg.box.replication_connect_timeout = M.default_replication_connect_timeout + elseif not cfg.box.replication_connect_quorum then + -- For replica tune up to N/2+1 + cfg.box.replication_connect_quorum = optimal_rcq(cfg.box.replication) + end end log.info("Start tidy loading with ro=true%s rcq=%s rct=%s (snap=%s)", ro ~= true and string.format(' (would be %s)',ro) or '', @@ -900,17 +985,31 @@ local M bootstrapped ) else - if not cfg.box.replication_connect_quorum then - cfg.box.replication_connect_quorum = optimal_rcq(cfg.box.replication) + -- not bootstraped yet cluster + + -- if cfg.box.bootstrap_strategy == 'auto' then -- ≥ Tarantool 2.11 + -- local ro = cfg.box.read_only + -- local is_candidate = cfg.box.election_mode == 'candidate' + -- if not ro and not is_candidate then + -- -- master but not Raft/candidate + -- -- we decrease replication for master, + -- -- to allow him bootstrap himself + -- cfg.box.replication = {cfg.box.remote_addr or cfg.box.listen} + -- end + if cfg.box.bootstrap_strategy ~= 'auto' then -- < Tarantool 2.11 + if cfg.box.replication_connect_quorum == nil then + cfg.box.replication_connect_quorum = optimal_rcq(cfg.box.replication) + end end + log.info("Start non-bootstrapped tidy loading with ro=%s rcq=%s rct=%s (dir=%s)", - cfg.box.read_only, cfg.box.replication_connect_quorum, cfg.box.replication_connect_timeout, snap_dir) + cfg.box.read_only, cfg.box.replication_connect_quorum, + cfg.box.replication_connect_timeout, snap_dir + ) end end - log.info("Just before box.cfg %s", yaml.encode( cfg.box )) - - ;(boxcfg or box.cfg)( cfg.box ) + do_cfg(boxcfg, cfg.box) log.info("Reloading config after start") @@ -932,14 +1031,14 @@ local M if diff_box then log.info("Reconfigure after load with %s",require'json'.encode(diff_box)) - ;(boxcfg or box.cfg)(diff_box) + do_cfg(boxcfg, diff_box) else log.info("Config is actual after load") end M._flat = flatten(new_cfg) else - (boxcfg or box.cfg)( cfg.box ) + do_cfg(boxcfg, cfg.box) end else local replication = cfg.box.replication_source or cfg.box.replication @@ -951,12 +1050,12 @@ local M cfg.box.replication = nil cfg.box.replication_source = nil - (boxcfg or box.cfg)( cfg.box ) + do_cfg(boxcfg, cfg.box) cfg.box.replication = r cfg.box.replication_source = rs else - (boxcfg or box.cfg)( cfg.box ) + do_cfg(boxcfg, cfg.box) end end end @@ -969,7 +1068,7 @@ local M local msp = config.get('sys.master_selection_policy') if type(cfg.etcd) == 'table' and config.get('etcd.fencing_enabled') - and msp == 'etcd.cluster.master' + and (msp == 'etcd.cluster.master' or msp == 'etcd.cluster.vshard') and type(cfg.cluster) == 'string' and cfg.cluster ~= '' and config.get('etcd.reduce_listing_quorum') ~= true then diff --git a/config/etcd.lua b/config/etcd.lua index f6d5ee1..6eac97d 100644 --- a/config/etcd.lua +++ b/config/etcd.lua @@ -1,11 
+1,34 @@ local json = require 'json' local log = require 'log' +if log.new then + log = log.new('moonlibs.config') +end local fiber = require 'fiber' local clock = require 'clock' local http_client = require 'http.client' local digest = require 'digest' +---@class moonlibs.config.etcd.opts +---@field endpoints? string[] (default: {'http://127.0.0.1:4001','http://127.0.0.1:2379'}) list of clientURLs to etcd +---@field timeout? number (default: 1) timeout of requests to each etcd node +---@field boolean_auto? boolean (default: false) when true, the string values `true` and `false` are converted to booleans +---@field print_config? boolean (default: false) when true, the configuration loaded from etcd is printed out +---@field discover_endpoints? boolean (default: true) when false, the connector does not automatically discover etcd endpoints +---@field reduce_listing_quorum? boolean (default: false) when true, the connector does not request etcd:list with quorum +---@field login? string allows specifying the username for each request (Basic-auth) +---@field password? string allows specifying the password for each request (Basic-auth) + +---@class moonlibs.config.etcd +---@field endpoints string[] (default: {'http://127.0.0.1:4001','http://127.0.0.1:2379'}) list of clientURLs to etcd +---@field client http +---@field timeout number (default: 1) timeout of requests to each etcd node +---@field boolean_auto? boolean (default: false) when true, the string values `true` and `false` are converted to booleans +---@field print_config? boolean (default: false) when true, the configuration loaded from etcd is printed out +---@field discover_endpoints boolean (default: true) when false, the connector does not automatically discover etcd endpoints +---@field reduce_listing_quorum? boolean (default: false) when true, the connector does not request etcd:list with quorum +---@field authorization? string Authorization header for Basic-auth (set only when login is present) +---@field headers? table headers that are sent with each request local M = {} M.err = {} @@ -30,6 +53,10 @@ function M.errstr(code) return M.err[ tonumber(code) ] or string.format("Unknown error %s",code) end +---Creates a new etcd connector +---@param mod moonlibs.config.etcd +---@param options moonlibs.config.etcd.opts +---@return moonlibs.config.etcd function M.new(mod,options) local self = setmetatable({},{__index=mod}) self.endpoints = options.endpoints or {'http://127.0.0.1:4001','http://127.0.0.1:2379'} @@ -51,6 +78,8 @@ end setmetatable(M,{ __call = M.new }) +---Discovers every ETCD endpoint by requesting clientURLs (/v2/members) +---TODO: make it parallel function M:discovery() local start_at = clock.time() local timeout = self.timeout or 1 @@ -97,6 +126,16 @@ function M:discovery() self.current = math.random(#self.endpoints) end +---@class moonlibs.etcd.request.opts +---@field deadline? number deadline of request (in seconds, fractional) +---@field timeout? number timeout of request to each node (in seconds, fractional) +---@field body? string request body (for PUT) + +---Performs an etcd request +---@param method 'PUT'|'GET'|'DELETE'|'HEAD' http_method +---@param path string etcd path after /v2/ +---@param args? 
moonlibs.etcd.request.opts +---@return table, HTTPResponse function M:request(method, path, args ) -- path must be prefixed outside -- TODO: auth @@ -108,6 +147,8 @@ function M:request(method, path, args ) table.insert(query, '=') table.insert(query, tostring(v)) end + else + args = {} end local qs if #query > 0 then qs = '?'..table.concat(query) else qs = '' end @@ -127,9 +168,20 @@ function M:request(method, path, args ) if deadline then request_timeout = math.min(deadline-fiber.time(), request_timeout) end + local s = clock.time() local x = self.client.request(method,uri,body,{timeout = request_timeout; headers = self.headers}) lastresponse = x local status,reply = pcall(json.decode,x and x.body) + local logger = log.verbose + if x.status >= 500 then + logger = log.error + end + logger("%s %s (to:%.3fs) finished with %s%s %s (in %.3fs)", + method, uri, request_timeout, x.status, + status and reply and reply.errorCode and (reply.message or M.err[reply.errorCode] or reply.errorCode), + (x.headers or {})['X-Etcd-Index'], + clock.time()-s + ) -- 408 for timeout if x.status < 500 and x.status ~= 408 then @@ -193,6 +245,15 @@ function M:recursive_extract(cut, node, storage) if not storage then return _storage[''] end end +---@class moonlibs.config.etcd.list.opts:moonlibs.etcd.request.opts +---@field recursive? boolean (default: true) should listing be recursive +---@field quorum? boolean (default: not reduce_listing_quorum) when true requests quorum read + +---Performs listing by given path +---@param keyspath string path inside etcd +---@param opts moonlibs.config.etcd.list.opts +---@return unknown +---@return HTTPResponse function M:list(keyspath, opts) if type(opts) ~= 'table' then opts = {} @@ -218,6 +279,14 @@ function M:list(keyspath, opts) end end +---@class moonlibs.config.etcd.wait.opts +---@field timeout? 
number (default: etcd.timeout) timeout for each node to await changes +---@field index number etcd-index that should be awaited + +---Awaits any change in subtree recursively +---@param keyspath string +---@param args moonlibs.config.etcd.wait.opts +---@return boolean not_timed_out, HTTPResponse function M:wait(keyspath, args) args = args or {} local _, response = self:request("GET","keys"..keyspath, { diff --git a/run_test_in_docker.sh b/run_test_in_docker.sh index b9bff2e..d8bd305 100755 --- a/run_test_in_docker.sh +++ b/run_test_in_docker.sh @@ -2,5 +2,5 @@ pwd rm -rf /root/.cache/ -cp -ar /root/.rocks /source/config/.rocks +cp -ar /root/.rocks /source/config/ /source/config/.rocks/bin/luatest --coverage -v spec/ diff --git a/spec/02_cluster_master_test.lua b/spec/02_cluster_master_test.lua index 93343f3..db3877c 100644 --- a/spec/02_cluster_master_test.lua +++ b/spec/02_cluster_master_test.lua @@ -1,5 +1,6 @@ local t = require 'luatest' --[[@as luatest]] local uuid = require 'uuid' +local fiber = require 'fiber' ---@class test.config.master:luatest.group local g = t.group('master', { @@ -32,6 +33,20 @@ local init_lua = fio.pathjoin(root, 'mock', 'single', 'init.lua') local base_env local h = require 'spec.helper' + +---@class moonlibs.config.test.tarantool +---@field server luatest.server +---@field net_box_port number +---@field env table +---@field name string + +---@class moonlibs.config.test.context +---@field tts moonlibs.config.test.tarantool[] +---@field env table +---@field etcd_config table +---@field params table + +---@type table local test_ctx = {} g.before_each(function(cg) @@ -47,12 +62,12 @@ g.before_each(function(cg) base_env.TT_MEMTX_DIR = working_dir local base_config = { - etcd = { - fencing_enabled = true, - }, apps = { single = { - common = { box = { log_level = 1 } }, + common = { + etcd = { fencing_enabled = true }, + box = { log_level = 5 }, + }, clusters = { single = { master = cg.params.master, @@ -188,3 +203,97 @@ function g.test_reload(cg) }, 'get("sys") has correct fields after restart') end end + +function g.test_fencing(cg) + local ctx = test_ctx[cg.name] + t.skip_if(not ctx.etcd_config.apps.single.common.etcd.fencing_enabled, "fencing disabled") + + -- Start tarantools + h.start_all_tarantools(ctx, init_lua, root, ctx.etcd_config.apps.single.instances) + + -- Check configuration + for _, tnt in ipairs(ctx.tts) do + tnt.server:connect_net_box() + local box_cfg = tnt.server:get_box_cfg() + t.assert_covers(box_cfg, { + log_level = ctx.etcd_config.apps.single.common.box.log_level, + listen = ctx.etcd_config.apps.single.instances[tnt.name].box.listen, + read_only = ctx.etcd_config.apps.single.clusters.single.master ~= tnt.name, + }, 'box.cfg is correct') + + local conn = tnt.server --[[@as luatest.server]] + local ret = conn:exec(function() + local r = table.deepcopy(config.get('sys')) + for k, v in pairs(r) do + if type(v) == 'function' then + r[k] = nil + end + end + return r + end) + + t.assert_covers(ret, { + instance_name = tnt.name, + master_selection_policy = 'etcd.cluster.master', + file = base_env.TT_CONFIG, + }, 'get("sys") has correct fields') + end + + local master_name = ctx.params.master + + ---@type moonlibs.config.test.tarantool + local master + for _, tt in ipairs(ctx.tts) do + if tt.name == master_name then + master = tt + break + end + end + + t.assert(master, 'master is not connected') + + local ret = master.server:exec(function() + return { cfg_ro = box.cfg.read_only, ro = box.info.ro } + end) + + t.assert_equals(ret.cfg_ro, false, 
'box.cfg.read_only == false (before fencing)') + t.assert_equals(ret.ro, false, 'box.info.ro == false (before fencing)') + + ctx.etcd_config.apps.single.clusters.single.master = 'not_exists' + h.upload_to_etcd(ctx.etcd_config) + + local fencing_cfg = ctx.etcd_config.apps.single.common.etcd + local fencing_timeout = fencing_cfg.fencing_timeout or 10 + local fencing_pause = fencing_cfg.fencing_pause or fencing_timeout/2 + + t.helpers.retrying({ + timeout = fencing_pause, + delay = 0.1, + }, function () + local ret = master.server:exec(function() + return { cfg_ro = box.cfg.read_only, ro = box.info.ro } + end) + assert(ret.cfg_ro, "cfg.read_only must be true") + assert(ret.ro, "info.ro must be true") + end) + + local ret = master.server:exec(function() + return { cfg_ro = box.cfg.read_only, ro = box.info.ro } + end) + + t.assert_equals(ret.cfg_ro, true, 'box.cfg.read_only == true') + t.assert_equals(ret.ro, true, 'box.info.ro == true') + + ctx.etcd_config.apps.single.clusters.single.master = master_name + h.upload_to_etcd(ctx.etcd_config) + + local deadline = 2*fencing_timeout+fiber.time() + while fiber.time() < deadline do + local ret2 = master.server:exec(function() + return { cfg_ro = box.cfg.read_only, ro = box.info.ro } + end) + + t.assert_equals(ret2.cfg_ro, true, 'box.cfg.read_only == true (double check)') + t.assert_equals(ret2.ro, true, 'box.info.ro == true (double check)') + end +end diff --git a/test/Dockerfile b/test/Dockerfile index a3e071c..a5549e2 100644 --- a/test/Dockerfile +++ b/test/Dockerfile @@ -1,4 +1,4 @@ -FROM tarantool/tarantool:2.11 +FROM tarantool/tarantool:2.11.1 RUN apk add --no-cache -u iproute2 make bind-tools WORKDIR /opt/tarantool diff --git a/test/app/conf.lua b/test/app/conf.lua index d8cd24c..1e53e8d 100644 --- a/test/app/conf.lua +++ b/test/app/conf.lua @@ -2,19 +2,8 @@ etcd = { instance_name = os.getenv("TT_INSTANCE_NAME"), prefix = '/instance', endpoints = {"http://etcd0:2379","http://etcd1:2379","http://etcd2:2379"}, - fencing_enabled = true, + fencing_enabled = false, timeout = 2, -} - -box = { - background = false, - log_level = 6, - log_format = 'plain', - - memtx_dir = '/var/lib/tarantool/snaps/', - wal_dir = '/var/lib/tarantool/xlogs', -} - -app = { - + login = 'username', + password = 'password', } diff --git a/test/app/init.lua b/test/app/init.lua index 7059c9b..bd23cde 100644 --- a/test/app/init.lua +++ b/test/app/init.lua @@ -1,9 +1,9 @@ local fiber = require "fiber" require 'package.reload' + require 'config' { mkdir = true, - print_config = true, instance_name = os.getenv("TT_INSTANCE_NAME"), file = 'conf.lua', master_selection_policy = 'etcd.cluster.master', @@ -31,9 +31,10 @@ fiber.create(function() for _ = 1, 10 do local f = fiber.create(function() fiber.self():set_joinable(true) - for i = 1, 100 do - box.space.T:replace{i, box.info.id, box.info.vclock} + for _ = 1, 10 do + box.space.T:insert{box.space.T:len(), box.info.id, box.info.vclock} end + fiber.sleep(0.001) end) table.insert(fibers, f) end diff --git a/test/instance.etcd.yaml b/test/instance.etcd.yaml index d23f6ce..9261b50 100644 --- a/test/instance.etcd.yaml +++ b/test/instance.etcd.yaml @@ -9,18 +9,22 @@ instance: fencing_timeout: 10 fencing_pause: 5 box: - replication_connect_quorum: 1 + bootstrap_strategy: auto log_level: 5 + replication_connect_timeout: 3 + listen: 0.0.0.0:3301 memtx_memory: 268435456 + memtx_dir: /var/lib/tarantool/snaps/ + wal_dir: /var/lib/tarantool/xlogs/ instances: instance_01: cluster: instance box: instance_uuid: 91157a11-0000-0001-0000-000000000000 - 
listen: instance_01:3301 + remote_addr: instance_01:3301 instance_02: cluster: instance box: instance_uuid: 91157a11-0000-0002-0000-000000000000 - listen: instance_02:3302 + remote_addr: instance_02:3301 ...
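
Usage sketch (not part of the patch above): the snippet below shows how an application init.lua might wire the options documented by the new moonlibs.config.opts annotations, in the spirit of test/app/init.lua and test/app/conf.lua. It assumes conf.lua carries the etcd section (endpoints, prefix, login/password); instance names and environment variables are placeholders.

local log = require 'log'

require 'config' {
    mkdir = true,
    instance_name = os.getenv("TT_INSTANCE_NAME"),
    file = 'conf.lua',  -- supplies the etcd = { ... } section, including login/password
    master_selection_policy = 'etcd.cluster.master',

    -- wrap_box_cfg is called instead of box.cfg, but tidy_load is still respected;
    -- by this point do_cfg() has already dropped keys that box.cfg would not accept.
    wrap_box_cfg = function(cfg)
        log.info("applying box.cfg: listen=%s read_only=%s",
            tostring(cfg.listen), tostring(cfg.read_only))
        box.cfg(cfg)
    end,

    on_after_cfg = function(conf, _cfg)
        log.info("%s configured, ro=%s",
            tostring(conf:get('sys.instance_name')), tostring(box.info.ro))
    end,
}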