diff --git a/changelog/unreleased/kong/11464.yml b/changelog/unreleased/kong/11464.yml new file mode 100644 index 000000000000..636ff051f14c --- /dev/null +++ b/changelog/unreleased/kong/11464.yml @@ -0,0 +1,7 @@ +message: Fix an issue where the TTL of the key-auth plugin did not work in DB-less and hybrid modes. +type: bugfix +scope: Core +prs: + - 11464 +jiras: + - "FTI-4512" diff --git a/changelog/unreleased/kong/11566.yml b/changelog/unreleased/kong/11566.yml new file mode 100644 index 000000000000..317a9c3ac93f --- /dev/null +++ b/changelog/unreleased/kong/11566.yml @@ -0,0 +1,7 @@ +message: "Use deep copies of the Route, Service, and Consumer objects when serializing logs" +type: bugfix +scope: PDK +prs: + - 11566 +jiras: + - "FTI-5357" diff --git a/changelog/unreleased/kong/11613.yml b/changelog/unreleased/kong/11613.yml new file mode 100644 index 000000000000..907848b39221 --- /dev/null +++ b/changelog/unreleased/kong/11613.yml @@ -0,0 +1,4 @@ +message: "Bumped lua-resty-aws from 1.3.2 to 1.3.5" +type: dependency +prs: + - 11613 diff --git a/changelog/unreleased/kong/acl_cache_warmup.yml b/changelog/unreleased/kong/acl_cache_warmup.yml new file mode 100644 index 000000000000..aa5b6b8791c7 --- /dev/null +++ b/changelog/unreleased/kong/acl_cache_warmup.yml @@ -0,0 +1,5 @@ +message: Fix an issue where the cache warmup mechanism did not warm up the consumer group data used by the `acls` plugin. +type: bugfix +scope: Core +prs: + - 11414 diff --git a/changelog/unreleased/kong/rate-limiting-fix-redis-sync-rate.yml b/changelog/unreleased/kong/rate-limiting-fix-redis-sync-rate.yml new file mode 100644 index 000000000000..959e7263dc6b --- /dev/null +++ b/changelog/unreleased/kong/rate-limiting-fix-redis-sync-rate.yml @@ -0,0 +1,3 @@ +message: "**Rate Limiting**: improve counter accuracy when `sync_rate` is used with the `redis` policy." +type: bugfix +scope: Plugin diff --git a/changelog/unreleased/kong/response_status_code.yml b/changelog/unreleased/kong/response_status_code.yml new file mode 100644 index 000000000000..fcfb94f9e264 --- /dev/null +++ b/changelog/unreleased/kong/response_status_code.yml @@ -0,0 +1,3 @@ +message: Fix an issue where the response status code was not the real upstream status when using the `kong.response` function. +type: bugfix +scope: Core diff --git a/changelog/unreleased/kong/rl-shared-sync-timer.yml b/changelog/unreleased/kong/rl-shared-sync-timer.yml new file mode 100644 index 000000000000..e07b78236dab --- /dev/null +++ b/changelog/unreleased/kong/rl-shared-sync-timer.yml @@ -0,0 +1,3 @@ +message: "**Rate Limiting**: fix an issue where all counters were synced to the same database at the same rate."
+type: bugfix +scope: Plugin diff --git a/kong-3.4.3-0.rockspec b/kong-3.4.3-0.rockspec index 2d4d38f9c064..aa85c599a6a3 100644 --- a/kong-3.4.3-0.rockspec +++ b/kong-3.4.3-0.rockspec @@ -33,7 +33,7 @@ dependencies = { "lua-protobuf == 0.5.0", "lua-resty-healthcheck == 1.6.3", "lua-messagepack == 0.5.2", - "lua-resty-aws == 1.3.2", + "lua-resty-aws == 1.3.5", "lua-resty-openssl == 0.8.23", "lua-resty-counter == 0.2.1", "lua-resty-ipmatcher == 0.6.1", diff --git a/kong/cache/warmup.lua b/kong/cache/warmup.lua index e92666888bb9..4dee26539357 100644 --- a/kong/cache/warmup.lua +++ b/kong/cache/warmup.lua @@ -1,6 +1,10 @@ local utils = require "kong.tools.utils" local constants = require "kong.constants" local buffer = require "string.buffer" +local acl_groups +if utils.load_module_if_exists("kong.plugins.acl.groups") then + acl_groups = require "kong.plugins.acl.groups" +end local cache_warmup = {} @@ -136,6 +140,14 @@ function cache_warmup.single_dao(dao) if not ok then return nil, err end + + if entity_name == "acls" and acl_groups ~= nil then + log(NOTICE, "warmup acl groups cache for consumer id: ", entity.consumer.id , "...") + local _, err = acl_groups.warmup_groups_cache(entity.consumer.id) + if err then + log(NOTICE, "warmup acl groups cache for consumer id: ", entity.consumer.id , " err: ", err) + end + end end if entity_name == "services" and host_count > 0 then diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index 5b4b011c42c6..b6c28bf2795a 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -283,6 +283,12 @@ local function validate_options_value(self, options) end end + if options.export ~= nil then + if type(options.export) ~= "boolean" then + errors.export = "must be a boolean" + end + end + if next(errors) then return nil, errors end @@ -1103,6 +1109,21 @@ function DAO:each(size, options) end +function DAO:each_for_export(size, options) + if self.strategy.schema.ttl then + if not options then + options = get_pagination_options(self, options) + else + options = utils.cycle_aware_deep_copy(options, true) + end + + options.export = true + end + + return self:each(size, options) +end + + function DAO:insert(entity, options) validate_entity_type(entity) diff --git a/kong/db/declarative/export.lua b/kong/db/declarative/export.lua index 7317d94d1e95..c3b6b8c1366b 100644 --- a/kong/db/declarative/export.lua +++ b/kong/db/declarative/export.lua @@ -142,7 +142,7 @@ local function export_from_db_impl(emitter, skip_ws, skip_disabled_entities, exp if db[name].pagination then page_size = db[name].pagination.max_page_size end - for row, err in db[name]:each(page_size, GLOBAL_QUERY_OPTS) do + for row, err in db[name]:each_for_export(page_size, GLOBAL_QUERY_OPTS) do if not row then end_transaction(db) kong.log.err(err) diff --git a/kong/db/schema/others/declarative_config.lua b/kong/db/schema/others/declarative_config.lua index fe8d3aaaa863..145bb7f97783 100644 --- a/kong/db/schema/others/declarative_config.lua +++ b/kong/db/schema/others/declarative_config.lua @@ -830,6 +830,10 @@ local function flatten(self, input) end end + if schema.ttl and entry.ttl and entry.ttl ~= null then + flat_entry.ttl = entry.ttl + end + entities[entity][id] = flat_entry end end diff --git a/kong/db/strategies/off/init.lua b/kong/db/strategies/off/init.lua index 0ad6c6194290..2edceff6863d 100644 --- a/kong/db/strategies/off/init.lua +++ b/kong/db/strategies/off/init.lua @@ -54,6 +54,19 @@ local function ws(schema, options) end +local function process_ttl_field(entity) + if entity and 
entity.ttl and entity.ttl ~= null then + local ttl_value = entity.ttl - ngx.time() + if ttl_value > 0 then + entity.ttl = ttl_value + else + entity = nil -- do not return the expired entity + end + end + return entity +end + + -- Returns a dict of entity_ids tagged according to the given criteria. -- Currently only the following kinds of keys are supported: -- * A key like `services||@list` will only return service keys @@ -157,6 +170,7 @@ local function page_for_key(self, key, size, offset, options) yield() local ret = {} + local ret_idx = 1 local schema = self.schema local schema_name = schema.name @@ -194,7 +208,14 @@ local function page_for_key(self, key, size, offset, options) return nil, "stale data detected while paginating" end - ret[i - offset + 1] = schema:process_auto_fields(item, "select", true, PROCESS_AUTO_FIELDS_OPTS) + if schema.ttl then + item = process_ttl_field(item) + end + + if item then + ret[ret_idx] = schema:process_auto_fields(item, "select", true, PROCESS_AUTO_FIELDS_OPTS) + ret_idx = ret_idx + 1 + end end if offset then @@ -211,6 +232,13 @@ local function select_by_key(schema, key) return nil, err end + if schema.ttl then + entity = process_ttl_field(entity) + if not entity then + return nil + end + end + entity = schema:process_auto_fields(entity, "select", true, PROCESS_AUTO_FIELDS_OPTS) return entity diff --git a/kong/db/strategies/postgres/connector.lua b/kong/db/strategies/postgres/connector.lua index ba4b73ef9043..a23fc45f96f9 100644 --- a/kong/db/strategies/postgres/connector.lua +++ b/kong/db/strategies/postgres/connector.lua @@ -567,7 +567,7 @@ function _mt:query(sql, operation) -- we cannot cleanup the connection ngx.log(ngx.ERR, "failed to disconnect: ", err) end - self.store_connection(nil, operation) + self:store_connection(nil, operation) elseif is_new_conn then local keepalive_timeout = self:get_keepalive_timeout(operation) diff --git a/kong/db/strategies/postgres/init.lua b/kong/db/strategies/postgres/init.lua index 458b0b6e0fe9..23cf52384ec6 100644 --- a/kong/db/strategies/postgres/init.lua +++ b/kong/db/strategies/postgres/init.lua @@ -481,6 +481,10 @@ local function page(self, size, token, foreign_key, foreign_entity_name, options statement_name = "page" .. suffix end + if options and options.export then + statement_name = statement_name .. "_for_export" + end + if token then local token_decoded = decode_base64(token) if not token_decoded then @@ -1022,6 +1026,7 @@ function _M.new(connector, schema, errors) ws_id_select_where = "(" .. ws_id_escaped .. " = $0)" end + local select_for_export_expressions local ttl_select_where if has_ttl then fields_hash.ttl = { timestamp = true } @@ -1030,6 +1035,13 @@ function _M.new(connector, schema, errors) insert(insert_expressions, "$" .. #insert_names) insert(insert_columns, ttl_escaped) + select_for_export_expressions = concat { + select_expressions, ",", + "FLOOR(EXTRACT(EPOCH FROM (", + ttl_escaped, " AT TIME ZONE 'UTC'", + "))) AS ", ttl_escaped + } + select_expressions = concat { select_expressions, ",", "FLOOR(EXTRACT(EPOCH FROM (", @@ -1078,6 +1090,7 @@ function _M.new(connector, schema, errors) self.statements["truncate_global"] = self.statements["truncate"] local add_statement + local add_statement_for_export do local function add(name, opts, add_ws) local orig_argn = opts.argn @@ -1106,6 +1119,14 @@ function _M.new(connector, schema, errors) add(name .. 
"_global", opts, false) add(name, opts, true) end + + add_statement_for_export = function(name, opts) + add_statement(name, opts) + if has_ttl then + opts.code[2] = select_for_export_expressions + add_statement(name .. "_for_export", opts) + end + end end add_statement("insert", { @@ -1181,7 +1202,7 @@ function _M.new(connector, schema, errors) } }) - add_statement("page_first", { + add_statement_for_export("page_first", { operation = "read", argn = { LIMIT }, argv = single_args, @@ -1196,7 +1217,7 @@ function _M.new(connector, schema, errors) } }) - add_statement("page_next", { + add_statement_for_export("page_next", { operation = "read", argn = page_next_names, argv = page_next_args, @@ -1246,7 +1267,7 @@ function _M.new(connector, schema, errors) local statement_name = "page_for_" .. foreign_entity_name - add_statement(statement_name .. "_first", { + add_statement_for_export(statement_name .. "_first", { operation = "read", argn = argn_first, argv = argv_first, @@ -1262,7 +1283,7 @@ function _M.new(connector, schema, errors) } }) - add_statement(statement_name .. "_next", { + add_statement_for_export(statement_name .. "_next", { operation = "read", argn = argn_next, argv = argv_next, @@ -1297,7 +1318,7 @@ function _M.new(connector, schema, errors) for cond, op in pairs({["_and"] = "@>", ["_or"] = "&&"}) do - add_statement("page_by_tags" .. cond .. "_first", { + add_statement_for_export("page_by_tags" .. cond .. "_first", { operation = "read", argn = argn_first, argv = {}, @@ -1313,7 +1334,7 @@ function _M.new(connector, schema, errors) }, }) - add_statement("page_by_tags" .. cond .. "_next", { + add_statement_for_export("page_by_tags" .. cond .. "_next", { operation = "read", argn = argn_next, argv = {}, diff --git a/kong/init.lua b/kong/init.lua index e6b95036766a..0e59302473c6 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -1455,7 +1455,7 @@ do local res = ngx.location.capture("/kong_buffered_http", options) if res.truncated and options.method ~= ngx.HTTP_HEAD then ctx.KONG_PHASE = PHASES.error - ngx.status = 502 + ngx.status = res.status or 502 if has_timing then req_dyn_hook_run_hooks(ctx, "timing", "after:response") diff --git a/kong/pdk/log.lua b/kong/pdk/log.lua index 5e95566fe4e7..a0914e525421 100644 --- a/kong/pdk/log.lua +++ b/kong/pdk/log.lua @@ -16,7 +16,7 @@ local inspect = require "inspect" local ngx_ssl = require "ngx.ssl" local phase_checker = require "kong.pdk.private.phases" local utils = require "kong.tools.utils" - +local cycle_aware_deep_copy = utils.cycle_aware_deep_copy local sub = string.sub local type = type @@ -810,7 +810,7 @@ do end end - -- The value of upstream_status is a string, and status codes may be + -- The value of upstream_status is a string, and status codes may be -- seperated by comma or grouped by colon, according to -- the nginx doc: http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream_status local upstream_status = var.upstream_status or "" @@ -841,9 +841,9 @@ do }, tries = (ctx.balancer_data or {}).tries, authenticated_entity = build_authenticated_entity(ctx), - route = ctx.route, - service = ctx.service, - consumer = ctx.authenticated_consumer, + route = cycle_aware_deep_copy(ctx.route), + service = cycle_aware_deep_copy(ctx.service), + consumer = cycle_aware_deep_copy(ctx.authenticated_consumer), client_ip = var.remote_addr, started_at = okong.request.get_start_time(), } @@ -882,9 +882,9 @@ do }, tries = (ctx.balancer_data or {}).tries, authenticated_entity = build_authenticated_entity(ctx), - route = ctx.route, - service 
= ctx.service, - consumer = ctx.authenticated_consumer, + route = cycle_aware_deep_copy(ctx.route), + service = cycle_aware_deep_copy(ctx.service), + consumer = cycle_aware_deep_copy(ctx.authenticated_consumer), client_ip = var.remote_addr, started_at = okong.request.get_start_time(), } diff --git a/kong/plugins/acl/groups.lua b/kong/plugins/acl/groups.lua index ae77100baa52..ee7fcca0d9c3 100644 --- a/kong/plugins/acl/groups.lua +++ b/kong/plugins/acl/groups.lua @@ -196,6 +196,16 @@ local function group_in_groups(groups_to_check, groups) end end +local function warmup_groups_cache(consumer_id) + local cache_key = kong.db.acls:cache_key(consumer_id) + local _, err = kong.cache:get(cache_key, nil, + load_groups_into_memory, + { id = consumer_id }) + if err then + return nil, err + end +end + return { get_current_consumer_id = get_current_consumer_id, @@ -203,4 +213,5 @@ return { get_authenticated_groups = get_authenticated_groups, consumer_in_groups = consumer_in_groups, group_in_groups = group_in_groups, + warmup_groups_cache = warmup_groups_cache, } diff --git a/kong/plugins/rate-limiting/policies/init.lua b/kong/plugins/rate-limiting/policies/init.lua index 12f9f32983e8..f372d6310a7d 100644 --- a/kong/plugins/rate-limiting/policies/init.lua +++ b/kong/plugins/rate-limiting/policies/init.lua @@ -15,27 +15,32 @@ local SYNC_RATE_REALTIME = -1 local EMPTY_UUID = "00000000-0000-0000-0000-000000000000" --- for `conf.sync_rate > 0` -local auto_sync_timer +local EMPTY = {} local cur_usage = { --[[ - [cache_key] = + [db_key][cache_key] = --]] } local cur_usage_expire_at = { --[[ - [cache_key] = + [db_key][cache_key] = --]] } local cur_delta = { --[[ - [cache_key] = + [db_key][cache_key] = --]] } +local function init_tables(db_key) + cur_usage[db_key] = cur_usage[db_key] or {} + cur_usage_expire_at[db_key] = cur_usage_expire_at[db_key] or {} + cur_delta[db_key] = cur_delta[db_key] or {} +end + local function is_present(str) return str and str ~= "" and str ~= null @@ -73,6 +78,13 @@ local sock_opts = {} local EXPIRATION = require "kong.plugins.rate-limiting.expiration" +local function get_db_key(conf) + return fmt("%s:%d;%d", + conf.redis_host, + conf.redis_port, + conf.redis_database) +end + local function get_redis_connection(conf) local red = redis:new() @@ -82,26 +94,25 @@ local function get_redis_connection(conf) sock_opts.ssl_verify = conf.redis_ssl_verify sock_opts.server_name = conf.redis_server_name + local db_key = get_db_key(conf) + -- use a special pool name only if redis_database is set to non-zero -- otherwise use the default pool name host:port if conf.redis_database ~= 0 then - sock_opts.pool = fmt( "%s:%d;%d", - conf.redis_host, - conf.redis_port, - conf.redis_database) + sock_opts.pool = db_key end local ok, err = red:connect(conf.redis_host, conf.redis_port, sock_opts) if not ok then kong.log.err("failed to connect to Redis: ", err) - return nil, err + return nil, db_key, err end local times, err = red:get_reused_times() if err then kong.log.err("failed to get connect reused times: ", err) - return nil, err + return nil, db_key, err end if times == 0 then @@ -118,7 +129,7 @@ local function get_redis_connection(conf) end if not ok then kong.log.err("failed to auth Redis: ", err) - return nil, err + return nil, db_key, err end end @@ -129,18 +140,21 @@ local function get_redis_connection(conf) local ok, err = red:select(conf.redis_database) if not ok then kong.log.err("failed to change Redis database: ", err) - return nil, err + return nil, db_key, err end end end - return red + 
return red, db_key, err end -local function clear_local_counter() - table_clear(cur_usage) - table_clear(cur_usage_expire_at) - table_clear(cur_delta) +local function clear_local_counter(db_key) + -- for config updates a db may no longer be used but this happens rarely + -- and unlikely there will be a lot of them. So we choose to not remove the table + -- but just clear it, as recreating the table will be more expensive + table_clear(cur_usage[db_key]) + table_clear(cur_usage_expire_at[db_key]) + table_clear(cur_delta[db_key]) end local function sync_to_redis(premature, conf) @@ -148,16 +162,16 @@ local function sync_to_redis(premature, conf) return end - local red, err = get_redis_connection(conf) + local red, db_key, err = get_redis_connection(conf) if not red then kong.log.err("[rate-limiting] failed to connect to Redis: ", err) - clear_local_counter() + clear_local_counter(db_key) return end red:init_pipeline() - for cache_key, delta in pairs(cur_delta) do + for cache_key, delta in pairs(cur_delta[db_key] or EMPTY) do red:eval([[ local key, value, expiration = KEYS[1], tonumber(ARGV[1]), ARGV[2] local exists = redis.call("exists", key) @@ -165,55 +179,104 @@ local function sync_to_redis(premature, conf) if not exists or exists == 0 then redis.call("expireat", key, expiration) end - ]], 1, cache_key, delta, cur_usage_expire_at[cache_key]) + ]], 1, cache_key, delta, cur_usage_expire_at[db_key][cache_key]) end local _, err = red:commit_pipeline() if err then kong.log.err("[rate-limiting] failed to commit increment pipeline in Redis: ", err) - clear_local_counter() + clear_local_counter(db_key) return end local ok, err = red:set_keepalive(10000, 100) if not ok then kong.log.err("[rate-limiting] failed to set Redis keepalive: ", err) - clear_local_counter() + clear_local_counter(db_key) return end -- just clear these tables and avoid creating three new tables - clear_local_counter() + clear_local_counter(db_key) end -local function periodical_sync(conf, sync_func) - if not auto_sync_timer then - local err - -- timer may be initialized after the module's loaded so we need to update the reference - auto_sync_timer, err = kong.timer:named_every("rate-limiting-auto-sync", conf.sync_rate, sync_func, conf) +local plugin_sync_pending = {} +local plugin_sync_running = {} + +-- It's called "rate_limited_sync" because the sync timer itself +-- is rate-limited by the sync_rate. +-- It should be easy to prove that: +-- 1. There will be at most 2 timers per worker for a plugin instance +-- at any given time, 1 syncing and 1 pending (guaranteed by the locks) +-- 2. 2 timers will at least start with a sync_rate interval apart +-- 3. A change is always picked up by a pending timer and +-- will be sync to Redis at most sync_rate interval +local function rate_limited_sync(conf, sync_func) + local cache_key = conf.__key__ or conf.__plugin_id or "rate-limiting" + -- a timer is pending. 
The change will be picked up by the pending timer + if plugin_sync_pending[cache_key] then + return true + end - if not auto_sync_timer then - kong.log.err("failed to create timer: ", err) - return nil, err + -- The change may or may not be picked up by a running timer + -- let's start a pending timer to make sure the change is picked up + plugin_sync_pending[cache_key] = true + return kong.timer:at(conf.sync_rate, function(premature) + if premature then + -- we do not clear the pending flag to prevent more timers to be started + -- as they will also exit prematurely + return + end + + -- a "pending" state is never touched before the timer is started + assert(plugin_sync_pending[cache_key]) + + + local tries = 0 + -- a timer is already running. + -- the sleep time is picked to a seemingly reasonable value + while plugin_sync_running[cache_key] do + -- we should wait for at most 2 runs even if the connection times out + -- when this happens, we should not clear the "running" state as it would + -- cause a race condition; + -- we don't want to clear the "pending" state and exit the timer either as + -- it's equivalent to waiting for more runs + if tries > 4 then + kong.log.emerg("A Redis sync is blocked by a previous try. " .. + "The previous try should have timed out but it didn't for unknown reasons.") + end + + ngx.sleep(conf.redis_timeout / 2) + tries = tries + 1 + end + + plugin_sync_running[cache_key] = true + + plugin_sync_pending[cache_key] = nil + + -- given the condition, the counters will never be empty so no need to + -- check for empty tables and skip the sync + local ok, err = pcall(sync_func, premature, conf) + if not ok then + kong.log.err("[rate-limiting] error when syncing counters to Redis: ", err) end - end - return true + plugin_sync_running[cache_key] = nil + end) end local function update_local_counters(conf, periods, limits, identifier, value) + local db_key = get_db_key(conf) + init_tables(db_key) + for period, period_date in pairs(periods) do if limits[period] then local cache_key = get_local_key(conf, identifier, period, period_date) - if cur_delta[cache_key] then - cur_delta[cache_key] = cur_delta[cache_key] + value - else - cur_delta[cache_key] = value - end + cur_delta[db_key][cache_key] = (cur_delta[db_key][cache_key] or 0) + value end end - + end return { @@ -291,23 +354,25 @@ return { else update_local_counters(conf, periods, limits, identifier, value) - return periodical_sync(conf, sync_to_redis) + return rate_limited_sync(conf, sync_to_redis) end end, usage = function(conf, identifier, period, current_timestamp) local periods = timestamp.get_timestamps(current_timestamp) local cache_key = get_local_key(conf, identifier, period, periods[period]) + local db_key = get_db_key(conf) + init_tables(db_key) -- use local cache to reduce the number of redis calls -- also by pass the logic of incrementing the counter - if conf.sync_rate ~= SYNC_RATE_REALTIME and cur_usage[cache_key] then - if cur_usage_expire_at[cache_key] > ngx_time() then - return cur_usage[cache_key] + (cur_delta[cache_key] or 0) + if conf.sync_rate ~= SYNC_RATE_REALTIME and cur_usage[db_key][cache_key] then + if cur_usage_expire_at[db_key][cache_key] > ngx_time() then + return cur_usage[db_key][cache_key] + (cur_delta[db_key][cache_key] or 0) end - cur_usage[cache_key] = 0 - cur_usage_expire_at[cache_key] = periods[period] + EXPIRATION[period] - cur_delta[cache_key] = 0 + cur_usage[db_key][cache_key] = 0 + cur_usage_expire_at[db_key][cache_key] = periods[period] + EXPIRATION[period] + 
cur_delta[db_key][cache_key] = 0 return 0 end @@ -344,9 +409,11 @@ return { end if conf.sync_rate ~= SYNC_RATE_REALTIME then - cur_usage[cache_key] = current_metric or 0 - cur_usage_expire_at[cache_key] = periods[period] + EXPIRATION[period] - cur_delta[cache_key] = 0 + cur_usage[db_key][cache_key] = current_metric or 0 + cur_usage_expire_at[db_key][cache_key] = periods[period] + EXPIRATION[period] + -- The key was just read from Redis using `incr`, which incremented it + -- by 1. Adjust the value to account for the prior increment. + cur_delta[db_key][cache_key] = -1 end return current_metric or 0 diff --git a/spec/01-unit/10-log_serializer_spec.lua b/spec/01-unit/10-log_serializer_spec.lua index c99f0c2dbc83..bd465d22805e 100644 --- a/spec/01-unit/10-log_serializer_spec.lua +++ b/spec/01-unit/10-log_serializer_spec.lua @@ -32,7 +32,7 @@ describe("kong.log.serialize", function() bytes_sent = "99", request_time = "2", remote_addr = "1.1.1.1", - -- may be a non-numeric string, + -- may be a non-numeric string, -- see http://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_addr upstream_status = "500, 200 : 200, 200", }, @@ -123,7 +123,7 @@ describe("kong.log.serialize", function() end) it("serializes the Consumer object", function() - ngx.ctx.authenticated_consumer = {id = "someconsumer"} + ngx.ctx.authenticated_consumer = { id = "someconsumer" } local res = kong.log.serialize({ngx = ngx, kong = kong, }) assert.is_table(res) @@ -193,6 +193,20 @@ describe("kong.log.serialize", function() assert.equal("/upstream_uri" .. "?" .. args, res.upstream_uri) end) + + it("use the deep copies of the Route, Service, Consumer object avoid " .. + "modify ctx.authenticated_consumer, ctx.route, ctx.service", function() + ngx.ctx.authenticated_consumer = { id = "someconsumer" } + ngx.ctx.route = { id = "my_route" } + ngx.ctx.service = { id = "my_service" } + local res = kong.log.serialize({ngx = ngx, kong = kong, }) + assert.not_equal(tostring(ngx.ctx.authenticated_consumer), + tostring(res.consumer)) + assert.not_equal(tostring(ngx.ctx.route), + tostring(res.route)) + assert.not_equal(tostring(ngx.ctx.service), + tostring(res.service)) + end) end) end) @@ -290,7 +304,7 @@ describe("kong.log.serialize", function() end) it("serializes the Consumer object", function() - ngx.ctx.authenticated_consumer = {id = "someconsumer"} + ngx.ctx.authenticated_consumer = { id = "someconsumer" } local res = kong.log.serialize({ngx = ngx, kong = kong, }) assert.is_table(res) @@ -348,6 +362,20 @@ describe("kong.log.serialize", function() assert.is_nil(res.tries) end) + + it("use the deep copies of the Route, Service, Consumer object avoid " .. 
+ "modify ctx.authenticated_consumer, ctx.route, ctx.service", function() + ngx.ctx.authenticated_consumer = { id = "someconsumer "} + ngx.ctx.route = { id = "my_route" } + ngx.ctx.service = { id = "my_service" } + local res = kong.log.serialize({ngx = ngx, kong = kong, }) + assert.not_equal(tostring(ngx.ctx.authenticated_consumer), + tostring(res.consumer)) + assert.not_equal(tostring(ngx.ctx.route), + tostring(res.route)) + assert.not_equal(tostring(ngx.ctx.service), + tostring(res.service)) + end) end) end) end) diff --git a/spec/02-integration/05-proxy/07-upstream_timeouts_spec.lua b/spec/02-integration/05-proxy/07-upstream_timeouts_spec.lua index d6d2121aa4af..0a1596f2a2d0 100644 --- a/spec/02-integration/05-proxy/07-upstream_timeouts_spec.lua +++ b/spec/02-integration/05-proxy/07-upstream_timeouts_spec.lua @@ -32,6 +32,21 @@ for _, strategy in helpers.each_strategy() do route.service = bp.services:insert(service) + if route.enable_buffering then + route.enable_buffering = nil + bp.plugins:insert({ + name = "pre-function", + service = { id = route.service.id }, + config = { + access = { + [[ + kong.service.request.enable_buffering() + ]], + }, + } + }) + end + if not route.protocols then route.protocols = { "http" } end @@ -73,6 +88,17 @@ for _, strategy in helpers.each_strategy() do read_timeout = 1, -- ms }, }, + { + methods = { "PUT" }, + service = { + name = "api-4", + protocol = "http", + host = "konghq.com", + port = 81, + connect_timeout = 1, -- ms + }, + enable_buffering = true, + }, } bp.plugins:insert { @@ -83,7 +109,7 @@ for _, strategy in helpers.each_strategy() do } assert(helpers.start_kong({ - plugins = "ctx-checker-last", + plugins = "bundled, ctx-checker-last", database = strategy, nginx_conf = "spec/fixtures/custom_nginx.template", })) @@ -161,5 +187,22 @@ for _, strategy in helpers.each_strategy() do assert.equal(504, res.status) end) end) + + describe("upstream_connect_timeout with enable_buffering", function() + it("sets upstream send timeout value", function() + local res = assert(proxy_client:send { + method = "PUT", + path = "/put", + body = { + huge = string.rep("a", 2^25) + }, + headers = { ["Content-Type"] = "application/json" }, + }) + + assert.equal(504, res.status) + end) + end) + + end) end diff --git a/spec/02-integration/07-sdk/02-log_spec.lua b/spec/02-integration/07-sdk/02-log_spec.lua index fff9061a4c23..7f2b7e607b4a 100644 --- a/spec/02-integration/07-sdk/02-log_spec.lua +++ b/spec/02-integration/07-sdk/02-log_spec.lua @@ -1,4 +1,6 @@ local helpers = require "spec.helpers" +local cjson = require "cjson" +local FILE_LOG_PATH = os.tmpname() local function find_in_file(f, pat) @@ -121,3 +123,340 @@ describe("PDK: kong.log", function() f:close() end) end) + +for _, strategy in helpers.each_strategy() do + describe("PDK: make sure kong.log.serialize() will not modify ctx which's lifecycle " .. + "is across request [#" .. strategy .. 
"]", function() + describe("ctx.authenticated_consumer", function() + local proxy_client + local bp + + lazy_setup(function() + bp, _ = helpers.get_db_utils(strategy, { + "routes", + "services", + "plugins", + "consumers", + "acls", + "keyauth_credentials", + }) + + local consumer = bp.consumers:insert( { + username = "foo", + }) + + bp.keyauth_credentials:insert { + key = "test", + consumer = { id = consumer.id }, + } + + bp.acls:insert { + group = "allowed", + consumer = consumer, + } + + local route1 = bp.routes:insert { + paths = { "/status/200" }, + } + + bp.plugins:insert { + name = "acl", + route = { id = route1.id }, + config = { + allow = { "allowed" }, + }, + } + + bp.plugins:insert { + name = "key-auth", + route = { id = route1.id }, + } + + bp.plugins:insert { + name = "file-log", + route = { id = route1.id }, + config = { + path = FILE_LOG_PATH, + reopen = false, + custom_fields_by_lua = { + ["consumer.id"] = "return nil", + }, + }, + protocols = { + "http" + }, + } + + assert(helpers.start_kong({ + plugins = "bundled", + database = strategy, + nginx_conf = "spec/fixtures/custom_nginx.template", + db_cache_warmup_entities = "keyauth_credentials,consumers,acls", + nginx_worker_processes = 1, + })) + end) + + before_each(function() + proxy_client = helpers.proxy_client() + end) + + after_each(function () + proxy_client:close() + end) + + lazy_teardown(function() + helpers.stop_kong() + end) + + it("use the deep copy of Consumer object", function() + for i = 1, 3 do + local res = proxy_client:send { + method = "GET", + path = "/status/200", + headers = { + ["apikey"] = "test", + } + } + assert.res_status(200, res) + end + end) + end) + + describe("ctx.service", function() + local proxy_client + local bp + + lazy_setup(function() + bp, _ = helpers.get_db_utils(strategy, { + "routes", + "services", + "plugins", + },{ + "error-handler-log", + }) + + local service = bp.services:insert { + name = "example", + host = helpers.mock_upstream_host, + port = helpers.mock_upstream_port, + } + + local route1 = bp.routes:insert { + paths = { "/status/200" }, + service = service, + } + + bp.plugins:insert { + name = "error-handler-log", + config = {}, + } + + bp.plugins:insert { + name = "file-log", + route = { id = route1.id }, + config = { + path = FILE_LOG_PATH, + reopen = false, + custom_fields_by_lua = { + ["service.name"] = "return nil", + }, + }, + protocols = { + "http" + }, + } + + assert(helpers.start_kong({ + plugins = "bundled, error-handler-log", + database = strategy, + nginx_conf = "spec/fixtures/custom_nginx.template", + nginx_worker_processes = 1, + })) + end) + + before_each(function() + proxy_client = helpers.proxy_client() + end) + + after_each(function () + proxy_client:close() + end) + + lazy_teardown(function() + helpers.stop_kong() + end) + + it("use the deep copy of Service object", function() + for i = 1, 3 do + local res = proxy_client:send { + method = "GET", + path = "/status/200", + } + assert.res_status(200, res) + + local service_matched_header = res.headers["Log-Plugin-Service-Matched"] + assert.equal(service_matched_header, "example") + end + end) + end) + + describe("ctx.route", function() + local proxy_client + local bp + + lazy_setup(function() + bp, _ = helpers.get_db_utils(strategy, { + "routes", + "services", + "plugins", + }) + + local service = bp.services:insert { + host = helpers.mock_upstream_host, + port = helpers.mock_upstream_port, + } + + local route1 = bp.routes:insert { + name = "route1", + paths = { "/status/200" }, + service = service, + } + 
+ assert(bp.plugins:insert { + name = "request-termination", + route = { id = route1.id }, + config = { + status_code = 418, + message = "No coffee for you. I'm a teapot.", + echo = true, + }, + }) + + bp.plugins:insert { + name = "file-log", + route = { id = route1.id }, + config = { + path = FILE_LOG_PATH, + reopen = false, + custom_fields_by_lua = { + ["route.name"] = "return nil", + }, + }, + protocols = { + "http" + }, + } + + assert(helpers.start_kong({ + plugins = "bundled, error-handler-log", + database = strategy, + nginx_conf = "spec/fixtures/custom_nginx.template", + nginx_worker_processes = 1, + })) + end) + + before_each(function() + proxy_client = helpers.proxy_client() + end) + + after_each(function () + proxy_client:close() + end) + + lazy_teardown(function() + helpers.stop_kong() + end) + + it("use the deep copy of Route object", function() + for i = 1, 3 do + local res = proxy_client:send { + method = "GET", + path = "/status/200", + } + + local body = assert.res_status(418, res) + local json = cjson.decode(body) + assert.equal(json["matched_route"]["name"], "route1") + end + end) + end) + + describe("in stream subsystem# ctx.authenticated_consumer", function() + local proxy_client + local bp + + local MESSAGE = "echo, ping, pong. echo, ping, pong. echo, ping, pong.\n" + lazy_setup(function() + bp, _ = helpers.get_db_utils(strategy, { + "routes", + "services", + "plugins", + }) + + local service = assert(bp.services:insert { + host = helpers.mock_upstream_host, + port = helpers.mock_upstream_stream_port, + protocol = "tcp", + }) + + local route1 = bp.routes:insert({ + destinations = { + { port = 19000 }, + }, + protocols = { + "tcp", + }, + service = service, + }) + + bp.plugins:insert { + name = "file-log", + route = { id = route1.id }, + config = { + path = FILE_LOG_PATH, + reopen = false, + custom_fields_by_lua = { + ["service.port"] = "return nil", + ["service.host"] = "return nil", + }, + }, + protocols = { + "tcp" + }, + } + + assert(helpers.start_kong({ + plugins = "bundled, error-handler-log", + database = strategy, + nginx_conf = "spec/fixtures/custom_nginx.template", + nginx_worker_processes = 1, + stream_listen = helpers.get_proxy_ip(false) .. ":19000", + proxy_stream_error_log = "logs/error.log", + })) + end) + + before_each(function() + proxy_client = helpers.proxy_client() + end) + + after_each(function () + proxy_client:close() + end) + + lazy_teardown(function() + helpers.stop_kong() + end) + + it("use the deep copy of Service object", function() + for i = 1, 3 do + local tcp_client = ngx.socket.tcp() + assert(tcp_client:connect(helpers.get_proxy_ip(false), 19000)) + assert(tcp_client:send(MESSAGE)) + local body = assert(tcp_client:receive("*a")) + assert.equal(MESSAGE, body) + assert(tcp_client:close()) + end + end) + end) + end) +end diff --git a/spec/03-plugins/09-key-auth/04-hybrid_mode_spec.lua b/spec/03-plugins/09-key-auth/04-hybrid_mode_spec.lua new file mode 100644 index 000000000000..ba3a0faaa2aa --- /dev/null +++ b/spec/03-plugins/09-key-auth/04-hybrid_mode_spec.lua @@ -0,0 +1,123 @@ +local helpers = require "spec.helpers" + +for _, strategy in helpers.each_strategy({"postgres"}) do + describe("Plugin: key-auth (access) [#" .. strategy .. 
"] auto-expiring keys", function() + -- Give a bit of time to reduce test flakyness on slow setups + local ttl = 10 + local inserted_at + local proxy_client + + lazy_setup(function() + local bp = helpers.get_db_utils(strategy, { + "routes", + "services", + "plugins", + "consumers", + "keyauth_credentials", + }) + + local r = bp.routes:insert { + hosts = { "key-ttl-hybrid.com" }, + } + + bp.plugins:insert { + name = "key-auth", + route = { id = r.id }, + } + + bp.consumers:insert { + username = "Jafar", + } + + assert(helpers.start_kong({ + role = "control_plane", + database = strategy, + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + lua_ssl_trusted_certificate = "spec/fixtures/kong_clustering.crt", + cluster_listen = "127.0.0.1:9005", + cluster_telemetry_listen = "127.0.0.1:9006", + nginx_conf = "spec/fixtures/custom_nginx.template", + })) + + assert(helpers.start_kong({ + role = "data_plane", + database = "off", + prefix = "servroot2", + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + lua_ssl_trusted_certificate = "spec/fixtures/kong_clustering.crt", + cluster_control_plane = "127.0.0.1:9005", + cluster_telemetry_endpoint = "127.0.0.1:9006", + proxy_listen = "0.0.0.0:9002", + })) + end) + + lazy_teardown(function() + if proxy_client then + proxy_client:close() + end + + helpers.stop_kong("servroot2") + helpers.stop_kong() + end) + + it("authenticate for up to 'ttl'", function() + + -- add credentials after nginx has started to avoid TTL expiration + local admin_client = helpers.admin_client() + local res = assert(admin_client:send { + method = "POST", + path = "/consumers/Jafar/key-auth", + headers = { + ["Content-Type"] = "application/json", + }, + body = { + key = "kong", + ttl = 10, + }, + }) + assert.res_status(201, res) + admin_client:close() + + ngx.update_time() + inserted_at = ngx.now() + + helpers.wait_until(function() + proxy_client = helpers.http_client("127.0.0.1", 9002) + res = assert(proxy_client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "key-ttl-hybrid.com", + ["apikey"] = "kong", + } + }) + + proxy_client:close() + return res and res.status == 200 + end, 5) + + ngx.update_time() + local elapsed = ngx.now() - inserted_at + + ngx.sleep(ttl - elapsed) + + helpers.wait_until(function() + proxy_client = helpers.http_client("127.0.0.1", 9002) + res = assert(proxy_client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "key-ttl-hybrid.com", + ["apikey"] = "kong", + } + }) + + proxy_client:close() + return res and res.status == 401 + end, 5) + + end) + end) +end diff --git a/spec/03-plugins/18-acl/02-access_spec.lua b/spec/03-plugins/18-acl/02-access_spec.lua index 3b5c37cc4e77..6112802f00f2 100644 --- a/spec/03-plugins/18-acl/02-access_spec.lua +++ b/spec/03-plugins/18-acl/02-access_spec.lua @@ -711,10 +711,39 @@ for _, strategy in helpers.each_strategy() do } } + local route14 = bp.routes:insert({ + hosts = { "acl14.com" } + }) + + local acl_prefunction_code = " local consumer_id = \"" .. tostring(consumer2.id) .. "\"\n" .. 
[[ + local cache_key = kong.db.acls:cache_key(consumer_id) + + -- we must use shadict to get the cache, because the `kong.cache` was hooked by `kong.plugins.pre-function` + local raw_groups, err = ngx.shared.kong_db_cache:get("kong_db_cache"..cache_key) + if raw_groups then + ngx.exit(200) + else + ngx.log(ngx.ERR, "failed to get cache: ", err) + ngx.exit(500) + end + + ]] + + bp.plugins:insert { + route = { id = route14.id }, + name = "pre-function", + config = { + access = { + acl_prefunction_code, + }, + } + } + assert(helpers.start_kong({ plugins = "bundled, ctx-checker", database = strategy, nginx_conf = "spec/fixtures/custom_nginx.template", + db_cache_warmup_entities = "keyauth_credentials,consumers,acls", })) end) @@ -1332,6 +1361,26 @@ for _, strategy in helpers.each_strategy() do assert.matches("You cannot consume this service", json.message) end) end) + + describe("cache warmup acls group", function() + it("cache warmup acls group", function() + assert(helpers.restart_kong { + plugins = "bundled, ctx-checker", + database = strategy, + nginx_conf = "spec/fixtures/custom_nginx.template", + db_cache_warmup_entities = "keyauth_credentials,consumers,acls", + }) + + proxy_client = helpers.proxy_client() + local res = assert(proxy_client:get("/request", { + headers = { + ["Host"] = "acl14.com" + } + })) + assert.res_status(200, res) + end) + end) + end) describe("Plugin: ACL (access) [#" .. strategy .. "] anonymous", function() diff --git a/spec/03-plugins/23-rate-limiting/02-policies_spec.lua b/spec/03-plugins/23-rate-limiting/02-policies_spec.lua index 7ce052080e18..6ee5ef674e71 100644 --- a/spec/03-plugins/23-rate-limiting/02-policies_spec.lua +++ b/spec/03-plugins/23-rate-limiting/02-policies_spec.lua @@ -176,53 +176,63 @@ describe("Plugin: rate-limiting (policies)", function() end) end - for _, sync_rate in ipairs{1, SYNC_RATE_REALTIME} do - describe("redis with sync rate: " .. sync_rate, function() - local identifier = uuid() - local conf = { - route_id = uuid(), - service_id = uuid(), - redis_host = helpers.redis_host, - redis_port = helpers.redis_port, - redis_database = 0, - sync_rate = sync_rate, - } - - before_each(function() - local red = require "resty.redis" - local redis = assert(red:new()) - redis:set_timeout(1000) - assert(redis:connect(conf.redis_host, conf.redis_port)) - redis:flushall() - redis:close() - end) - - it("increase & usage", function() - --[[ - Just a simple test: - - increase 1 - - check usage == 1 - - increase 1 - - check usage == 2 - - increase 1 (beyond the limit) - - check usage == 3 - --]] - - local current_timestamp = 1424217600 - local periods = timestamp.get_timestamps(current_timestamp) + for _, sync_rate in ipairs{0.5, SYNC_RATE_REALTIME} do + local current_timestamp = 1424217600 + local periods = timestamp.get_timestamps(current_timestamp) + + for period in pairs(periods) do + describe("redis with sync rate: " .. sync_rate .. " period: " .. 
period, function() + local identifier = uuid() + local conf = { + route_id = uuid(), + service_id = uuid(), + redis_host = helpers.redis_host, + redis_port = helpers.redis_port, + redis_database = 0, + sync_rate = sync_rate, + } - for period in pairs(periods) do + before_each(function() + local red = require "resty.redis" + local redis = assert(red:new()) + redis:set_timeout(1000) + assert(redis:connect(conf.redis_host, conf.redis_port)) + redis:flushall() + redis:close() + end) + + it("increase & usage", function() + --[[ + Just a simple test: + - increase 1 + - check usage == 1 + - increase 1 + - check usage == 2 + - increase 1 (beyond the limit) + - check usage == 3 + --]] local metric = assert(policies.redis.usage(conf, identifier, period, current_timestamp)) assert.equal(0, metric) for i = 1, 3 do - assert(policies.redis.increment(conf, { [period] = 2 }, identifier, current_timestamp, 1)) - metric = assert(policies.redis.usage(conf, identifier, period, current_timestamp)) - assert.equal(i, metric) + -- "second" keys expire too soon to check the async increment. + -- Let's verify all the other scenarios: + if not (period == "second" and sync_rate ~= SYNC_RATE_REALTIME) then + assert(policies.redis.increment(conf, { [period] = 2 }, identifier, current_timestamp, 1)) + + -- give time to the async increment to happen + if sync_rate ~= SYNC_RATE_REALTIME then + local sleep_time = 1 + (sync_rate > 0 and sync_rate or 0) + ngx.sleep(sleep_time) + end + + metric = assert(policies.redis.usage(conf, identifier, period, current_timestamp)) + assert.equal(i, metric) + end end - end + end) end) - end) + end end end) diff --git a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua index d919c50f0eaf..8b00ea67e780 100644 --- a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua +++ b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua @@ -88,104 +88,63 @@ describe("Plugin: rate-limiting (integration)", function() }, } + -- it's set smaller than SLEEP_TIME in purpose + local SYNC_RATE = 0.1 for strategy, config in pairs(strategies) do - describe("config.policy = redis #" .. 
strategy, function() - -- Regression test for the following issue: - -- https://github.com/Kong/kong/issues/3292 - - lazy_setup(function() - flush_redis(red, REDIS_DB_1) - flush_redis(red, REDIS_DB_2) - flush_redis(red, REDIS_DB_3) - if red_version >= version("6.0.0") then - add_redis_user(red) - end - - bp = helpers.get_db_utils(nil, { - "routes", - "services", - "plugins", - }, { - "rate-limiting" - }) - - local route1 = assert(bp.routes:insert { - hosts = { "redistest1.com" }, - }) - assert(bp.plugins:insert { - name = "rate-limiting", - route = { id = route1.id }, - config = { - minute = 1, - policy = "redis", - redis_host = REDIS_HOST, - redis_port = config.redis_port, - redis_database = REDIS_DB_1, - redis_ssl = config.redis_ssl, - redis_ssl_verify = config.redis_ssl_verify, - redis_server_name = config.redis_server_name, - fault_tolerant = false, - redis_timeout = 10000, - }, - }) - - local route2 = assert(bp.routes:insert { - hosts = { "redistest2.com" }, - }) - assert(bp.plugins:insert { - name = "rate-limiting", - route = { id = route2.id }, - config = { - minute = 1, - policy = "redis", - redis_host = REDIS_HOST, - redis_port = config.redis_port, - redis_database = REDIS_DB_2, - redis_ssl = config.redis_ssl, - redis_ssl_verify = config.redis_ssl_verify, - redis_server_name = config.redis_server_name, - fault_tolerant = false, - redis_timeout = 10000, - }, - }) - - if red_version >= version("6.0.0") then - local route3 = assert(bp.routes:insert { - hosts = { "redistest3.com" }, + for with_sync_rate in pairs{false, true} do + describe("config.policy = redis #" .. strategy, function() + -- Regression test for the following issue: + -- https://github.com/Kong/kong/issues/3292 + + lazy_setup(function() + flush_redis(red, REDIS_DB_1) + flush_redis(red, REDIS_DB_2) + flush_redis(red, REDIS_DB_3) + if red_version >= version("6.0.0") then + add_redis_user(red) + end + + bp = helpers.get_db_utils(nil, { + "routes", + "services", + "plugins", + }, { + "rate-limiting" + }) + + local route1 = assert(bp.routes:insert { + hosts = { "redistest1.com" }, }) assert(bp.plugins:insert { name = "rate-limiting", - route = { id = route3.id }, + route = { id = route1.id }, config = { - minute = 2, -- Handle multiple tests + minute = 1, policy = "redis", redis_host = REDIS_HOST, redis_port = config.redis_port, - redis_username = REDIS_USER_VALID, - redis_password = REDIS_PASSWORD, - redis_database = REDIS_DB_3, -- ensure to not get a pooled authenticated connection by using a different db + redis_database = REDIS_DB_1, redis_ssl = config.redis_ssl, redis_ssl_verify = config.redis_ssl_verify, redis_server_name = config.redis_server_name, fault_tolerant = false, redis_timeout = 10000, + sync_rate = with_sync_rate and SYNC_RATE or nil, }, }) - local route4 = assert(bp.routes:insert { - hosts = { "redistest4.com" }, + local route2 = assert(bp.routes:insert { + hosts = { "redistest2.com" }, }) assert(bp.plugins:insert { name = "rate-limiting", - route = { id = route4.id }, + route = { id = route2.id }, config = { minute = 1, policy = "redis", redis_host = REDIS_HOST, redis_port = config.redis_port, - redis_username = REDIS_USER_INVALID, - redis_password = REDIS_PASSWORD, - redis_database = REDIS_DB_4, -- ensure to not get a pooled authenticated connection by using a different db + redis_database = REDIS_DB_2, redis_ssl = config.redis_ssl, redis_ssl_verify = config.redis_ssl_verify, redis_server_name = config.redis_server_name, @@ -193,104 +152,88 @@ describe("Plugin: rate-limiting (integration)", function() 
redis_timeout = 10000, }, }) - end + if red_version >= version("6.0.0") then + local route3 = assert(bp.routes:insert { + hosts = { "redistest3.com" }, + }) + assert(bp.plugins:insert { + name = "rate-limiting", + route = { id = route3.id }, + config = { + minute = 2, -- Handle multiple tests + policy = "redis", + redis_host = REDIS_HOST, + redis_port = config.redis_port, + redis_username = REDIS_USER_VALID, + redis_password = REDIS_PASSWORD, + redis_database = REDIS_DB_3, -- ensure to not get a pooled authenticated connection by using a different db + redis_ssl = config.redis_ssl, + redis_ssl_verify = config.redis_ssl_verify, + redis_server_name = config.redis_server_name, + fault_tolerant = false, + redis_timeout = 10000, + }, + }) + + local route4 = assert(bp.routes:insert { + hosts = { "redistest4.com" }, + }) + assert(bp.plugins:insert { + name = "rate-limiting", + route = { id = route4.id }, + config = { + minute = 1, + policy = "redis", + redis_host = REDIS_HOST, + redis_port = config.redis_port, + redis_username = REDIS_USER_INVALID, + redis_password = REDIS_PASSWORD, + redis_database = REDIS_DB_4, -- ensure to not get a pooled authenticated connection by using a different db + redis_ssl = config.redis_ssl, + redis_ssl_verify = config.redis_ssl_verify, + redis_server_name = config.redis_server_name, + fault_tolerant = false, + redis_timeout = 10000, + }, + }) + end + + + assert(helpers.start_kong({ + nginx_conf = "spec/fixtures/custom_nginx.template", + lua_ssl_trusted_certificate = config.lua_ssl_trusted_certificate, + })) + client = helpers.proxy_client() + end) + + lazy_teardown(function() + helpers.stop_kong() + if red_version >= version("6.0.0") then + remove_redis_user(red) + end + end) + + it("connection pool respects database setting", function() + assert(red:select(REDIS_DB_1)) + local size_1 = assert(red:dbsize()) - assert(helpers.start_kong({ - nginx_conf = "spec/fixtures/custom_nginx.template", - lua_ssl_trusted_certificate = config.lua_ssl_trusted_certificate, - })) - client = helpers.proxy_client() - end) + assert(red:select(REDIS_DB_2)) + local size_2 = assert(red:dbsize()) - lazy_teardown(function() - helpers.stop_kong() - if red_version >= version("6.0.0") then - remove_redis_user(red) - end - end) + assert.equal(0, tonumber(size_1)) + assert.equal(0, tonumber(size_2)) + if red_version >= version("6.0.0") then + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) + end - it("connection pool respects database setting", function() - assert(red:select(REDIS_DB_1)) - local size_1 = assert(red:dbsize()) - - assert(red:select(REDIS_DB_2)) - local size_2 = assert(red:dbsize()) - - assert.equal(0, tonumber(size_1)) - assert.equal(0, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end - - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest1.com" - } - }) - assert.res_status(200, res) - - -- Wait for async timer to increment the limit - - ngx.sleep(SLEEP_TIME) - - assert(red:select(REDIS_DB_1)) - size_1 = assert(red:dbsize()) - - assert(red:select(REDIS_DB_2)) - size_2 = assert(red:dbsize()) - - -- TEST: DB 1 should now have one hit, DB 2 and 3 none - - assert.equal(1, tonumber(size_1)) - assert.equal(0, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - 
assert.equal(0, tonumber(size_3)) - end - - -- rate-limiting plugin will reuses the redis connection - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest2.com" - } - }) - assert.res_status(200, res) - - -- Wait for async timer to increment the limit - - ngx.sleep(SLEEP_TIME) - - assert(red:select(REDIS_DB_1)) - size_1 = assert(red:dbsize()) - - assert(red:select(REDIS_DB_2)) - size_2 = assert(red:dbsize()) - - -- TEST: DB 1 and 2 should now have one hit, DB 3 none - - assert.equal(1, tonumber(size_1)) - assert.equal(1, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end - - if red_version >= version("6.0.0") then - -- rate-limiting plugin will reuses the redis connection local res = assert(client:send { method = "GET", path = "/status/200", headers = { - ["Host"] = "redistest3.com" + ["Host"] = "redistest1.com" } }) assert.res_status(200, res) @@ -305,52 +248,113 @@ describe("Plugin: rate-limiting (integration)", function() assert(red:select(REDIS_DB_2)) size_2 = assert(red:dbsize()) - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) + -- TEST: DB 1 should now have one hit, DB 2 and 3 none - -- TEST: All DBs should now have one hit, because the - -- plugin correctly chose to select the database it is - -- configured to hit + assert.equal(1, tonumber(size_1)) + assert.equal(0, tonumber(size_2)) + if red_version >= version("6.0.0") then + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) + end - assert.is_true(tonumber(size_1) > 0) - assert.is_true(tonumber(size_2) > 0) - assert.is_true(tonumber(size_3) > 0) - end - end) - - it("authenticates and executes with a valid redis user having proper ACLs", function() - if red_version >= version("6.0.0") then + -- rate-limiting plugin will reuses the redis connection local res = assert(client:send { method = "GET", path = "/status/200", headers = { - ["Host"] = "redistest3.com" + ["Host"] = "redistest2.com" } }) assert.res_status(200, res) - else - ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. - "'authenticates and executes with a valid redis user having proper ACLs' will be skipped") - end - end) - it("fails to rate-limit for a redis user with missing ACLs", function() - if red_version >= version("6.0.0") then - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest4.com" - } - }) - assert.res_status(500, res) - else - ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. 
- "'fails to rate-limit for a redis user with missing ACLs' will be skipped") - end - end) + -- Wait for async timer to increment the limit - end) - end + ngx.sleep(SLEEP_TIME) + + assert(red:select(REDIS_DB_1)) + size_1 = assert(red:dbsize()) + + assert(red:select(REDIS_DB_2)) + size_2 = assert(red:dbsize()) + -- TEST: DB 1 and 2 should now have one hit, DB 3 none + + assert.equal(1, tonumber(size_1)) + assert.equal(1, tonumber(size_2)) + if red_version >= version("6.0.0") then + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) + end + + if red_version >= version("6.0.0") then + -- rate-limiting plugin will reuses the redis connection + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest3.com" + } + }) + assert.res_status(200, res) + + -- Wait for async timer to increment the limit + + ngx.sleep(SLEEP_TIME) + + assert(red:select(REDIS_DB_1)) + size_1 = assert(red:dbsize()) + + assert(red:select(REDIS_DB_2)) + size_2 = assert(red:dbsize()) + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + + -- TEST: All DBs should now have one hit, because the + -- plugin correctly chose to select the database it is + -- configured to hit + + assert.is_true(tonumber(size_1) > 0) + assert.is_true(tonumber(size_2) > 0) + assert.is_true(tonumber(size_3) > 0) + end + end) + + it("authenticates and executes with a valid redis user having proper ACLs", function() + if red_version >= version("6.0.0") then + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest3.com" + } + }) + assert.res_status(200, res) + else + ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. + "'authenticates and executes with a valid redis user having proper ACLs' will be skipped") + end + end) + + it("fails to rate-limit for a redis user with missing ACLs", function() + if red_version >= version("6.0.0") then + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest4.com" + } + }) + assert.res_status(500, res) + else + ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. + "'fails to rate-limit for a redis user with missing ACLs' will be skipped") + end + end) + + end) + end + end end)