fix(aws-lambda): remove AWS Lambda API latency from kong latency (#12835)

* fix(aws-lambda): remove AWS Lambda API latency from kong latency

This commit excludes the time spent on AWS Lambda API requests
from Kong's measured latencies (latencies.kong,
KONG_RESPONSE_LATENCY, X-Kong-Response-Latency), so that these
delays are no longer mistakenly attributed to Kong itself.

---------

Co-authored-by: Aapo Talvensaari <aapo.talvensaari@gmail.com>
(cherry picked from commit bf673ce)
windmgc authored and jschmid1 committed Apr 26, 2024
1 parent 1a13abc commit 1394fa2
Showing 4 changed files with 116 additions and 11 deletions.
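
In short, the handler now timestamps the AWS Lambda API call itself, records the elapsed time as the request's waiting time, and subtracts that same amount from the response latency in the header filter phase. Below is a condensed sketch of the two touched phases, pulled from the handler.lua diff that follows; it reuses the handler's existing locals such as get_now and lambda_service and omits error handling and response building:

function AWSLambdaHandler:access(conf)
  -- start timing before the AWS Lambda API request
  local kong_wait_time_start = get_now()

  local res, err = lambda_service:invoke({
    FunctionName = conf.function_name,
    -- InvocationType, Payload, Qualifier, ... as in the diff below
  })

  -- attribute the elapsed time to waiting on the Lambda API; since the
  -- plugin does not proxy to an upstream, KONG_WAITING_TIME will not be
  -- overwritten later, and a plugin-scoped copy is kept for header_filter
  local lambda_wait_time_total = get_now() - kong_wait_time_start
  ngx.ctx.KONG_WAITING_TIME = lambda_wait_time_total
  kong.ctx.plugin.waiting_time = lambda_wait_time_total

  -- ... error handling and response building as before ...
end

function AWSLambdaHandler:header_filter(conf)
  -- KONG_RESPONSE_LATENCY still includes the Lambda API time at this point,
  -- so subtract it before it is reported as Kong's own latency
  local ctx = ngx.ctx
  if ctx.KONG_RESPONSE_LATENCY then
    ctx.KONG_RESPONSE_LATENCY = ctx.KONG_RESPONSE_LATENCY - (kong.ctx.plugin.waiting_time or 0)
  end
end
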
3 changes: 3 additions & 0 deletions changelog/unreleased/kong/fix-aws-lambda-kong-latency.yml
@@ -0,0 +1,3 @@
message: "**AWS-Lambda**: fix an issue that the latency attributed to AWS Lambda API requests will be counted as part of the latency in Kong"
type: bugfix
scope: Plugin
29 changes: 20 additions & 9 deletions kong/plugins/aws-lambda/handler.lua
@@ -78,6 +78,9 @@ local AWSLambdaHandler = {


function AWSLambdaHandler:access(conf)
-- TRACING: set KONG_WAITING_TIME start
local kong_wait_time_start = get_now()

if initialize then
initialize()
end
@@ -171,9 +174,6 @@ function AWSLambdaHandler:access(conf)

local upstream_body_json = build_request_payload(conf)

-- TRACING: set KONG_WAITING_TIME start
local kong_wait_time_start = get_now()

local res, err = lambda_service:invoke({
FunctionName = conf.function_name,
InvocationType = conf.invocation_type,
@@ -182,6 +182,14 @@
Qualifier = conf.qualifier,
})

-- TRACING: set KONG_WAITING_TIME stop
local ctx = ngx.ctx
local lambda_wait_time_total = get_now() - kong_wait_time_start
-- setting the latency here is a bit tricky, but because we are not
-- actually proxying, it will not be overwritten
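-- a plugin-scoped copy is also stored below so that header_filter can
-- subtract the same amount from KONG_RESPONSE_LATENCY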
ctx.KONG_WAITING_TIME = lambda_wait_time_total
kong.ctx.plugin.waiting_time = lambda_wait_time_total

if err then
return error(err)
end
@@ -191,12 +199,6 @@
return error(content.Message)
end

-- TRACING: set KONG_WAITING_TIME stop
local ctx = ngx.ctx
-- setting the latency here is a bit tricky, but because we are not
-- actually proxying, it will not be overwritten
ctx.KONG_WAITING_TIME = get_now() - kong_wait_time_start

local headers = res.headers

-- Remove Content-Length header returned by Lambda service,
@@ -249,4 +251,13 @@ function AWSLambdaHandler:access(conf)
end


function AWSLambdaHandler:header_filter(conf)
-- TRACING: remove the latency of requesting AWS Lambda service from the KONG_RESPONSE_LATENCY
local ctx = ngx.ctx
if ctx.KONG_RESPONSE_LATENCY then
ctx.KONG_RESPONSE_LATENCY = ctx.KONG_RESPONSE_LATENCY - (kong.ctx.plugin.waiting_time or 0)
end
end


return AWSLambdaHandler
90 changes: 88 additions & 2 deletions spec/03-plugins/27-aws-lambda/99-access_spec.lua
@@ -10,6 +10,7 @@ local helpers = require "spec.helpers"
local meta = require "kong.meta"
local pl_file = require "pl.file"
local fixtures = require "spec.fixtures.aws-lambda"
local http_mock = require "spec.helpers.http_mock"

local TEST_CONF = helpers.test_conf
local server_tokens = meta._SERVER_TOKENS
@@ -22,6 +23,19 @@ for _, strategy in helpers.each_strategy() do
describe("Plugin: AWS Lambda (access) [#" .. strategy .. "]", function()
local proxy_client
local admin_client
local mock_http_server_port = helpers.get_available_port()

local mock = http_mock.new(mock_http_server_port, [[
ngx.print('hello world')
]], {
prefix = "mockserver",
log_opts = {
req = true,
req_body = true,
req_large_body = true,
},
tls = false,
})

lazy_setup(function()
local bp = helpers.get_db_utils(strategy, {
@@ -163,6 +177,12 @@ for _, strategy in helpers.each_strategy() do
service = null,
}

local route25 = bp.routes:insert {
hosts = { "lambda25.test" },
protocols = { "http", "https" },
service = null,
}

bp.plugins:insert {
name = "aws-lambda",
route = { id = route1.id },
@@ -489,6 +509,26 @@ for _, strategy in helpers.each_strategy() do
}
}

bp.plugins:insert {
name = "aws-lambda",
route = { id = route25.id },
config = {
port = 10001,
aws_key = "mock-key",
aws_secret = "mock-secret",
aws_region = "us-east-1",
function_name = "functionWithLatency",
}
}

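-- http-log ships the request's latencies table to the mock HTTP server
-- above, so the test can compare latencies.kong against latencies.proxy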
bp.plugins:insert {
route = { id = route25.id },
name = "http-log",
config = {
http_endpoint = "http://localhost:" .. mock_http_server_port,
}
}

fixtures.dns_mock:A({
name = "custom.lambda.endpoint",
address = "127.0.0.1",
@@ -511,7 +551,7 @@ for _, strategy in helpers.each_strategy() do
lazy_setup(function()
assert(helpers.start_kong({
database = strategy,
plugins = "aws-lambda",
plugins = "aws-lambda, http-log",
nginx_conf = "spec/fixtures/custom_nginx.template",
-- we don't actually use any stream proxy features in this test suite,
-- but this is needed in order to load our forward-proxy stream_mock fixture
@@ -1218,7 +1258,7 @@ for _, strategy in helpers.each_strategy() do
helpers.setenv("AWS_REGION", "us-east-1")
assert(helpers.start_kong({
database = strategy,
plugins = "aws-lambda",
plugins = "aws-lambda, http-log",
nginx_conf = "spec/fixtures/custom_nginx.template",
-- we don't actually use any stream proxy features in this test suite,
-- but this is needed in order to load our forward-proxy stream_mock fixture
@@ -1244,6 +1284,52 @@ for _, strategy in helpers.each_strategy() do
assert.is_array(res.headers["Access-Control-Allow-Origin"])
end)
end)

describe("With latency", function()
lazy_setup(function()
assert(mock:start())

helpers.setenv("AWS_REGION", "us-east-1")
assert(helpers.start_kong({
database = strategy,
plugins = "aws-lambda, http-log",
nginx_conf = "spec/fixtures/custom_nginx.template",
-- we don't actually use any stream proxy features in this test suite,
-- but this is needed in order to load our forward-proxy stream_mock fixture
stream_listen = helpers.get_proxy_ip(false) .. ":19000",
}, nil, nil, fixtures))
end)

lazy_teardown(function()
helpers.stop_kong()
helpers.unsetenv("AWS_REGION")
assert(mock:stop())
end)

it("invokes a Lambda function with GET and latency", function()
local res = assert(proxy_client:send {
method = "GET",
path = "/get",
headers = {
["Host"] = "lambda25.test"
}
})

assert.res_status(200, res)
local http_log_entries
assert.eventually(function ()
http_log_entries = mock:get_all_logs()
return #http_log_entries >= 1
end).with_timeout(10).is_truthy()
assert.is_not_nil(http_log_entries[1])
local log_entry_with_latency = cjson.decode(http_log_entries[1].req.body)
-- Accessing the AWS mock server always takes some measurable time,
-- so if latencies.kong < latencies.proxy we can assume the latency
-- calculation is working. Asserting on a precise number would make
-- this test flaky.
assert.True(log_entry_with_latency.latencies.kong < log_entry_with_latency.latencies.proxy)
end)
end)
end)

describe("Plugin: AWS Lambda with #vault [#" .. strategy .. "]", function ()
5 changes: 5 additions & 0 deletions spec/fixtures/aws-lambda.lua
@@ -67,6 +67,11 @@ local fixtures = {
elseif string.match(ngx.var.uri, "functionWithTransferEncodingHeader") then
ngx.say("{\"statusCode\": 200, \"headers\": { \"Transfer-Encoding\": \"chunked\", \"transfer-encoding\": \"chunked\"}}")
elseif string.match(ngx.var.uri, "functionWithLatency") then
-- simulate additional latency on the AWS Lambda API side
ngx.sleep(2)
ngx.say("{\"statusCode\": 200, \"body\": \"dGVzdA=\", \"isBase64Encoded\": false}")
elseif type(res) == 'string' then
ngx.header["Content-Length"] = #res + 1
ngx.say(res)
