Skip to content

Commit

Permalink
feat(ai-proxy): add integration tests for allow_auth_override with Anthropic and Cohere providers
Browse files Browse the repository at this point in the history
  • Loading branch information
oowl committed Aug 14, 2024
1 parent fcc3e7b commit b35af1c
Show file tree
Hide file tree
Showing 3 changed files with 380 additions and 0 deletions.
128 changes: 128 additions & 0 deletions spec/03-plugins/38-ai-proxy/03-anthropic_integration_spec.lua
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,35 @@ for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then
},
},
}

-- Fixture: anthropic chat route whose configured auth header may NOT be
-- replaced by the client (allow_auth_override = false).
local chat_good_no_allow_override = assert(bp.routes:insert {
  service    = empty_service,
  protocols  = { "http" },
  strip_path = true,
  paths      = { "/anthropic/llm/v1/chat/good-no-allow-override" },
})

local anthropic_mock_url = "http://" .. helpers.mock_upstream_host
                        .. ":" .. MOCK_PORT .. "/llm/v1/chat/good"

bp.plugins:insert {
  name  = PLUGIN_NAME,
  route = { id = chat_good_no_allow_override.id },
  config = {
    route_type = "llm/v1/chat",
    auth = {
      header_name  = "x-api-key",
      header_value = "anthropic-key",
      -- client-supplied x-api-key headers are ignored on this route
      allow_auth_override = false,
    },
    model = {
      name     = "claude-2.1",
      provider = "anthropic",
      options = {
        max_tokens        = 256,
        temperature       = 1.0,
        upstream_url      = anthropic_mock_url,
        anthropic_version = "2023-06-01",
      },
    },
  },
}
--

-- 200 chat bad upstream response with one option
Expand Down Expand Up @@ -551,6 +580,105 @@ for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then
}, json.choices[1].message)
end)

it("good request with client right header auth", function()
  -- Client sends the same key the plugin is configured with; the request
  -- should proxy through and come back translated into the kong format.
  local res = client:get("/anthropic/llm/v1/chat/good", {
    headers = {
      ["content-type"] = "application/json",
      ["accept"]       = "application/json",
      ["x-api-key"]    = "anthropic-key",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/anthropic/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(200, res))

  -- response must be in the 'kong' (OpenAI-like) response format
  assert.equals("claude-2.1", json.model)
  assert.equals("chat.content", json.object)
  assert.equals("anthropic/claude-2.1", res.headers["X-Kong-LLM-Model"])

  assert.is_table(json.choices)
  assert.is_table(json.choices[1].message)
  assert.same({
    role    = "assistant",
    content = "The sum of 1 + 1 is 2.",
  }, json.choices[1].message)
end)

it("good request with client wrong header auth", function()
  -- Override is allowed on this route, so a wrong client key replaces the
  -- configured one and the upstream rejects the request with 401.
  local res = client:get("/anthropic/llm/v1/chat/good", {
    headers = {
      ["content-type"] = "application/json",
      ["accept"]       = "application/json",
      ["x-api-key"]    = "wrong",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/anthropic/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(401, res))

  -- anthropic-style error payload
  assert.is_truthy(json.error)
  assert.equals("authentication_error", json.error.type)
end)

it("good request with client right header auth and no allow_auth_override", function()
  -- allow_auth_override = false on this route; the client key is ignored
  -- and the plugin's configured (correct) key is used — request succeeds.
  local res = client:get("/anthropic/llm/v1/chat/good-no-allow-override", {
    headers = {
      ["content-type"] = "application/json",
      ["accept"]       = "application/json",
      ["x-api-key"]    = "anthropic-key",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/anthropic/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(200, res))

  -- response must be in the 'kong' (OpenAI-like) response format
  assert.equals("claude-2.1", json.model)
  assert.equals("chat.content", json.object)
  assert.equals("anthropic/claude-2.1", res.headers["X-Kong-LLM-Model"])

  assert.is_table(json.choices)
  assert.is_table(json.choices[1].message)
  assert.same({
    role    = "assistant",
    content = "The sum of 1 + 1 is 2.",
  }, json.choices[1].message)
end)

it("good request with client wrong header auth and no allow_auth_override", function()
  -- Key point of the feature: with allow_auth_override = false the wrong
  -- client key is discarded, the plugin's configured key is sent upstream,
  -- and the request still succeeds with 200.
  local res = client:get("/anthropic/llm/v1/chat/good-no-allow-override", {
    headers = {
      ["content-type"] = "application/json",
      ["accept"]       = "application/json",
      ["x-api-key"]    = "wrong",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/anthropic/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(200, res))

  -- response must be in the 'kong' (OpenAI-like) response format
  assert.equals("claude-2.1", json.model)
  assert.equals("chat.content", json.object)
  assert.equals("anthropic/claude-2.1", res.headers["X-Kong-LLM-Model"])

  assert.is_table(json.choices)
  assert.is_table(json.choices[1].message)
  assert.same({
    role    = "assistant",
    content = "The sum of 1 + 1 is 2.",
  }, json.choices[1].message)
end)

it("bad upstream response", function()
local r = client:get("/anthropic/llm/v1/chat/bad_upstream_response", {
headers = {
Expand Down
123 changes: 123 additions & 0 deletions spec/03-plugins/38-ai-proxy/04-cohere_integration_spec.lua
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,33 @@ for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then
},
},
}
-- Fixture: cohere chat route whose configured Authorization header may NOT
-- be replaced by the client (allow_auth_override = false).
local chat_good_no_allow_override = assert(bp.routes:insert {
  service    = empty_service,
  protocols  = { "http" },
  strip_path = true,
  paths      = { "/cohere/llm/v1/chat/good-no-allow-override" },
})

local cohere_mock_url = "http://" .. helpers.mock_upstream_host
                     .. ":" .. MOCK_PORT .. "/llm/v1/chat/good"

bp.plugins:insert {
  name  = PLUGIN_NAME,
  route = { id = chat_good_no_allow_override.id },
  config = {
    route_type = "llm/v1/chat",
    auth = {
      header_name  = "Authorization",
      header_value = "Bearer cohere-key",
      -- client-supplied Authorization headers are ignored on this route
      allow_auth_override = false,
    },
    model = {
      name     = "command",
      provider = "cohere",
      options = {
        max_tokens   = 256,
        temperature  = 1.0,
        upstream_url = cohere_mock_url,
      },
    },
  },
}
--

-- 200 chat bad upstream response with one option
Expand Down Expand Up @@ -426,6 +453,102 @@ for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then
}, json.choices[1].message)
end)

it("good request with right client auth", function()
  -- Client sends the same bearer token the plugin is configured with;
  -- the request proxies through successfully.
  local res = client:get("/cohere/llm/v1/chat/good", {
    headers = {
      ["content-type"]  = "application/json",
      ["accept"]        = "application/json",
      ["Authorization"] = "Bearer cohere-key",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/cohere/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(200, res))

  -- response must be in the 'kong' (OpenAI-like) response format
  assert.equals("command", json.model)
  assert.equals("chat.completion", json.object)
  assert.equals("cohere/command", res.headers["X-Kong-LLM-Model"])

  assert.is_table(json.choices)
  assert.is_table(json.choices[1].message)
  assert.same({
    role    = "assistant",
    content = "The sum of 1 + 1 is 2.",
  }, json.choices[1].message)
end)

it("good request with wrong client auth", function()
  -- Override is allowed on this route, so the wrong bearer token replaces
  -- the configured one and the upstream rejects the request with 401.
  local res = client:get("/cohere/llm/v1/chat/good", {
    headers = {
      ["content-type"]  = "application/json",
      ["accept"]        = "application/json",
      ["Authorization"] = "Bearer wrong",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/cohere/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(401, res))

  -- cohere-style error payload
  assert.is_truthy(json.message)
  assert.equals("invalid api token", json.message)
end)

it("good request with right client auth and no allow_auth_override", function()
  -- allow_auth_override = false; the client token is ignored and the
  -- plugin's configured (correct) token is used — request succeeds.
  local res = client:get("/cohere/llm/v1/chat/good-no-allow-override", {
    headers = {
      ["content-type"]  = "application/json",
      ["accept"]        = "application/json",
      ["Authorization"] = "Bearer cohere-key",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/cohere/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(200, res))

  -- response must be in the 'kong' (OpenAI-like) response format
  assert.equals("command", json.model)
  assert.equals("chat.completion", json.object)
  assert.equals("cohere/command", res.headers["X-Kong-LLM-Model"])

  assert.is_table(json.choices)
  assert.is_table(json.choices[1].message)
  assert.same({
    role    = "assistant",
    content = "The sum of 1 + 1 is 2.",
  }, json.choices[1].message)
end)

it("good request with wrong client auth and no allow_auth_override", function()
  -- Key point of the feature: with allow_auth_override = false the wrong
  -- client token is discarded, the configured token is sent upstream, and
  -- the request still succeeds with 200.
  local res = client:get("/cohere/llm/v1/chat/good-no-allow-override", {
    headers = {
      ["content-type"]  = "application/json",
      ["accept"]        = "application/json",
      ["Authorization"] = "Bearer wrong",
    },
    body = pl_file.read("spec/fixtures/ai-proxy/cohere/llm-v1-chat/requests/good.json"),
  })

  local json = cjson.decode(assert.res_status(200, res))

  -- response must be in the 'kong' (OpenAI-like) response format
  assert.equals("command", json.model)
  assert.equals("chat.completion", json.object)
  assert.equals("cohere/command", res.headers["X-Kong-LLM-Model"])

  assert.is_table(json.choices)
  assert.is_table(json.choices[1].message)
  assert.same({
    role    = "assistant",
    content = "The sum of 1 + 1 is 2.",
  }, json.choices[1].message)
end)

it("bad upstream response", function()
local r = client:get("/cohere/llm/v1/chat/bad_upstream_response", {
headers = {
Expand Down
Loading

0 comments on commit b35af1c

Please sign in to comment.