diff --git a/kong/llm/drivers/shared.lua b/kong/llm/drivers/shared.lua
index 871db7b59f93..ff8ecdc378f6 100644
--- a/kong/llm/drivers/shared.lua
+++ b/kong/llm/drivers/shared.lua
@@ -131,10 +131,10 @@ _M.clear_response_headers = {
 -- @return {string} error if any is thrown - request should definitely be terminated if this is not nil
 function _M.merge_config_defaults(request, options, request_format)
   if options then
-    request.temperature = request.temperature or options.temperature
-    request.max_tokens = request.max_tokens or options.max_tokens
-    request.top_p = request.top_p or options.top_p
-    request.top_k = request.top_k or options.top_k
+    request.temperature = options.temperature or request.temperature
+    request.max_tokens = options.max_tokens or request.max_tokens
+    request.top_p = options.top_p or request.top_p
+    request.top_k = options.top_k or request.top_k
   end
 
   return request, nil
diff --git a/spec/03-plugins/38-ai-proxy/01-unit_spec.lua b/spec/03-plugins/38-ai-proxy/01-unit_spec.lua
index 9ff754a1407f..9b5bc0a8a37f 100644
--- a/spec/03-plugins/38-ai-proxy/01-unit_spec.lua
+++ b/spec/03-plugins/38-ai-proxy/01-unit_spec.lua
@@ -628,8 +628,8 @@ describe(PLUGIN_NAME .. ": (unit)", function()
       local formatted, err = ai_shared.merge_config_defaults(
         SAMPLE_LLM_V1_CHAT_WITH_SOME_OPTS,
         {
-          max_tokens = 1024,
-          top_p = 1.0,
+          max_tokens = 256,
+          top_p = 0.2,
         },
         "llm/v1/chat"
       )
diff --git a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
index e9fb74c3114a..d89293c57161 100644
--- a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
+++ b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
@@ -841,6 +841,23 @@ for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then
         }, json.choices[1].message)
       end)
 
+      it("tries to override configured model", function()
+        local r = client:get("/openai/llm/v1/chat/good", {
+          headers = {
+            ["content-type"] = "application/json",
+            ["accept"] = "application/json",
+          },
+          body = pl_file.read("spec/fixtures/ai-proxy/openai/llm-v1-chat/requests/good_own_model.json"),
+        })
+
+        -- validate that the request is rejected with response status 400
+        local body = assert.res_status(400, r)
+        local json = cjson.decode(body)
+
+        -- check this is in the 'kong' response format
+        assert.same(json, { error = { message = "cannot use own model - must be: gpt-3.5-turbo" } })
+      end)
+
       it("bad upstream response", function()
         local r = client:get("/openai/llm/v1/chat/bad_upstream_response", {
           headers = {
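
For context, the behavioural change in the first hunk flips the precedence in _M.merge_config_defaults: values configured on the plugin (options) now override whatever the caller sent in the request body, instead of the other way around. The standalone Lua sketch below mirrors that merged logic outside of Kong; the helper name and sample values are illustrative only, not the actual module.

-- Minimal mirror of the new precedence rule (illustrative only):
-- plugin-configured options win over fields supplied in the incoming request.
local function merge_config_defaults(request, options)
  if options then
    request.temperature = options.temperature or request.temperature
    request.max_tokens  = options.max_tokens  or request.max_tokens
    request.top_p       = options.top_p       or request.top_p
    request.top_k       = options.top_k       or request.top_k
  end
  return request, nil
end

-- A caller asking for 1024 tokens is overridden by the configured limit of 256.
local merged = merge_config_defaults(
  { max_tokens = 1024, top_p = 1.0 },  -- incoming request body
  { max_tokens = 256,  top_p = 0.2 }   -- plugin configuration (options)
)
print(merged.max_tokens, merged.top_p)  -- 256  0.2

Under the old ordering a client could raise max_tokens or other tuning parameters past the operator's configured values; with the new ordering the plugin configuration is authoritative, which is what the updated unit test and the new "tries to override configured model" integration test assert.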