diff --git a/src/cpp/include/openvino/genai/generation_config.hpp b/src/cpp/include/openvino/genai/generation_config.hpp
index 22edcb98c..2a2a5b749 100644
--- a/src/cpp/include/openvino/genai/generation_config.hpp
+++ b/src/cpp/include/openvino/genai/generation_config.hpp
@@ -38,9 +38,9 @@ enum class StopCriteria { EARLY, HEURISTIC, NEVER };
  * @param eos_token_id token_id of <eos> (end of sentence)
  * @param min_new_tokens set 0 probability for eos_token_id for the first eos_token_id generated tokens. Ignored for non continuous batching.
  *
- * @param stop_strings vector of strings that will cause pipeline to stop generating further tokens. Ignored for non continuous batching.
+ * @param stop_strings A set of strings that will cause pipeline to stop generating further tokens.
  * @param include_stop_str_in_output if set to true stop string that matched generation will be included in generation output (default: false)
- * @param stop_token_ids vector of tokens that will cause pipeline to stop generating further tokens. Ignored for non continuous batching.
+ * @param stop_token_ids A set of tokens that will cause pipeline to stop generating further tokens.
  * @param echo if set to true, output will include user prompt (default: false).
  * @param logprobs number of top logprobs computed for each position, if set to 0, logprobs are not computed and value 0.0 is returned.
  *                 Currently only single top logprob can be returned, so any logprobs > 1 is treated as logprobs == 1. (default: 0).
@@ -154,9 +154,9 @@ static constexpr ov::Property<size_t> max_new_tokens{"max_new_tokens"};
 static constexpr ov::Property<size_t> max_length{"max_length"};
 static constexpr ov::Property<bool> ignore_eos{"ignore_eos"};
 static constexpr ov::Property<size_t> min_new_tokens{"min_new_tokens"};
-static constexpr ov::Property<std::vector<std::string>> stop_strings{"stop_strings"};
+static constexpr ov::Property<std::set<std::string>> stop_strings{"stop_strings"};
 static constexpr ov::Property<bool> include_stop_str_in_output{"include_stop_str_in_output"};
-static constexpr ov::Property<std::vector<std::vector<int64_t>>> stop_token_ids{"stop_token_ids"};
+static constexpr ov::Property<std::set<int64_t>> stop_token_ids{"stop_token_ids"};
 
 static constexpr ov::Property<size_t> num_beam_groups{"num_beam_groups"};
 static constexpr ov::Property<size_t> num_beams{"num_beams"};
diff --git a/src/python/py_generation_config.cpp b/src/python/py_generation_config.cpp
index 43dc24382..8a6cc8c49 100644
--- a/src/python/py_generation_config.cpp
+++ b/src/python/py_generation_config.cpp
@@ -41,9 +41,9 @@ char generation_config_docstring[] = R"(
     ignore_eos: if set to true, then generation will not stop even if <eos> token is met.
     eos_token_id: token_id of <eos> (end of sentence)
     min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens. Ignored for non continuous batching.
-    stop_strings: list of strings that will cause pipeline to stop generating further tokens. Ignored for non continuous batching.
+    stop_strings: a set of strings that will cause pipeline to stop generating further tokens.
     include_stop_str_in_output: if set to true stop string that matched generation will be included in generation output (default: false)
-    stop_token_ids: list of tokens that will cause pipeline to stop generating further tokens. Ignored for non continuous batching.
+    stop_token_ids: a set of tokens that will cause pipeline to stop generating further tokens.
     echo: if set to true, the model will echo the prompt in the output.
     logprobs: number of top logprobs computed for each position, if set to 0, logprobs are not computed and value 0.0 is returned.
               Currently only single top logprob can be returned, so any logprobs > 1 is treated as logprobs == 1. (default: 0).
@@ -87,6 +87,9 @@ void init_generation_config(py::module_& m) {
         .def_readwrite("max_length", &GenerationConfig::max_length)
         .def_readwrite("ignore_eos", &GenerationConfig::ignore_eos)
         .def_readwrite("min_new_tokens", &GenerationConfig::min_new_tokens)
+        .def_readwrite("stop_strings", &GenerationConfig::stop_strings)
+        .def_readwrite("include_stop_str_in_output", &GenerationConfig::include_stop_str_in_output)
+        .def_readwrite("stop_token_ids", &GenerationConfig::stop_token_ids)
         .def_readwrite("num_beam_groups", &GenerationConfig::num_beam_groups)
         .def_readwrite("num_beams", &GenerationConfig::num_beams)
         .def_readwrite("diversity_penalty", &GenerationConfig::diversity_penalty)
diff --git a/src/python/py_utils.cpp b/src/python/py_utils.cpp
index 91108d5e1..c0d21a9df 100644
--- a/src/python/py_utils.cpp
+++ b/src/python/py_utils.cpp
@@ -248,7 +248,7 @@ ov::genai::OptionalGenerationConfig update_config_from_kwargs(const ov::genai::O
             res_config.stop_strings = py::cast<std::set<std::string>>(value);
         } else if (key == "include_stop_str_in_output") {
             res_config.include_stop_str_in_output = py::cast<bool>(value);
-        } else if (key == "include_stop_str_in_output") {
+        } else if (key == "stop_token_ids") {
             res_config.stop_token_ids = py::cast<std::set<int64_t>>(value);
         } else if (key == "max_length") {
             res_config.max_length = py::cast<size_t>(item.second);
@@ -311,11 +311,11 @@ bool generation_config_param_to_property(std::string key, py::object value, ov::
     } else if (key == "min_new_tokens") {
         map.insert(ov::genai::min_new_tokens(py::cast<size_t>(value)));
     } else if (key == "stop_strings") {
-        map.insert(ov::genai::stop_strings(py::cast<std::vector<std::string>>(value)));
+        map.insert(ov::genai::stop_strings(py::cast<std::set<std::string>>(value)));
     } else if (key == "include_stop_str_in_output") {
         map.insert(ov::genai::include_stop_str_in_output(py::cast<bool>(value)));
-    } else if (key == "include_stop_str_in_output") {
-        map.insert(ov::genai::stop_token_ids(py::cast<std::vector<std::vector<int64_t>>>(value)));
+    } else if (key == "stop_token_ids") {
+        map.insert(ov::genai::stop_token_ids(py::cast<std::set<int64_t>>(value)));
     } else if (key == "num_beam_groups") {
         map.insert(ov::genai::num_beam_groups(py::cast<size_t>(value)));
     } else if (key == "num_beams") {
diff --git a/tests/python_tests/test_generate_api.py b/tests/python_tests/test_generate_api.py
index 2f8085735..0149ce59e 100644
--- a/tests/python_tests/test_generate_api.py
+++ b/tests/python_tests/test_generate_api.py
@@ -848,3 +848,29 @@ def test_batch_switch():
     pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4]
     pipe.generate(["a"], max_new_tokens=2)
     pipe.generate(["1", "2"], max_new_tokens=2)
+
+
+@pytest.mark.precommit
+@pytest.mark.nightly
+def test_stop_token_ids():
+    pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4]
+    res = pipe.generate(
+        ov.Tensor([(1,)]),
+        max_new_tokens=3,
+        stop_token_ids={-1, 9935},
+        include_stop_str_in_output=False
+    )
+    assert 2 == len(res.tokens[0])
+    assert 9935 in res.tokens[0]
+
+
+@pytest.mark.precommit
+@pytest.mark.nightly
+def test_stop_strings():
+    pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4]
+    res = pipe.generate(
+        "",
+        max_new_tokens=5,
+        stop_strings={"ignored", "боль"}
+    )
+    assert "боль" not in res
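
Minimal usage sketch (not part of the patch) of the stop-criteria fields exposed by this change from Python. The model directory, device, prompt, and token id are placeholder assumptions; openvino_genai is assumed to be installed, and stop_strings / stop_token_ids are passed as Python sets to match the std::set-based properties above.

    import openvino_genai

    # Placeholder: directory with an exported OpenVINO LLM (assumption).
    pipe = openvino_genai.LLMPipeline("model_dir", "CPU")

    config = openvino_genai.GenerationConfig()
    config.max_new_tokens = 64
    # Sets, mirroring the def_readwrite bindings added in py_generation_config.cpp.
    config.stop_strings = {"\n\n"}
    config.stop_token_ids = {2}  # e.g. the model's eos token id (assumption)
    config.include_stop_str_in_output = False

    print(pipe.generate("Why is the Sun yellow?", config))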