diff --git a/src/cpp/src/sequence_group.hpp b/src/cpp/src/sequence_group.hpp
index b6bcc83530..c423675e64 100644
--- a/src/cpp/src/sequence_group.hpp
+++ b/src/cpp/src/sequence_group.hpp
@@ -243,9 +243,12 @@ class SequenceGroup : public std::enable_shared_from_this<SequenceGroup> {

     SequenceGroup(uint64_t request_id, const ov::Tensor input_ids, const ov::genai::GenerationConfig& sampling_params, std::size_t block_size)
         : SequenceGroup(request_id, sampling_params, block_size) {
-        m_prompt_ids.resize(input_ids.get_size());
-        std::copy_n(input_ids.data<int64_t>(), input_ids.get_size(), m_prompt_ids.begin());
-        m_prompt_log_probs.reserve(m_prompt_ids.size());
+        size_t prompt_len = input_ids.get_size();
+        OPENVINO_ASSERT(prompt_len > 0, "Prompt length cannot be 0");
+
+        m_prompt_ids.resize(prompt_len);
+        std::copy_n(input_ids.data<int64_t>(), prompt_len, m_prompt_ids.begin());
+        m_prompt_log_probs.reserve(prompt_len);

         // create a single sequence
         add_sequence(Sequence::create(m_next_sequence_id++));
diff --git a/tests/cpp/block_manager.cpp b/tests/cpp/block_manager.cpp
index 46c2fdddd7..670a0dffe7 100644
--- a/tests/cpp/block_manager.cpp
+++ b/tests/cpp/block_manager.cpp
@@ -10,7 +10,7 @@

 TEST(TestBlockManager, general_test) {
     ov::genai::BlockManager bm = ov::genai::BlockManager(6, false, 4);
-    ov::genai::TokenIds prompt_ids;
+    ov::genai::TokenIds prompt_ids = {10, 0};

     ov::genai::SequenceGroup::Ptr sequence_group = std::make_shared<ov::genai::SequenceGroup>(
         0,
diff --git a/tests/python_tests/test_llm_pipeline.py b/tests/python_tests/test_llm_pipeline.py
index 5278f4424f..031c42a1dc 100644
--- a/tests/python_tests/test_llm_pipeline.py
+++ b/tests/python_tests/test_llm_pipeline.py
@@ -97,6 +97,14 @@ def test_batch_size_switch():
     ov_pipe.generate(["1", "2"], max_new_tokens=2)
     ov_pipe.generate(["a"], max_new_tokens=2)

+
+@pytest.mark.precommit
+@pytest.mark.nightly
+def test_empty_encoded_inputs_throw():
+    ov_pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4]
+    with pytest.raises(RuntimeError):
+        ov_pipe.generate(ov.Tensor(np.array([[]], dtype=np.int64)), max_new_tokens=2)
+
 #
 # Chat scenario
 #
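
For context, here is a minimal sketch of the behavior this change enforces from the Python side. It assumes `openvino_genai` is installed and that `model_dir` points at a locally exported model; the pipeline construction and model path are illustrative and not part of the diff — the new test above uses the suite's `read_model` helper instead:

```python
# Minimal sketch (assumption: model_dir holds an exported OpenVINO model).
import numpy as np
import openvino as ov
import openvino_genai

pipe = openvino_genai.LLMPipeline("model_dir", "CPU")

# An empty encoded prompt (shape [1, 0]) now trips the OPENVINO_ASSERT
# added in the SequenceGroup constructor and surfaces as a RuntimeError.
empty_ids = ov.Tensor(np.array([[]], dtype=np.int64))
try:
    pipe.generate(empty_ids, max_new_tokens=2)
except RuntimeError as err:
    print("Rejected as expected:", err)  # "Prompt length cannot be 0"
```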