Skip to content

Commit

Permalink
Define models for tokenizer request/response
Browse files Browse the repository at this point in the history
  • Loading branch information
NeonDaniel committed Dec 20, 2024
1 parent 24a170c commit 8b83a41
Showing 1 changed file with 13 additions and 1 deletion.
14 changes: 13 additions & 1 deletion neon_data_models/models/api/mq/brainforge.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import List, Optional, Any, Dict
from typing import List, Optional, Any, Dict, Literal
from pydantic import Field

from neon_data_models.models.base.contexts import MQContext
Expand Down Expand Up @@ -85,6 +85,18 @@ class LLMGetCompletionResponse(MQContext):
description="Raw completion response from an OpenAI endpoint")


class LLMGetTokenizerChatTemplate(LLMGetModels):
    """
    Request to apply a model tokenizer's chat template to a list of
    messages.
    """
    model: str = Field(description="Model to request (<name>@<revision>)")
    messages: List[Dict[Literal["role", "content"], str]] = Field(
        description="List of dict messages in OpenAI format")
    # NOTE(review): these appear to mirror HuggingFace
    # `tokenizer.apply_chat_template` kwargs — confirm against the consumer
    tokenize: bool = Field(
        False, description="If True, tokenize the templated output")
    add_generation_prompt: bool = Field(
        True, description="If True, append a generation prompt to the "
                          "templated output")


class LLMGetTokenizerChatTemplateResponse(MQContext):
    """
    Response containing the prompt string produced by applying a
    tokenizer's chat template (see `LLMGetTokenizerChatTemplate`).
    """
    prompt: str = Field(description="Prompt generated by the tokenizer")


# Combines an LLM response payload with MQ routing context; no extra fields.
class LLMGetInferenceResponse(LLMResponse, MQContext):
    pass

Expand Down

0 comments on commit 8b83a41

Please sign in to comment.