fix(utils.py): fix select tokenizer for custom tokenizer (#7599)
* fix(utils.py): fix select tokenizer for custom tokenizer

* fix(router.py): fix 'utils/token_counter' endpoint
krrishdholakia authored Jan 8, 2025
1 parent 04eb718 commit 07c5f13
Showing 4 changed files with 16 additions and 7 deletions.
14 changes: 11 additions & 3 deletions litellm/proxy/_new_secret_config.yaml
@@ -10,13 +10,21 @@ model_list:
       api_key: os.environ/OPENAI_API_KEY
   - model_name: chatbot_actions
     litellm_params:
-      model: langfuse/openai-gpt-3.5-turbo
+      model: langfuse/azure/gpt-4o
       api_base: "os.environ/AZURE_API_BASE"
       api_key: "os.environ/AZURE_API_KEY"
       tpm: 1000000
       prompt_id: "jokes"
-  - model_name: openai-gpt-3.5-turbo
+  - model_name: openai-deepseek
     litellm_params:
-      model: openai/gpt-3.5-turbo
+      model: deepseek/deepseek-chat
       api_key: os.environ/OPENAI_API_KEY
+    model_info:
+      access_groups: ["restricted-models"]
+      custom_tokenizer:
+        identifier: deepseek-ai/DeepSeek-V3-Base
+        revision: main
+        auth_token: os.environ/HUGGINGFACE_API_KEY
+
+
 litellm_settings:
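
For reference, the new model_info.custom_tokenizer block is the per-model configuration that the proxy hands to the tokenizer-selection code patched in litellm/utils.py below. A rough in-memory equivalent (key names taken from that hunk, values copied from the YAML; everything else illustrative):

import os

# Rough Python equivalent of the YAML `custom_tokenizer` block above.
custom_tokenizer = {
    "identifier": "deepseek-ai/DeepSeek-V3-Base",         # Hugging Face repo to load the tokenizer from
    "revision": "main",                                    # git revision of that repo
    "auth_token": os.environ.get("HUGGINGFACE_API_KEY"),   # token resolved from the environment
}

The access_groups entry is unrelated to the tokenizer fix; it only restricts which keys or teams may call this deployment.
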
1 change: 1 addition & 0 deletions litellm/proxy/proxy_server.py
@@ -5606,6 +5606,7 @@ async def token_counter(request: TokenCountRequest):
     _tokenizer_used = litellm.utils._select_tokenizer(
         model=model_to_use, custom_tokenizer=custom_tokenizer
     )
+
     tokenizer_used = str(_tokenizer_used["type"])
     total_tokens = token_counter(
         model=model_to_use,
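
This is the 'utils/token_counter' endpoint named in the commit message. A hedged sketch of exercising it against a locally running proxy (the URL, key, and request/response field names are assumptions, not taken from this diff):

import requests

# Assumed request shape for the proxy's /utils/token_counter endpoint.
resp = requests.post(
    "http://localhost:4000/utils/token_counter",   # default local proxy address (assumption)
    headers={"Authorization": "Bearer sk-1234"},   # placeholder proxy key
    json={
        "model": "openai-deepseek",                # model_name from the YAML config above
        "messages": [{"role": "user", "content": "Hello, world!"}],
    },
)
print(resp.json())  # expected to report the token count and which tokenizer type was used
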
2 changes: 1 addition & 1 deletion litellm/router.py
@@ -4379,7 +4379,7 @@ def get_router_model_info(
             pass

         ## GET LITELLM MODEL INFO - raises exception, if model is not mapped
-        if not model.startswith(custom_llm_provider):
+        if not model.startswith("{}/".format(custom_llm_provider)):
             model_info_name = "{}/{}".format(custom_llm_provider, model)
         else:
             model_info_name = model
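
To see why the added "/" matters: a model id that merely shares a prefix with its provider name previously passed the check and skipped prefixing. An illustrative comparison (hypothetical values, not from the diff):

custom_llm_provider = "deepseek"
model = "deepseek-chat"  # not provider-prefixed, but still starts with "deepseek"

# Old check: false positive, so the "deepseek/" prefix was never added.
print(model.startswith(custom_llm_provider))                  # True
# New check: only a real "provider/" prefix matches.
print(model.startswith("{}/".format(custom_llm_provider)))    # False -> model_info_name becomes "deepseek/deepseek-chat"
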
6 changes: 3 additions & 3 deletions litellm/utils.py
@@ -1278,12 +1278,12 @@ def _select_tokenizer(
     model: str, custom_tokenizer: Optional[CustomHuggingfaceTokenizer] = None
 ):
     if custom_tokenizer is not None:
-        custom_tokenizer = Tokenizer.from_pretrained(
-            custom_tokenizer["identifier"],
+        _tokenizer = create_pretrained_tokenizer(
+            identifier=custom_tokenizer["identifier"],
             revision=custom_tokenizer["revision"],
             auth_token=custom_tokenizer["auth_token"],
         )
-        return {"type": "huggingface_tokenizer", "tokenizer": custom_tokenizer}
+        return _tokenizer
     return _select_tokenizer_helper(model=model)
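
The fix swaps the direct Tokenizer.from_pretrained call for litellm's create_pretrained_tokenizer helper and returns its result. A minimal SDK-level sketch of that same helper in isolation (model and tokenizer names reused from the config above; exact signatures assumed from litellm's public docs):

import litellm

# Build the Hugging Face tokenizer dict once...
custom_tokenizer = litellm.create_pretrained_tokenizer(
    identifier="deepseek-ai/DeepSeek-V3-Base",
    revision="main",
    auth_token=None,  # or a Hugging Face token for gated repos
)

# ...then count tokens with it, bypassing the model's default tokenizer.
tokens = litellm.token_counter(
    model="deepseek/deepseek-chat",
    custom_tokenizer=custom_tokenizer,
    messages=[{"role": "user", "content": "Hello, world!"}],
)
print(tokens)
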
