import openai
import json
from typing import Dict, List, Literal, Optional, Union
from edenai_apis.utils.exception import ProviderException
from edenai_apis.features import ProviderInterface, TextInterface
from edenai_apis.features.text import ChatDataClass, ChatMessageDataClass
from edenai_apis.features.text.chat.chat_dataclass import (
    StreamChat,
    ChatStreamResponse,
    ToolCall,
)
from edenai_apis.utils.types import ResponseType
from edenai_apis.loaders.loaders import load_provider
from edenai_apis.loaders.data_loader import ProviderDataEnum
from edenai_apis.features.text.chat.helpers import get_tool_call_from_history_by_id
from edenai_apis.apis.openai.helpers import convert_tools_to_openai


class DeepseekApi(ProviderInterface, TextInterface):
    provider_name = "deepseek"

    def __init__(self, api_keys: Dict = {}) -> None:
        self.api_settings = load_provider(
            ProviderDataEnum.KEY, self.provider_name, api_keys=api_keys
        )
        self.api_key = self.api_settings["api_key"]
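        # Deepseek models are served here through Together AI's OpenAI-compatible
        # endpoint, so the stock OpenAI client is simply pointed at api.together.xyz.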
        self.client = openai.OpenAI(
            api_key=self.api_key, base_url="https://api.together.xyz/v1"
        )

    def text__chat(
        self,
        text: str,
        chatbot_global_action: Optional[str],
        previous_history: Optional[List[Dict[str, str]]],
        temperature: float,
        max_tokens: int,
        model: str,
        stream: bool = False,
        available_tools: Optional[List[dict]] = None,
        tool_choice: Literal["auto", "required", "none"] = "auto",
        tool_results: Optional[List[dict]] = None,
    ) -> ResponseType[Union[ChatDataClass, StreamChat]]:
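        """Run a chat completion against a Deepseek model, with optional
        streaming and OpenAI-style tool calling."""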
        previous_history = previous_history or []
        messages = []
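        # Rebuild the conversation: convert Eden AI history entries into
        # OpenAI-style message dicts, re-attaching any recorded tool calls.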
        for msg in previous_history:
            message = {
                "role": msg.get("role"),
                "content": msg.get("message"),
            }
            if msg.get("tool_calls"):
                message["tool_calls"] = [
                    {
                        "id": tool["id"],
                        "type": "function",
                        "function": {
                            "name": tool["name"],
                            "arguments": tool["arguments"],
                        },
                    }
                    for tool in msg["tool_calls"]
                ]
            messages.append(message)

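        # The new user turn is only appended when this call is not just
        # delivering tool results back to the model.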
        if text and not tool_results:
            messages.append({"role": "user", "content": text})

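        # Tool results provided by the caller are appended as "tool" messages,
        # each linked back to the tool call that produced it.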
        if tool_results:
            for tool in tool_results:
                tool_call = get_tool_call_from_history_by_id(
                    tool["id"], previous_history
                )
                try:
                    result = json.dumps(tool["result"])
                except (TypeError, ValueError):
                    # json.dumps raises TypeError (or ValueError) for
                    # non-serializable results, so fall back to str().
                    result = str(tool["result"])
                messages.append(
                    {
                        "role": "tool",
                        "content": result,
                        "tool_call_id": tool_call["id"],
                    }
                )

        if chatbot_global_action:
            messages.insert(0, {"role": "system", "content": chatbot_global_action})

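        # Together addresses Deepseek models under the "deepseek-ai/"
        # organization prefix, hence the prefixed model name below.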
        payload = {
            "model": f"deepseek-ai/{model}",
            "temperature": temperature,
            "messages": messages,
            "max_completion_tokens": max_tokens,
            "stream": stream,
        }

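        # Advertise the tool schema only when this call is not already
        # returning tool results.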
        if available_tools and not tool_results:
            payload["tools"] = convert_tools_to_openai(available_tools)
            payload["tool_choice"] = tool_choice

        try:
            response = self.client.chat.completions.create(**payload)
        except Exception as exc:
            raise ProviderException(str(exc)) from exc

        # Standardize the response
        if stream is False:
            message = response.choices[0].message
            generated_text = message.content
            original_tool_calls = message.tool_calls or []
            tool_calls = []
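            # The OpenAI SDK returns tool calls as objects rather than dicts,
            # so they are read with attribute access before being standardized.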
            for call in original_tool_calls:
                tool_calls.append(
                    ToolCall(
                        id=call.id,
                        name=call.function.name,
                        arguments=call.function.arguments,
                    )
                )
            messages = [
                ChatMessageDataClass(role="user", message=text, tools=available_tools),
                ChatMessageDataClass(
                    role="assistant",
                    message=generated_text,
                    tool_calls=tool_calls,
                ),
            ]
            messages_json = [m.dict() for m in messages]

            standardized_response = ChatDataClass(
                generated_text=generated_text, message=messages_json
            )

            return ResponseType[ChatDataClass](
                original_response=response.to_dict(),
                standardized_response=standardized_response,
            )
        else:
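            # Lazily wrap the SDK's chunk iterator into Eden AI's streaming
            # dataclass; a chunk is flagged as blocked when generation stops
            # for any reason other than a normal "stop".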
            stream_generator = (
                ChatStreamResponse(
                    text=chunk.to_dict()["choices"][0]["delta"].get("content", ""),
                    blocked=chunk.to_dict()["choices"][0].get("finish_reason")
                    not in (None, "stop"),
                    provider=self.provider_name,
                )
                for chunk in response
                if chunk
            )

            return ResponseType[StreamChat](
                original_response=None,
                standardized_response=StreamChat(stream=stream_generator),
            )