|
8 | 8 |
|
9 | 9 | Requires OpenAI API Key
|
10 | 10 | """
|
11 |
| - |
| 11 | +import httpx |
12 | 12 | from jaseci.jsorc.live_actions import jaseci_action
|
13 | 13 | from jaseci.utils.utils import logger
|
14 |
| -import openai |
15 |
| -from typing import Union |
| 14 | +from openai import OpenAI |
| 15 | +from openai._types import Timeout, NotGiven, NOT_GIVEN |
| 16 | +from openai._base_client import DEFAULT_MAX_RETRIES |
| 17 | +from typing import Union, Mapping |
| 18 | + |
# Module-level OpenAI client handle. It starts as None and is populated by
# setup(); every action in this module obtains it through client().
openai_client = None


def client() -> "OpenAI":
    """Return the module-level OpenAI client created by setup().

    :return: the initialized ``OpenAI`` client instance.
    :raises Exception: if ``setup()`` has not been called successfully yet.
    """
    # Explicit None check (rather than truthiness) guards use-before-setup
    # without depending on how the client object evaluates as a boolean.
    if openai_client is None:
        # Fixed typo in the original message ("initilized").
        raise Exception("OpenAI client not initialized yet! Call setup() first.")
    return openai_client
16 | 27 |
|
17 | 28 |
|
@jaseci_action(act_group=["openai"], allow_remote=True)
def setup(
    api_key: str | None = None,
    organization: str | None = None,
    base_url: str | httpx.URL | None = None,
    timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
    max_retries: int = DEFAULT_MAX_RETRIES,
    default_headers: Mapping[str, str] | None = None,
    default_query: Mapping[str, object] | None = None,
    # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
    http_client: httpx.Client | None = None,
    # Enable or disable schema validation for data returned by the API.
    # When enabled an error APIResponseValidationError is raised
    # if the API responds with invalid data for the expected schema.
    #
    # This parameter may be removed or changed in the future.
    # If you rely on this feature, please open a GitHub issue
    # outlining your use-case to help us decide if it should be
    # part of our public interface in the future.
    _strict_response_validation: bool = False,
):
    """
    Initialize the module-level OpenAI client used by all other actions.

    Every parameter is forwarded verbatim to ``openai.OpenAI``; see the
    openai-python documentation for full semantics:
    https://github.com/openai/openai-python

    :param api_key: OpenAI API key (when None, the OpenAI SDK falls back to
        the OPENAI_API_KEY environment variable).
    :param organization: OpenAI organization id, if any.
    :param base_url: override for the API base URL (e.g. a proxy endpoint).
    :param timeout: per-request timeout forwarded to the client.
    :param max_retries: number of automatic retries on transient errors.
    :param default_headers: extra headers sent with every request.
    :param default_query: extra query parameters sent with every request.
    :param http_client: custom ``httpx.Client`` to use for transport.
    :param _strict_response_validation: enable response schema validation
        (private openai-python option; may change or be removed upstream).
    :return: True if the client was constructed, False on failure.
    """
    try:
        global openai_client
        openai_client = OpenAI(
            api_key=api_key,
            organization=organization,
            base_url=base_url,
            timeout=timeout,
            max_retries=max_retries,
            default_headers=default_headers,
            default_query=default_query,
            http_client=http_client,
            _strict_response_validation=_strict_response_validation,
        )
        return True
    except Exception:
        # logger.exception preserves the underlying traceback so the actual
        # cause is diagnosable; the original logger.error call discarded it
        # (and misspelled "occurred").
        logger.exception("Error occurred during OpenAI client initialization!")
        return False
44 | 71 |
|
45 | 72 |
|
46 | 73 | @jaseci_action(act_group=["openai"], allow_remote=True)
|
@@ -97,7 +124,7 @@ def completion(
|
97 | 124 | A list of completions generated by the GPT-3 model based on the provided prompt(s).
|
98 | 125 | """
|
99 | 126 |
|
100 |
| - response = openai.Completion.create( |
| 127 | + response = client().completions.create( |
101 | 128 | model=model,
|
102 | 129 | prompt=prompt,
|
103 | 130 | suffix=suffix,
|
@@ -156,7 +183,7 @@ def chat(
|
156 | 183 | A list of responses generated by the GPT-3.5 model based on the provided messages.
|
157 | 184 | """
|
158 | 185 |
|
159 |
| - response = openai.ChatCompletion.create( |
| 186 | + response = client().chat.completions.create( |
160 | 187 | model=model,
|
161 | 188 | messages=messages,
|
162 | 189 | temperature=temperature,
|
@@ -188,7 +215,7 @@ def get_embeddings(input: Union[str, list], model: str = "text-embedding-ada-002
|
188 | 215 | List[List[float]]:
|
189 | 216 | A list of embeddings, where each embedding is a list of floating-point numbers.
|
190 | 217 | """
|
191 |
| - response = openai.Embedding.create(model=model, inputs=input) |
| 218 | + response = client().embeddings.create(model=model, inputs=input) |
192 | 219 | response = [x.embedding for x in response.data]
|
193 | 220 | return response
|
194 | 221 |
|
@@ -251,15 +278,15 @@ def transcribe(
|
251 | 278 |
|
252 | 279 | audio_file = open(audio_file, "rb")
|
253 | 280 | if not translate:
|
254 |
| - transcription = openai.Audio.transcribe( |
| 281 | + transcription = client().audio.transcriptions.create( |
255 | 282 | file=audio_file,
|
256 | 283 | model=model,
|
257 | 284 | prompt=prompt,
|
258 | 285 | temperature=temperature,
|
259 | 286 | language=language,
|
260 | 287 | )
|
261 | 288 | else:
|
262 |
| - transcription = openai.Audio.translate( |
| 289 | + transcription = client().audio.translations.create( |
263 | 290 | file=audio_file,
|
264 | 291 | model=model,
|
265 | 292 | prompt=prompt,
|
@@ -293,7 +320,7 @@ def generate_image(
|
293 | 320 | A list of generated images, either as URLs or Base64-encoded JSON strings,
|
294 | 321 | depending on the value of `response_format`.
|
295 | 322 | """
|
296 |
| - response = openai.Image.create( |
| 323 | + response = client().images.generate( |
297 | 324 | prompt=prompt, n=n, size=size, response_format=response_format
|
298 | 325 | )
|
299 | 326 | response = [
|
@@ -349,7 +376,7 @@ def edit_image(
|
349 | 376 | image = open(image_file, "rb")
|
350 | 377 | mask = open(mask_file, "rb")
|
351 | 378 |
|
352 |
| - response = openai.Image.create_edit( |
| 379 | + response = client().images.edit( |
353 | 380 | prompt=prompt,
|
354 | 381 | n=n,
|
355 | 382 | size=size,
|
@@ -399,7 +426,7 @@ def variations_image(
|
399 | 426 |
|
400 | 427 | image = open(image_file, "rb")
|
401 | 428 |
|
402 |
| - response = openai.Image.create_variation( |
| 429 | + response = client().images.create_variation( |
403 | 430 | n=n, size=size, response_format=response_format, image=image
|
404 | 431 | )
|
405 | 432 | response = [
|
|
0 commit comments