diff --git a/edenai_apis/apis/google/google_text_api.py b/edenai_apis/apis/google/google_text_api.py
index a9a02f7b..c7539017 100644
--- a/edenai_apis/apis/google/google_text_api.py
+++ b/edenai_apis/apis/google/google_text_api.py
@@ -332,7 +332,9 @@ def text__embeddings(
         self, texts: List[str], model: Optional[str] = None
     ) -> ResponseType[EmbeddingsDataClass]:
         model = model.split("__")[1] if "__" in model else model
-        response = self.clients["llm_client"].embeddings(texts=texts, model=model)
+        response = self.clients["llm_client"].embeddings(
+            texts=texts, provider_model_name=f"vertex_ai/{model}", model=model
+        )
         return response
 
     def text__code_generation(
@@ -476,10 +478,8 @@ def text__search(
         ).original_response
 
         # Extracts embeddings from texts & query
-        texts_embed = [
-            item["embeddings"]["values"] for item in texts_embed_response["predictions"]
-        ]
-        query_embed = query_embed_response["predictions"][0]["embeddings"]["values"]
+        texts_embed = list(texts_embed_response["data"][0]["embedding"])
+        query_embed = query_embed_response["embeddings"][0]
         items = []
 
         # Calculate score for each text index
diff --git a/edenai_apis/llmengine/clients/litellm_client/litellm_client.py b/edenai_apis/llmengine/clients/litellm_client/litellm_client.py
index 1f50f6c5..f61caf36 100644
--- a/edenai_apis/llmengine/clients/litellm_client/litellm_client.py
+++ b/edenai_apis/llmengine/clients/litellm_client/litellm_client.py
@@ -212,6 +212,7 @@ def embedding(
         self,
         input=[],
         model: Optional[str] = None,
+        provider_model_name: Optional[str] = None,
         # Optional params
         dimensions: Optional[int] = None,
         timeout=600,  # default to 10 minutes
@@ -229,6 +230,8 @@
         if model is not None:
             self.model_name = model
             call_params["model"] = f"{self.provider_name}/{model}"
+        if provider_model_name:
+            call_params["model"] = provider_model_name
         call_params["timeout"] = timeout
         if dimensions is not None:
             call_params["dimensions"] = dimensions