Skip to content

Commit

Permalink
feat/adding-llama-and-gemini-classes (#405)
Browse files Browse the repository at this point in the history
* feat/adding-llama-and-gemini-classes

* fixing-gemini
  • Loading branch information
Shyam-Raghuwanshi authored Aug 3, 2024
1 parent fb14ff7 commit 4f00857
Show file tree
Hide file tree
Showing 10 changed files with 204 additions and 137 deletions.
2 changes: 1 addition & 1 deletion JS/edgechains/arakoodev/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
"dist"
],
"exports": {
"./openai": "./dist/openai/src/index.js",
"./ai": "./dist/ai/src/index.js",
"./vector-db": "./dist/vector-db/src/index.js",
"./document-loader": "./dist/document-loader/src/index.js",
"./splitter": "./dist/splitter/src/index.js",
Expand Down
3 changes: 3 additions & 0 deletions JS/edgechains/arakoodev/src/ai/src/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
// Public entry point for the `ai` package: re-export one client class per provider
// so consumers can `import { OpenAI | GeminiAI | LlamaAI } from "@arakoodev/.../ai"`.
export { OpenAI } from "./lib/openai/openai.js";
export { GeminiAI } from "./lib/gemini/gemini.js";
export { LlamaAI } from "./lib/llama/llama.js";
92 changes: 92 additions & 0 deletions JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import axios from "axios";
import { retry } from "@lifeomic/attempt"
const url = "https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent";

// Constructor options; when `apiKey` is omitted the class falls back to the
// GEMINI_API_KEY environment variable.
interface GeminiAIConstructionOptions {
apiKey?: string;
}

// One per-category safety verdict attached to a candidate by the API.
type SafetyRating = {
category: "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_DANGEROUS_CONTENT";
probability: "NEGLIGIBLE" | "LOW" | "MEDIUM" | "HIGH";
};

// A single text fragment inside a content turn.
type ContentPart = {
text: string;
};

// One conversational turn: its text parts plus the speaker role.
type Content = {
parts: ContentPart[];
role: string;
};

// One generated completion returned by the model.
type Candidate = {
content: Content;
finishReason: string;
index: number;
safetyRatings: SafetyRating[];
};

// Token accounting reported by the API for the request/response pair.
type UsageMetadata = {
promptTokenCount: number;
candidatesTokenCount: number;
totalTokenCount: number;
};

// Shape of the generateContent response body resolved by `GeminiAI.chat`.
type Response = {
candidates: Candidate[];
usageMetadata: UsageMetadata;
};


// MIME types the caller may request for the model's output.
type responseMimeType = "text/plain" | "application/json"


// Options for `GeminiAI.chat`: the prompt plus optional generation settings
// (`temperature`, `max_output_tokens`, `responseType`) and retry behavior
// (`max_retry` attempts with `delay` ms between them).
interface GeminiAIChatOptions {
model?: string;
max_output_tokens?: number;
temperature?: number;
prompt: string;
max_retry?: number;
responseType?: responseMimeType;
delay?: number
}

/**
 * Minimal client for Google's Gemini `generateContent` REST endpoint.
 *
 * The API key comes from the constructor option or, as a fallback, the
 * GEMINI_API_KEY environment variable.
 */
export class GeminiAI {
    apiKey: string;

    constructor(options: GeminiAIConstructionOptions) {
        this.apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
    }

    /**
     * Sends a single-turn user prompt to Gemini and resolves with the parsed
     * response body (candidates + usage metadata).
     *
     * Retries the HTTP call up to `max_retry` times (default 3) with `delay`
     * milliseconds between attempts (default 200).
     *
     * @param chatOptions the prompt plus optional generation/retry settings
     * @returns the Gemini generateContent response
     */
    async chat(chatOptions: GeminiAIChatOptions): Promise<Response> {
        const data = JSON.stringify({
            contents: [
                {
                    role: "user",
                    parts: [{ text: chatOptions.prompt }],
                },
            ],
            // Bug fix: these settings were previously attached to the axios
            // request config, where axios ignores unknown keys and the API
            // never received them. They belong in the request body under
            // `generationConfig` — and temperature must be a number, not the
            // string "0.7".
            generationConfig: {
                temperature: chatOptions.temperature ?? 0.7,
                maxOutputTokens: chatOptions.max_output_tokens ?? 1024,
                responseMimeType: chatOptions.responseType ?? "text/plain",
            },
        });

        const config = {
            method: "post",
            maxBodyLength: Infinity,
            url,
            headers: {
                "Content-Type": "application/json",
                "x-goog-api-key": this.apiKey,
            },
            data,
        };

        return await retry(
            async () => (await axios.request(config)).data,
            { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 }
        );
    }
}
103 changes: 103 additions & 0 deletions JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@

import axios from "axios";
import { role } from "../../types";
import { retry } from "@lifeomic/attempt";

const url = 'https://api.llama-api.com/chat/completions'

/** A single chat message in the Llama request payload. */
interface messageOption {
    role: role;
    content: string;
    name?: string;
}

/**
 * Options accepted by `LlamaAI.chat`/`makeRequest`: either a bare `prompt`
 * (wrapped as one user message) or an explicit `messages` list, plus optional
 * generation settings and retry behavior (`max_retry` attempts with `delay`
 * ms between them).
 */
interface llamaChatOptions {
    model?: string;
    role?: role;
    max_tokens?: number;
    temperature?: number;
    prompt?: string;
    messages?: messageOption[];
    stream?: boolean;
    max_retry?: number;
    delay?: number;
}
// Bug fix: removed the stray `[]` that followed the closing brace — it parsed
// as a dead empty-array expression statement, not as an array type.

/**
 * Minimal client for the Llama API chat-completions endpoint, with retrying
 * requests and a simple queue-backed streaming helper.
 */
export class LlamaAI {
    apiKey: string;
    // Buffer of response chunks consumed by `getSequences`.
    queue: string[];

    constructor({ apiKey }: { apiKey: string }) {
        this.apiKey = apiKey;
        this.queue = [];
    }

    /**
     * POSTs the chat payload, retrying up to `max_retry` times (default 3)
     * with `delay` ms between attempts (default 200).
     *
     * @returns the full axios response
     * @throws Error when every attempt fails
     */
    async makeRequest(chatOptions: llamaChatOptions) {
        try {
            return await retry(
                async () =>
                    await axios.post(
                        url,
                        {
                            model: chatOptions.model || "llama-13b-chat",
                            // A bare `prompt` is wrapped as one user message;
                            // otherwise the caller-supplied list is sent as-is.
                            messages: chatOptions.prompt
                                ? [
                                      {
                                          role: chatOptions.role || "user",
                                          content: chatOptions.prompt,
                                      },
                                  ]
                                : chatOptions.messages,
                            max_tokens: chatOptions.max_tokens || 1024,
                            stream: chatOptions.stream || false,
                            temperature: chatOptions.temperature || 0.7,
                        },
                        {
                            headers: { Authorization: "Bearer " + this.apiKey },
                        }
                    ),
                { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 }
            );
        } catch (error: any) {
            console.log(error);
            throw new Error(`Error while making request: ${error.message}`);
        }
    }

    /** Fetches the response and buffers each chunk of `response.data` onto the queue. */
    async _runStreamForJupyter(apiRequestJson: llamaChatOptions) {
        const response = await this.makeRequest(apiRequestJson);

        // NOTE(review): this iterates response.data directly — assumes the API
        // returns an iterable of chunks when `stream` is set; verify upstream.
        for (const chunk of response.data) {
            this.queue.push(chunk);
        }
    }

    /** Async generator draining the buffered chunks, pausing 100 ms between yields. */
    async *getSequences() {
        while (this.queue.length > 0) {
            yield this.queue.shift();
            await new Promise(resolve => setTimeout(resolve, 100));
        }
    }

    /**
     * Runs a streaming request and returns the chunk generator.
     *
     * Bug fix: the generator was previously created and discarded, so callers
     * always received `undefined`; it is now returned.
     */
    async runStream(apiRequestJson: llamaChatOptions) {
        await this._runStreamForJupyter(apiRequestJson);
        return this.getSequences();
    }

    /**
     * Runs a non-streaming request and returns the response body.
     *
     * @throws Error when the API responds with a non-200 status
     */
    async runSync(apiRequestJson: llamaChatOptions) {
        const response = await this.makeRequest(apiRequestJson);

        if (response.status !== 200) {
            throw new Error(`POST ${response.status} ${response.data.detail}`);
        }

        return response.data;
    }

    /** Dispatches to the streaming or synchronous path based on `stream`. */
    chat(chatOptions: llamaChatOptions) {
        if (chatOptions.stream) {
            return this.runStream(chatOptions);
        } else {
            return this.runSync(chatOptions);
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ export class OpenAI {
}

async chat(chatOptions: OpenAIChatOptions): Promise<OpenAIChatReturnOptions> {
const responce = await axios
const response = await axios
.post(
openAI_url,
{
Expand Down Expand Up @@ -99,13 +99,13 @@ export class OpenAI {
console.log("Error creating request:", error.message);
}
});
return responce[0].message;
return response[0].message;
}

async chatWithFunction(
chatOptions: chatWithFunctionOptions
): Promise<chatWithFunctionReturnOptions> {
const responce = await axios
const response = await axios
.post(
openAI_url,
{
Expand Down Expand Up @@ -143,7 +143,7 @@ export class OpenAI {
console.log("Error creating request:", error.message);
}
});
return responce[0].message;
return response[0].message;
}

async generateEmbeddings(resp): Promise<any> {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@ export type ChatModel =
| "gpt-3.5-turbo-0125"
| "gpt-3.5-turbo-16k-0613";

export type role = "user" | "assistant" | "system";
export type role = "user" | "assistant" | "system";
2 changes: 0 additions & 2 deletions JS/edgechains/arakoodev/src/openai/src/index.ts

This file was deleted.

This file was deleted.

0 comments on commit 4f00857

Please sign in to comment.