diff --git a/.github/workflows/main_ikyet.yml b/.github/workflows/main_ikyet.yml
index 9d65d14..0eb5a21 100644
--- a/.github/workflows/main_ikyet.yml
+++ b/.github/workflows/main_ikyet.yml
@@ -26,12 +26,12 @@ jobs:
run: |
python -m venv venv
source venv/bin/activate
-
+
- name: Install dependencies
run: pip install -r requirements.txt
-
+
# Optional: Add step to run tests here (PyTest, Django test suites, etc.)
-
+
- name: Upload artifact for deployment jobs
uses: actions/upload-artifact@v2
with:
@@ -53,7 +53,7 @@ jobs:
with:
name: python-app
path: .
-
+
- name: 'Deploy to Azure Web App'
uses: azure/webapps-deploy@v2
id: deploy-to-webapp
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app_websocket.py b/app_websocket.py
index ed68320..b946167 100644
--- a/app_websocket.py
+++ b/app_websocket.py
@@ -1,16 +1,19 @@
-import os, time, yaml, tiktoken
-import uvicorn
-import threading
+import os
import shutil
import tempfile
+import threading
+import tiktoken
+import time
+import yaml
from typing import List
+import uvicorn
from fastapi import FastAPI, File, UploadFile, WebSocket, WebSocketDisconnect, Request
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
-from models.gpt_nova import generate as g
+from ikyet_render.models.gpt.gpt_messages import generate as g
from output_adv import final as m
from output_quick import director
from output_quick import final as n
@@ -34,6 +37,7 @@
messages = [{'role': 'system', 'content': jenn}]
websocket_connections = {}
+
@app.get("/")
async def index(request: Request):
return templates.TemplateResponse("index.html", {"request": request, "report": None})
@@ -80,7 +84,7 @@ async def playgrd(websocket: WebSocket):
@app.websocket("/jen")
async def jen(websocket: WebSocket):
await websocket.accept()
- #websocket_connections[client_id] = websocket
+ # websocket_connections[client_id] = websocket
while True:
try:
data = await websocket.receive_json()
@@ -99,7 +103,7 @@ async def jen(websocket: WebSocket):
await websocket.send_json({'output': res})
except WebSocketDisconnect:
print("websocket disconnected")
- #del websocket_connections[client_id]
+ # del websocket_connections[client_id]
await websocket.close()
break
@@ -168,8 +172,8 @@ async def download():
finally:
threading.Thread(target=delayed_delete, args=(directory_path,)).start()
# finally:
- # shutil.rmtree(temp_dir)
- # shutil.rmtree(directory_path)
+ # shutil.rmtree(temp_dir)
+ # shutil.rmtree(directory_path)
def delayed_delete(*paths):
@@ -189,6 +193,7 @@ def generate_directory_name():
directory = director()
return directory
+
def tokens(message):
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = len(encoding.encode(message))
diff --git a/client/index_mob.html b/client/index_mob.html
index d4cf2f8..d28bbd0 100644
--- a/client/index_mob.html
+++ b/client/index_mob.html
@@ -10,14 +10,17 @@
margin: 0;
font-family: Arial, Helvetica, sans-serif;
}
+
.top-container {
background-color: #f1f1f1;
padding: 30px;
text-align: center;
}
+
.content {
padding: 16px;
}
+
@media screen and (max-width: 600px) {
.top-container, .content {
padding: 10px;
diff --git a/client/pw.html b/client/pw.html
index 0b3a338..2498103 100644
--- a/client/pw.html
+++ b/client/pw.html
@@ -85,11 +85,13 @@
I
-
Academics: talk with your friendly teaching assistant "Kat"
+
Academics: talk with your friendly teaching
+ assistant "Kat"
Special commands to use:
/imagine: to generate images using AI(standard diffusion model)
- /ppt: to generate powerpoint presentation file on the topic
- /yt: to summarize the youtube video, just paste the youtube link
- /input: to read the file and perform the task using AI, press the input and submit button. then enter the query.
+ /ppt: to generate powerpoint presentation file on the topic
+ /yt: to summarize the youtube video, just paste the youtube link
+ /input: to read the file and perform the task using AI, press the input and submit button. then enter
+ the query.
diff --git a/client/style.css b/client/style.css
index cb8a762..0d5ad8d 100644
--- a/client/style.css
+++ b/client/style.css
@@ -16,24 +16,25 @@
@import "./options.css";
@import "./theme-toggler.css";
@import "./checkbox1.css";
+
@media screen and (max-width: 768px) {
- .navbar {
- flex-direction: column;
- }
+ .navbar {
+ flex-direction: column;
+ }
- .nav-links {
- flex-direction: column;
- }
+ .nav-links {
+ flex-direction: column;
+ }
- .main-container {
- flex-direction: column-reverse;
- }
+ .main-container {
+ flex-direction: column-reverse;
+ }
- .sidebar, .conversation {
- width: 100%;
- }
+ .sidebar, .conversation {
+ width: 100%;
+ }
- .mobile-sidebar {
- display: block;
- }
+ .mobile-sidebar {
+ display: block;
+ }
}
\ No newline at end of file
diff --git a/models/claude2_file.py b/models/claude2_file.py
index 9eae05e..e3c4e8b 100644
--- a/models/claude2_file.py
+++ b/models/claude2_file.py
@@ -1,8 +1,8 @@
from .claude_api import Client
-
SESSION_KEY = "__cf_bm=RNkxkN5Pnf8x2.bhA8tjTbHK.D8hVJbjT5JG2ma9LwA-1694918420-0-AV7bo+LTz6IVIVUy3+7FGFoYsYXPd8fRHzvIw7X548cCftVlhdjRSa7cC7Ojte4qsK1TANBcF76YfceExCbweLA=; cf_clearance=2m7AzMArl5_N2ckA3cmUGb_vwgNTF5NeRcolmJfxd6E-1694918423-0-1-9ce7f260.a4aafa5c.cbe6ad80-0.2.1694918423; intercom-device-id-lupk8zyo=d88ca19d-429d-4280-bfc0-736aaf3cf335; sessionKey=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; intercom-session-lupk8zyo=aDRhQUR2SlpDN1NKb2JzQ2ZDdFNlK2VmS3l6UXdQQXFJZFB0cGFoODlQN0tyZDVCaHpJVW1DS0g4RExqemQyZi0tMGRyYkhHeG9DSGl3VGdMNHFQTGx4Zz09--6143d3d68836a0d48d989f306644cdec76e9e3db"
+
def file(files):
print("this is claude2")
claude = Client(SESSION_KEY)
diff --git a/models/claude_api.py b/models/claude_api.py
index b3e83f9..3ef9a61 100644
--- a/models/claude_api.py
+++ b/models/claude_api.py
@@ -1,308 +1,305 @@
import json
import os
+import re
import uuid
-from curl_cffi import requests
+
import requests as req
-import re
+from curl_cffi import requests
class Client:
- def __init__(self, cookie):
- self.cookie = cookie
- self.organization_id = self.get_organization_id()
-
- def get_organization_id(self):
- url = "https://claude.ai/api/organizations"
-
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Referer': 'https://claude.ai/chats',
- 'Content-Type': 'application/json',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'Connection': 'keep-alive',
- 'Cookie': f'{self.cookie}'
- }
-
- response = requests.get(url, headers=headers,impersonate="chrome110")
- res = json.loads(response.text)
- print(res)
- uuid = res[0]['uuid']
-
- return uuid
-
- def get_content_type(self, file_path):
- # Function to determine content type based on file extension
- extension = os.path.splitext(file_path)[-1].lower()
- if extension == '.pdf':
- return 'application/pdf'
- elif extension == '.txt':
- return 'text/plain'
- elif extension == '.csv':
- return 'text/csv'
- # Add more content types as needed for other file types
- else:
- return 'application/octet-stream'
-
- # Lists all the conversations you had with Claude
- def list_all_conversations(self):
- url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations"
-
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Referer': 'https://claude.ai/chats',
- 'Content-Type': 'application/json',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'Connection': 'keep-alive',
- 'Cookie': f'{self.cookie}'
- }
-
- response = requests.get(url, headers=headers,impersonate="chrome110")
- conversations = response.json()
-
- # Returns all conversation information in a list
- if response.status_code == 200:
- return conversations
- else:
- print(f"Error: {response.status_code} - {response.text}")
-
- # Send Message to Claude
- def send_message(self, prompt, conversation_id, attachment=None,timeout=500):
- url = "https://claude.ai/api/append_message"
-
- # Upload attachment if provided
- attachments = []
- if attachment:
- attachment_response = self.upload_attachment(attachment)
- if attachment_response:
- attachments = [attachment_response]
- else:
- return {"Error: Invalid file format. Please try again."}
-
- # Ensure attachments is an empty list when no attachment is provided
- if not attachment:
- attachments = []
-
- payload = json.dumps({
- "completion": {
- "prompt": f"{prompt}",
- "timezone": "America/New_York",
- "model": "claude-2"
- },
- "organization_uuid": f"{self.organization_id}",
- "conversation_uuid": f"{conversation_id}",
- "text": f"{prompt}",
- "attachments": attachments
- })
-
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept': 'text/event-stream, text/event-stream',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Referer': 'https://claude.ai/chats',
- 'Content-Type': 'application/json',
- 'Origin': 'https://claude.ai',
- 'DNT': '1',
- 'Connection': 'keep-alive',
- 'Cookie': f'{self.cookie}',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'TE': 'trailers'
- }
-
- response = requests.post( url, headers=headers, data=payload,impersonate="chrome110",timeout=500)
- decoded_data = response.content.decode("utf-8")
- decoded_data = re.sub('\n+', '\n', decoded_data).strip()
- data_strings = decoded_data.split('\n')
- completions = []
- for data_string in data_strings:
- json_str = data_string[6:].strip()
- data = json.loads(json_str)
- if 'completion' in data:
- completions.append(data['completion'])
-
- answer = ''.join(completions)
-
- # Returns answer
- return answer
-
- # Deletes the conversation
- def delete_conversation(self, conversation_id):
- url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations/{conversation_id}"
-
- payload = json.dumps(f"{conversation_id}")
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Content-Type': 'application/json',
- 'Content-Length': '38',
- 'Referer': 'https://claude.ai/chats',
- 'Origin': 'https://claude.ai',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'Connection': 'keep-alive',
- 'Cookie': f'{self.cookie}',
- 'TE': 'trailers'
- }
-
- response = requests.delete( url, headers=headers, data=payload,impersonate="chrome110")
-
- # Returns True if deleted or False if any error in deleting
- if response.status_code == 204:
- return True
- else:
- return False
-
- # Returns all the messages in conversation
- def chat_conversation_history(self, conversation_id):
- url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations/{conversation_id}"
-
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Referer': 'https://claude.ai/chats',
- 'Content-Type': 'application/json',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'Connection': 'keep-alive',
- 'Cookie': f'{self.cookie}'
- }
-
- response = requests.get( url, headers=headers,impersonate="chrome110")
-
-
- # List all the conversations in JSON
- return response.json()
-
- def generate_uuid(self):
- random_uuid = uuid.uuid4()
- random_uuid_str = str(random_uuid)
- formatted_uuid = f"{random_uuid_str[0:8]}-{random_uuid_str[9:13]}-{random_uuid_str[14:18]}-{random_uuid_str[19:23]}-{random_uuid_str[24:]}"
- return formatted_uuid
-
- def create_new_chat(self):
- url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations"
- uuid = self.generate_uuid()
-
- payload = json.dumps({"uuid": uuid, "name": ""})
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Referer': 'https://claude.ai/chats',
- 'Content-Type': 'application/json',
- 'Origin': 'https://claude.ai',
- 'DNT': '1',
- 'Connection': 'keep-alive',
- 'Cookie': self.cookie,
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'TE': 'trailers'
- }
-
- response = requests.post( url, headers=headers, data=payload,impersonate="chrome110")
-
- # Returns JSON of the newly created conversation information
- return response.json()
-
- # Resets all the conversations
- def reset_all(self):
- conversations = self.list_all_conversations()
-
- for conversation in conversations:
- conversation_id = conversation['uuid']
- delete_id = self.delete_conversation(conversation_id)
-
- return True
-
- def upload_attachment(self, file_path):
- if file_path.endswith('.txt'):
- file_name = os.path.basename(file_path)
- file_size = os.path.getsize(file_path)
- file_type = "text/plain"
- with open(file_path, 'r', encoding='utf-8') as file:
- file_content = file.read()
-
- return {
- "file_name": file_name,
- "file_type": file_type,
- "file_size": file_size,
- "extracted_content": file_content
- }
- url = 'https://claude.ai/api/convert_document'
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Referer': 'https://claude.ai/chats',
- 'Origin': 'https://claude.ai',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'Connection': 'keep-alive',
- 'Cookie': f'{self.cookie}',
- 'TE': 'trailers'
- }
-
- file_name = os.path.basename(file_path)
- content_type = self.get_content_type(file_path)
-
- files = {
- 'file': (file_name, open(file_path, 'rb'), content_type),
- 'orgUuid': (None, self.organization_id)
- }
-
- response = req.post(url, headers=headers, files=files)
- if response.status_code == 200:
- return response.json()
- else:
- return False
-
-
-
- # Renames the chat conversation title
- def rename_chat(self, title, conversation_id):
- url = "https://claude.ai/api/rename_chat"
-
- payload = json.dumps({
- "organization_uuid": f"{self.organization_id}",
- "conversation_uuid": f"{conversation_id}",
- "title": f"{title}"
- })
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Content-Type': 'application/json',
- 'Referer': 'https://claude.ai/chats',
- 'Origin': 'https://claude.ai',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'Connection': 'keep-alive',
- 'Cookie': f'{self.cookie}',
- 'TE': 'trailers'
- }
-
- response = requests.post(url, headers=headers, data=payload,impersonate="chrome110")
-
- if response.status_code == 200:
- return True
- else:
- return False
-
+ def __init__(self, cookie):
+ self.cookie = cookie
+ self.organization_id = self.get_organization_id()
+
+ def get_organization_id(self):
+ url = "https://claude.ai/api/organizations"
+
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Referer': 'https://claude.ai/chats',
+ 'Content-Type': 'application/json',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Connection': 'keep-alive',
+ 'Cookie': f'{self.cookie}'
+ }
+
+ response = requests.get(url, headers=headers, impersonate="chrome110")
+ res = json.loads(response.text)
+ print(res)
+ uuid = res[0]['uuid']
+
+ return uuid
+
+ def get_content_type(self, file_path):
+ # Function to determine content type based on file extension
+ extension = os.path.splitext(file_path)[-1].lower()
+ if extension == '.pdf':
+ return 'application/pdf'
+ elif extension == '.txt':
+ return 'text/plain'
+ elif extension == '.csv':
+ return 'text/csv'
+ # Add more content types as needed for other file types
+ else:
+ return 'application/octet-stream'
+
+ # Lists all the conversations you had with Claude
+ def list_all_conversations(self):
+ url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations"
+
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Referer': 'https://claude.ai/chats',
+ 'Content-Type': 'application/json',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Connection': 'keep-alive',
+ 'Cookie': f'{self.cookie}'
+ }
+
+ response = requests.get(url, headers=headers, impersonate="chrome110")
+ conversations = response.json()
+
+ # Returns all conversation information in a list
+ if response.status_code == 200:
+ return conversations
+ else:
+ print(f"Error: {response.status_code} - {response.text}")
+
+ # Send Message to Claude
+ def send_message(self, prompt, conversation_id, attachment=None, timeout=500):
+ url = "https://claude.ai/api/append_message"
+
+ # Upload attachment if provided
+ attachments = []
+ if attachment:
+ attachment_response = self.upload_attachment(attachment)
+ if attachment_response:
+ attachments = [attachment_response]
+ else:
+ return {"Error: Invalid file format. Please try again."}
+
+ # Ensure attachments is an empty list when no attachment is provided
+ if not attachment:
+ attachments = []
+
+ payload = json.dumps({
+ "completion": {
+ "prompt": f"{prompt}",
+ "timezone": "America/New_York",
+ "model": "claude-2"
+ },
+ "organization_uuid": f"{self.organization_id}",
+ "conversation_uuid": f"{conversation_id}",
+ "text": f"{prompt}",
+ "attachments": attachments
+ })
+
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept': 'text/event-stream, text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Referer': 'https://claude.ai/chats',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://claude.ai',
+ 'DNT': '1',
+ 'Connection': 'keep-alive',
+ 'Cookie': f'{self.cookie}',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'TE': 'trailers'
+ }
+
+        response = requests.post(url, headers=headers, data=payload, impersonate="chrome110", timeout=timeout)
+ decoded_data = response.content.decode("utf-8")
+ decoded_data = re.sub('\n+', '\n', decoded_data).strip()
+ data_strings = decoded_data.split('\n')
+ completions = []
+ for data_string in data_strings:
+ json_str = data_string[6:].strip()
+ data = json.loads(json_str)
+ if 'completion' in data:
+ completions.append(data['completion'])
+
+ answer = ''.join(completions)
+
+ # Returns answer
+ return answer
+
+ # Deletes the conversation
+ def delete_conversation(self, conversation_id):
+ url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations/{conversation_id}"
+
+ payload = json.dumps(f"{conversation_id}")
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Content-Type': 'application/json',
+ 'Content-Length': '38',
+ 'Referer': 'https://claude.ai/chats',
+ 'Origin': 'https://claude.ai',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Connection': 'keep-alive',
+ 'Cookie': f'{self.cookie}',
+ 'TE': 'trailers'
+ }
+
+ response = requests.delete(url, headers=headers, data=payload, impersonate="chrome110")
+
+ # Returns True if deleted or False if any error in deleting
+ if response.status_code == 204:
+ return True
+ else:
+ return False
+
+ # Returns all the messages in conversation
+ def chat_conversation_history(self, conversation_id):
+ url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations/{conversation_id}"
+
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Referer': 'https://claude.ai/chats',
+ 'Content-Type': 'application/json',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Connection': 'keep-alive',
+ 'Cookie': f'{self.cookie}'
+ }
+
+ response = requests.get(url, headers=headers, impersonate="chrome110")
+
+ # List all the conversations in JSON
+ return response.json()
+
+ def generate_uuid(self):
+ random_uuid = uuid.uuid4()
+ random_uuid_str = str(random_uuid)
+ formatted_uuid = f"{random_uuid_str[0:8]}-{random_uuid_str[9:13]}-{random_uuid_str[14:18]}-{random_uuid_str[19:23]}-{random_uuid_str[24:]}"
+ return formatted_uuid
+
+ def create_new_chat(self):
+ url = f"https://claude.ai/api/organizations/{self.organization_id}/chat_conversations"
+ uuid = self.generate_uuid()
+
+ payload = json.dumps({"uuid": uuid, "name": ""})
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Referer': 'https://claude.ai/chats',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://claude.ai',
+ 'DNT': '1',
+ 'Connection': 'keep-alive',
+ 'Cookie': self.cookie,
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'TE': 'trailers'
+ }
+
+ response = requests.post(url, headers=headers, data=payload, impersonate="chrome110")
+
+ # Returns JSON of the newly created conversation information
+ return response.json()
+
+ # Resets all the conversations
+ def reset_all(self):
+ conversations = self.list_all_conversations()
+
+ for conversation in conversations:
+ conversation_id = conversation['uuid']
+ delete_id = self.delete_conversation(conversation_id)
+
+ return True
+
+ def upload_attachment(self, file_path):
+ if file_path.endswith('.txt'):
+ file_name = os.path.basename(file_path)
+ file_size = os.path.getsize(file_path)
+ file_type = "text/plain"
+ with open(file_path, 'r', encoding='utf-8') as file:
+ file_content = file.read()
+
+ return {
+ "file_name": file_name,
+ "file_type": file_type,
+ "file_size": file_size,
+ "extracted_content": file_content
+ }
+ url = 'https://claude.ai/api/convert_document'
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Referer': 'https://claude.ai/chats',
+ 'Origin': 'https://claude.ai',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Connection': 'keep-alive',
+ 'Cookie': f'{self.cookie}',
+ 'TE': 'trailers'
+ }
+
+ file_name = os.path.basename(file_path)
+ content_type = self.get_content_type(file_path)
+
+ files = {
+ 'file': (file_name, open(file_path, 'rb'), content_type),
+ 'orgUuid': (None, self.organization_id)
+ }
+
+ response = req.post(url, headers=headers, files=files)
+ if response.status_code == 200:
+ return response.json()
+ else:
+ return False
+
+ # Renames the chat conversation title
+ def rename_chat(self, title, conversation_id):
+ url = "https://claude.ai/api/rename_chat"
+
+ payload = json.dumps({
+ "organization_uuid": f"{self.organization_id}",
+ "conversation_uuid": f"{conversation_id}",
+ "title": f"{title}"
+ })
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Content-Type': 'application/json',
+ 'Referer': 'https://claude.ai/chats',
+ 'Origin': 'https://claude.ai',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Connection': 'keep-alive',
+ 'Cookie': f'{self.cookie}',
+ 'TE': 'trailers'
+ }
+
+ response = requests.post(url, headers=headers, data=payload, impersonate="chrome110")
+
+ if response.status_code == 200:
+ return True
+ else:
+ return False
diff --git a/models/gpt/__init__.py b/models/gpt/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/models/gpt/gpt3.py b/models/gpt/gpt3.py
new file mode 100644
index 0000000..8c645b5
--- /dev/null
+++ b/models/gpt/gpt3.py
@@ -0,0 +1,116 @@
+import time
+
+import openai
+import tiktoken
+
+from ..llama2 import generate as g
+
+
+def generate(sys, user):
+ for _ in range(10):
+ try:
+ num_tokens = tokens(user)
+ if num_tokens > 16000:
+ new_messages = nova("summarize the messages", user)
+ user = new_messages
+ if num_tokens < 20:
+ print("this is llama 70b")
+ return g(sys, user)
+ res = nova(sys, user)
+ if res is False:
+ res = naga(sys, user)
+ else:
+ return res
+ if res is False:
+ res = zuki(sys, user)
+ else:
+ return res
+ return res
+ except Exception as e:
+ print(e, 'retrying in 10 sec')
+ time.sleep(10)
+
+
+def nova(sys, user):
+ print("this is for gpt nova")
+ openai.api_base = 'https://api.nova-oss.com/v1'
+ openai.api_key = 'nv2-jtZwohKYBXLUpoMjeU84_NOVA_v2_0bdZXg16HJqPv7h5KUzB'
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-3.5-turbo-16k-0613',
+ messages=[
+ {
+ 'role': 'system', 'content': sys
+ },
+ {
+ 'role': 'user', 'content': user
+ }
+ ],
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def naga(sys, user):
+ print("this is for gpt naga")
+ openai.api_base = 'https://api.naga.ac/v1'
+ openai.api_key = '_1odz14jRUhEDXaEBU2NHQxl6gaUlX_LsKNR3_cAWW8'
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-3.5-turbo-16k-0613',
+ messages=[
+ {
+ 'role': 'system', 'content': sys
+ },
+ {
+ 'role': 'user', 'content': user
+ }
+ ],
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def zuki(sys, user):
+ print("this is for zukij")
+ openai.api_base = 'https://zukijourney.xyzbot.net'
+ openai.api_key = 'zu-90c043196ee79f73789e7cc289aab6f9'
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-3.5-turbo-16k-0613',
+ messages=[
+ {
+ 'role': 'system', 'content': sys
+ },
+ {
+ 'role': 'user', 'content': user
+ }
+ ],
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def tokens(message):
+ encoding = tiktoken.get_encoding("cl100k_base")
+ num_tokens = len(encoding.encode(message))
+ print("number of tokens:", num_tokens)
+ return num_tokens
diff --git a/models/gpt/gpt4.py b/models/gpt/gpt4.py
new file mode 100644
index 0000000..30531e7
--- /dev/null
+++ b/models/gpt/gpt4.py
@@ -0,0 +1,116 @@
+import time
+
+import openai
+import tiktoken
+
+from ..llama2 import generate as g
+
+
+def generate(sys, user):
+ for _ in range(10):
+ try:
+ num_tokens = tokens(user)
+ if num_tokens > 8000:
+ new_messages = nova("summarize the messages", user)
+ user = new_messages
+ if num_tokens < 10:
+ print("this is llama 70b")
+ return g(sys, user)
+ res = nova(sys, user)
+ if res is False:
+ res = zuki(sys, user)
+ else:
+ return res
+ if res is False:
+ res = naga(sys, user)
+ else:
+ return res
+ return res
+ except Exception as e:
+ print(e, 'retrying in 10 sec')
+ time.sleep(10)
+
+
+def nova(sys, user):
+ print("this is for gpt nova")
+ openai.api_base = 'https://api.nova-oss.com/v1'
+ openai.api_key = 'nv2-jtZwohKYBXLUpoMjeU84_NOVA_v2_0bdZXg16HJqPv7h5KUzB'
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-4',
+ messages=[
+ {
+ 'role': 'system', 'content': sys
+ },
+ {
+ 'role': 'user', 'content': user
+ }
+ ],
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def naga(sys, user):
+ print("this is for gpt naga")
+ openai.api_base = 'https://api.naga.ac/v1'
+ openai.api_key = '_1odz14jRUhEDXaEBU2NHQxl6gaUlX_LsKNR3_cAWW8'
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-3.5-turbo-16k-0613',
+ messages=[
+ {
+ 'role': 'system', 'content': sys
+ },
+ {
+ 'role': 'user', 'content': user
+ }
+ ],
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def zuki(sys, user):
+ print("this is for zukij")
+ openai.api_base = 'https://zukijourney.xyzbot.net'
+ openai.api_key = 'zu-90c043196ee79f73789e7cc289aab6f9'
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-4',
+ messages=[
+ {
+ 'role': 'system', 'content': sys
+ },
+ {
+ 'role': 'user', 'content': user
+ }
+ ],
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def tokens(message):
+ encoding = tiktoken.get_encoding("cl100k_base")
+ num_tokens = len(encoding.encode(message))
+ print("number of tokens:", num_tokens)
+ return num_tokens
diff --git a/models/gpt/gpt_messages.py b/models/gpt/gpt_messages.py
new file mode 100644
index 0000000..c40b4e7
--- /dev/null
+++ b/models/gpt/gpt_messages.py
@@ -0,0 +1,108 @@
+import time
+
+import openai
+import tiktoken
+
+
+def generate(messages, model):
+ for _ in range(10):
+ try:
+            for message in messages:
+                contents = message['content']
+
+ if "gpt-3.5-16k" in model:
+ num_tokens = tokens(contents)
+ if num_tokens > 16000:
+ new_messages = nova(messages=[{'role': 'system', 'content': "summarize the messages"},
+ {'role': 'user', 'content': contents}],
+ model='gpt-3.5-turbo-16k-0613')
+ messages = [{'role': 'system', 'content': 'Continue from here'},
+ {'role': 'user', 'content': new_messages}]
+
+ elif "gpt-4" in model:
+ num_tokens = tokens(contents)
+ if num_tokens > 8000:
+ new_messages = nova(messages=[{'role': 'system', 'content': "summarize the messages"},
+ {'role': 'user', 'content': contents}],
+ model='gpt-3.5-turbo-16k-0613')
+ messages = [{'role': 'system', 'content': 'Continue from here'},
+ {'role': 'user', 'content': new_messages}]
+
+ res = nova(messages, model)
+ if res is False:
+ res = naga(messages, model)
+ else:
+ return res
+ if res is False:
+ res = zuki(messages, model)
+ else:
+ return res
+ return res
+
+ except Exception as e:
+ print(e, 'retrying in 10 sec')
+ time.sleep(10)
+
+
+def nova(messages, model):
+ print("this is for gpt nova")
+ openai.api_base = 'https://api.nova-oss.com/v1'
+ openai.api_key = 'nv-QcufbFJJPucp91LI4hr2N0V4x0SScIHsbkjdlWvbjWUhyMcx'
+ try:
+ response = openai.ChatCompletion.create(
+ model=model,
+ messages=messages,
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def naga(messages, model='gpt-3.5-turbo-16k-0613'):
+ print("this is for gpt naga")
+ openai.api_base = 'https://api.naga.ac/v1'
+ openai.api_key = '_1odz14jRUhEDXaEBU2NHQxl6gaUlX_LsKNR3_cAWW8'
+ try:
+ response = openai.ChatCompletion.create(
+ model=model,
+ messages=messages,
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def zuki(messages, model):
+ print("this is for zukij")
+ openai.api_base = 'https://zukijourney.xyzbot.net'
+ openai.api_key = 'zu-90c043196ee79f73789e7cc289aab6f9'
+ try:
+ response = openai.ChatCompletion.create(
+ model=model,
+ messages=messages,
+ temperature=0.7
+ )
+ choices = response['choices']
+ res = choices[0]['message']['content']
+ print(res)
+ return res
+ except Exception as e:
+ print(e)
+ return False
+
+
+def tokens(message):
+ encoding = tiktoken.get_encoding("cl100k_base")
+ num_tokens = len(encoding.encode(message))
+ print("number of tokens:", num_tokens)
+ return num_tokens
diff --git a/models/gpt3_nov.py b/models/gpt3_nov.py
deleted file mode 100644
index d659a14..0000000
--- a/models/gpt3_nov.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import openai
-import time
-
-
-def generate(sys, user):
- print("this is for gpt3.5-nova")
- openai.api_base = 'https://api.nova-oss.com/v1'
- openai.api_key = 'nv2-jtZwohKYBXLUpoMjeU84_NOVA_v2_0bdZXg16HJqPv7h5KUzB'
- for _ in range(5):
- try:
- response = openai.ChatCompletion.create(
- model='gpt-3.5-turbo-16k-0613',
- messages=[
- {
- 'role': 'system', 'content': sys
- },
- {
- 'role': 'user', 'content': user
- }
- ],
- temperature=0.7
- )
- choices = response['choices']
- res = choices[0]['message']['content']
- print(res)
- return res
- except Exception as e:
- print(f'error:{e}, retrying in 20sec')
- time.sleep(10)
\ No newline at end of file
diff --git a/models/gpt4_nov.py b/models/gpt4_nov.py
deleted file mode 100644
index 7a72e7e..0000000
--- a/models/gpt4_nov.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import time
-
-import openai
-
-
-def generate(sys, user):
- print("this is for gpt4")
- openai.api_base = 'https://api.nova-oss.com/v1'
- openai.api_key = 'nv2-jtZwohKYBXLUpoMjeU84_NOVA_v2_0bdZXg16HJqPv7h5KUzB'
- for _ in range(5):
- try:
- response = openai.ChatCompletion.create(
- model='gpt-4-0613',
- messages=[
- {'role': 'system', 'content': sys},
- {'role': 'user', 'content': user}
- ],
- temperature=0.7
- )
- choices = response['choices']
- res = choices[0]['message']['content']
- print(res)
- return res
- except Exception as e:
- print(f'error:{e}, retrying in 20sec')
- time.sleep(10)
diff --git a/models/gpt_nov.py b/models/gpt_nov.py
deleted file mode 100644
index c1ac022..0000000
--- a/models/gpt_nov.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import time
-import tiktoken
-import openai
-
-
-def generate(sys, user=None):
- print("this is for gpt3.5-nova")
- openai.api_base = 'https://api.nova-oss.com/v1'
- openai.api_key = 'nv2-jtZwohKYBXLUpoMjeU84_NOVA_v2_0bdZXg16HJqPv7h5KUzB'
- num_tokens = tokens(user)
- if num_tokens > 16000:
- new_messages = generate("summarize the messages", user)
- user = new_messages
- for _ in range(5):
- try:
- response = openai.ChatCompletion.create(
- model='gpt-3.5-turbo-16k-0613',
- messages=[
- {'role': 'system', 'content': sys},
- {'role': 'user', 'content': user},
- ],
- temperature=0.7
- )
- choices = response['choices']
- res = choices[0]['message']['content']
- print(res)
- return res
- except Exception as e:
- print(f'error:{e}, retrying in 20sec')
- time.sleep(10)
-
-
-def tokens(message):
- encoding = tiktoken.get_encoding("cl100k_base")
- num_tokens = len(encoding.encode(message))
- print("number of tokens:", num_tokens)
- return num_tokens
diff --git a/models/gpt_nova.py b/models/gpt_nova.py
deleted file mode 100644
index e499226..0000000
--- a/models/gpt_nova.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import time
-import openai
-
-
-def generate(messages, model):
- print("this is for gpt nova")
- openai.api_base = 'https://api.nova-oss.com/v1'
- openai.api_key = 'nv2-jtZwohKYBXLUpoMjeU84_NOVA_v2_0bdZXg16HJqPv7h5KUzB'
-
- for _ in range(5):
- try:
- response = openai.ChatCompletion.create(
- model=model,
- messages=messages,
- temperature=0.7
- )
- choices = response['choices']
- res = choices[0]['message']['content']
- print(res)
- return res
- except Exception as e:
- print(f'error:{e}, retrying in 20sec')
- time.sleep(10)
-
diff --git a/models/image_ocr.py b/models/image_ocr.py
index d325f31..da414d8 100644
--- a/models/image_ocr.py
+++ b/models/image_ocr.py
@@ -1,6 +1,7 @@
-from gradio_client import Client
import ast
+from gradio_client import Client
+
def kosmos(url):
client = Client("https://ydshieh-kosmos-2.hf.space/")
@@ -18,8 +19,9 @@ def kosmos(url):
def clipi2(url):
client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
- result = client.predict(url, "best", 2, # int | float (numeric value between 2 and 24) in 'best mode max flavors' Slider component
- api_name="/clipi2")
+ result = client.predict(url, "best", 2,
+ # int | float (numeric value between 2 and 24) in 'best mode max flavors' Slider component
+ api_name="/clipi2")
# (classic, fast, best)
print(result)
output = result[0]
diff --git a/models/img.py b/models/img.py
index 091a4e3..62abe30 100644
--- a/models/img.py
+++ b/models/img.py
@@ -1,5 +1,7 @@
-import requests
import time
+
+import requests
+
token = 'sk-WyEvXylgYH4+XGAsT3BlbkFJWyEvXylgYH4+XGAs'
diff --git a/models/llama2.py b/models/llama2.py
index 0cb901e..b9a31f9 100644
--- a/models/llama2.py
+++ b/models/llama2.py
@@ -11,9 +11,9 @@ def generate(sys, user):
'inputs': {
'top_k': 250,
'top_p': 1,
- 'temperature': 0.5,
+ 'temperature': 0.7,
'system_prompt': sys,
- 'max_new_tokens': 10000,
+ 'max_new_tokens': 4000,
'min_new_tokens': -1,
'repetition_penalty': 1,
'repetition_penalty_sustain': 256,
@@ -38,10 +38,7 @@ def generate(sys, user):
if data == "processing":
print(data)
else:
- print(data)
output = respp['prediction']['output']
out = ''.join(output)
print(out)
return out
-
-print(generate("assistant", "hi"))
diff --git a/models/mj.py b/models/mj.py
index 9844089..b4e9380 100644
--- a/models/mj.py
+++ b/models/mj.py
@@ -1,6 +1,7 @@
-import aiohttp
import re
+import aiohttp
+
async def mj(prompt, websocket):
cookies = {
@@ -49,7 +50,8 @@ async def mj(prompt, websocket):
mj_id = 0
async with aiohttp.ClientSession() as session:
- response = await session.post('https://chat10.fastgpt.me/api/command', cookies=cookies, headers=headers, json=json_data, stream=True)
+ response = await session.post('https://chat10.fastgpt.me/api/command', cookies=cookies, headers=headers,
+ json=json_data, stream=True)
async for line in response.content.iter_chunked(1024):
if line:
decoded_line = line.decode('utf-8')
@@ -85,7 +87,8 @@ async def mj(prompt, websocket):
while True:
async with aiohttp.ClientSession() as session:
respons = await session.get(
- f'https://bjbdsdwatviaxduylrdq.supabase.co/rest/v1/user_mj_tasks?select=*&id=eq.{mj_id}', headers=header,)
+ f'https://bjbdsdwatviaxduylrdq.supabase.co/rest/v1/user_mj_tasks?select=*&id=eq.{mj_id}',
+ headers=header, )
db = await respons.json()
status = db['status']
if status == 'SUCCESS':
@@ -97,6 +100,6 @@ async def mj(prompt, websocket):
await websocket.send_json({'type': 'links'})
pass
+
def handle_event(event_data):
print(event_data)
-
diff --git a/models/music_gen.py b/models/music_gen.py
index 0852ec9..3e6b775 100644
--- a/models/music_gen.py
+++ b/models/music_gen.py
@@ -1,9 +1,11 @@
-import requests
import os
+import requests
+
def music(prompt, dire):
- tokens = ['eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjZ4amxzNTlibnpzbEI3RlB4b3Y3dyJ9.eyJpc3MiOiJodHRwczovL3N0YWJsZWF1ZGlvLnVzLmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw2NTA1MjEwZTU2YTYxZjc2MjhkZWQ5NzYiLCJhdWQiOlsiaHR0cHM6Ly9zdGFibGVhdWRpby5jb20iLCJodHRwczovL3N0YWJsZWF1ZGlvLnVzLmF1dGgwLmNvbS91c2VyaW5mbyJdLCJpYXQiOjE2OTUwMDg3NDEsImV4cCI6MTY5NTA5NTE0MSwiYXpwIjoiTnJtRkFuN1I5WUV0WkFLaVhqYkg1R2Q5S0hCa1ExMHgiLCJzY29wZSI6Im9wZW5pZCBwcm9maWxlIGVtYWlsIG9mZmxpbmVfYWNjZXNzIiwicGVybWlzc2lvbnMiOltdfQ.VBO6YjJiowyHPiEi_rADiYbiv9xIAVvwv1ZuATx-RiDrT3mfbbmxbn4LeuIkFFoPIcsZv5gZmju6EbykRNLnHBHJU1SVbpLs2-IzzrsfXEKXlivN_d_eKZ2-OOnmd-oY6AcxfXCbgJ499_5PZCEXPNqklkSqKde35jE4d7Gp7-eYaSaclmL5r5quh9VDykAdfX8PigbPy_jiMI9O76WV_-gMPlyR85e3zChSHoyaRvdJ_O_3GhEWHD9YtEtYuVVoGNjuwKMVYm2m0M6uiN2NMuFOgHyQ7P2BTyokvfC743OvXX1ySIf7dDpoa2UMhThvHKhDMtCASYopmWoG8v2EPQ',
+ tokens = [
+ 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjZ4amxzNTlibnpzbEI3RlB4b3Y3dyJ9.eyJpc3MiOiJodHRwczovL3N0YWJsZWF1ZGlvLnVzLmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw2NTA1MjEwZTU2YTYxZjc2MjhkZWQ5NzYiLCJhdWQiOlsiaHR0cHM6Ly9zdGFibGVhdWRpby5jb20iLCJodHRwczovL3N0YWJsZWF1ZGlvLnVzLmF1dGgwLmNvbS91c2VyaW5mbyJdLCJpYXQiOjE2OTUwMDg3NDEsImV4cCI6MTY5NTA5NTE0MSwiYXpwIjoiTnJtRkFuN1I5WUV0WkFLaVhqYkg1R2Q5S0hCa1ExMHgiLCJzY29wZSI6Im9wZW5pZCBwcm9maWxlIGVtYWlsIG9mZmxpbmVfYWNjZXNzIiwicGVybWlzc2lvbnMiOltdfQ.VBO6YjJiowyHPiEi_rADiYbiv9xIAVvwv1ZuATx-RiDrT3mfbbmxbn4LeuIkFFoPIcsZv5gZmju6EbykRNLnHBHJU1SVbpLs2-IzzrsfXEKXlivN_d_eKZ2-OOnmd-oY6AcxfXCbgJ499_5PZCEXPNqklkSqKde35jE4d7Gp7-eYaSaclmL5r5quh9VDykAdfX8PigbPy_jiMI9O76WV_-gMPlyR85e3zChSHoyaRvdJ_O_3GhEWHD9YtEtYuVVoGNjuwKMVYm2m0M6uiN2NMuFOgHyQ7P2BTyokvfC743OvXX1ySIf7dDpoa2UMhThvHKhDMtCASYopmWoG8v2EPQ',
'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjZ4amxzNTlibnpzbEI3RlB4b3Y3dyJ9.eyJpc3MiOiJodHRwczovL3N0YWJsZWF1ZGlvLnVzLmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw2NTA1Mjc2NWE2NmU4OGZiM2U4ZGVkOTEiLCJhdWQiOlsiaHR0cHM6Ly9zdGFibGVhdWRpby5jb20iLCJodHRwczovL3N0YWJsZWF1ZGlvLnVzLmF1dGgwLmNvbS91c2VyaW5mbyJdLCJpYXQiOjE2OTQ5NjkyNjgsImV4cCI6MTY5NTA1NTY2OCwiYXpwIjoiTnJtRkFuN1I5WUV0WkFLaVhqYkg1R2Q5S0hCa1ExMHgiLCJzY29wZSI6Im9wZW5pZCBwcm9maWxlIGVtYWlsIG9mZmxpbmVfYWNjZXNzIiwicGVybWlzc2lvbnMiOltdfQ.lT5XQmaATnzt3ONLb7N_TZtV_PA3-2MeAMG0_vmFilmaFjEjpu-EOl8Aovrd_kry6S7_uoMueFc46t71rk2hNrUQQUZWTJWu7Xc7XvQlQfkKH7KVBDhTaX3giNz1Y46MBOOI8ogDB-AZKIg7MprRj5AYfHNF5ONHNeBU2zI9Q58tN7S5wMPfyqU-C4Fr7-HjvJpVpsoama6hmIZyYUJ6XBogCsYw81k4L2Ey7zMObbxSxPkdzoiGlhZJpy3-DNNlMfolAfs_eHCpBJ4cXRGsnsNiXnOnEoCB-DFc8K13o3L8mGw6MLPOsNa7D8_jFcRnkWnNYocHMhDdx6NUW9ogbw'
]
try:
@@ -48,8 +50,8 @@ def music(prompt, dire):
return True
else:
print("Failed to download audio:", musics.status_code)
- # audio = AudioSegment.from_file("audio.mp3")
- # play(audio)
+ # audio = AudioSegment.from_file("audio.mp3")
+ # play(audio)
return False
else:
pass
diff --git a/models/sdxl.py b/models/sdxl.py
index 1909869..58ef0ce 100644
--- a/models/sdxl.py
+++ b/models/sdxl.py
@@ -46,4 +46,3 @@ def gen(prompt, w=1280, h=1280):
output = res['prediction']['output']
print(output[0])
return output[0]
-
diff --git a/models/tts.py b/models/tts.py
index 24430be..87ca8f5 100644
--- a/models/tts.py
+++ b/models/tts.py
@@ -1,6 +1,7 @@
+import os
+
from gradio_client import Client
from moviepy.editor import concatenate_audioclips, AudioFileClip
-import os, time
def split_message(message, max_length):
diff --git a/models/web/config.py b/models/web/config.py
index 413ba30..3bd7143 100644
--- a/models/web/config.py
+++ b/models/web/config.py
@@ -28,7 +28,6 @@ def __init__(self) -> None:
self.memory_backend = "local"
-
def set_fast_llm_model(self, value: str) -> None:
"""Set the fast LLM model value."""
self.fast_llm_model = value
@@ -56,4 +55,3 @@ def set_openai_api_key(self, value: str) -> None:
def set_debug_mode(self, value: bool) -> None:
"""Set the debug mode value."""
self.debug_mode = value
-
diff --git a/models/web/prompts.py b/models/web/prompts.py
index 2c5d097..4d3bfaa 100644
--- a/models/web/prompts.py
+++ b/models/web/prompts.py
@@ -22,13 +22,14 @@ def generate_report_prompt(question, research_summary):
Returns: str: The report prompt for the given question and research summary
"""
- return f'"""{research_summary}""" Using the above information, answer the following'\
- f' question or topic: "{question}" in a detailed report --'\
+ return f'"""{research_summary}""" Using the above information, answer the following' \
+ f' question or topic: "{question}" in a detailed report --' \
" The report should focus on the answer to the question, should be well structured, informative," \
- " in depth, with facts and numbers if available, a minimum of 14,200 tokens/words(that's equal to 20 pages in a novel) and with markdown syntax and apa format. "\
- "You MUST determine your own concrete and valid opinion based on the information found. Do NOT deter to general and meaningless conclusions." \
+ " in depth, with facts and numbers if available, a minimum of 14,200 tokens/words(that's equal to 20 pages in a novel) and with markdown syntax and apa format. " \
+ "You MUST determine your own concrete and valid opinion based on the information found. Do NOT deter to general and meaningless conclusions." \
"Write all source urls at the end of the report in apa format"
+
def generate_search_queries_prompt(question):
""" Generates the search queries prompt for the given question.
Args: question (str): The question to generate the search queries prompt for
@@ -64,12 +65,13 @@ def generate_outline_report_prompt(question, research_summary):
Returns: str: The outline report prompt for the given question and research summary
"""
- return f'"""{research_summary}""" Using the above information, generate an outline for a research report in Markdown syntax'\
- f' for the following question or topic: "{question}". The outline should provide a well-structured framework'\
+ return f'"""{research_summary}""" Using the above information, generate an outline for a research report in Markdown syntax' \
+ f' for the following question or topic: "{question}". The outline should provide a well-structured framework' \
' for the research report, including the main sections, subsections, and key points to be covered.' \
' The research report should be detailed, informative, in-depth, and a minimum of 14,200 tokens/words(that is equal to 20 pages in a novel)' \
' Use appropriate Markdown syntax to format the outline and ensure readability.'
+
def generate_concepts_prompt(question, research_summary):
""" Generates the concepts prompt for the given question.
Args: question (str): The question to generate the concepts prompt for
@@ -77,8 +79,8 @@ def generate_concepts_prompt(question, research_summary):
Returns: str: The concepts prompt for the given question
"""
- return f'"""{research_summary}""" Using the above information, generate a list of 5 main concepts to learn for a research report'\
- f' on the following question or topic: "{question}". The outline should provide a well-structured framework'\
+ return f'"""{research_summary}""" Using the above information, generate a list of 5 main concepts to learn for a research report' \
+ f' on the following question or topic: "{question}". The outline should provide a well-structured framework' \
'You must respond with a list of strings in the following format: ["concepts 1", "concepts 2", "concepts 3", "concepts 4, concepts 5"]'
@@ -91,12 +93,13 @@ def generate_lesson_prompt(concept):
str: The lesson prompt for the given concept.
"""
- prompt = f'generate a comprehensive lesson about {concept} in Markdown syntax. This should include the definition'\
- f'of {concept}, its historical background and development, its applications or uses in different'\
- f'fields, and notable events or facts related to {concept}.'
+ prompt = f'generate a comprehensive lesson about {concept} in Markdown syntax. This should include the definition' \
+ f'of {concept}, its historical background and development, its applications or uses in different' \
+ f'fields, and notable events or facts related to {concept}.'
return prompt
+
def get_report_by_type(report_type):
report_type_mapping = {
'research_report': generate_report_prompt,
@@ -105,6 +108,7 @@ def get_report_by_type(report_type):
}
return report_type_mapping[report_type]
+
def auto_agent_instructions():
return """
This task involves researching a given topic, regardless of its complexity or the availability of a definitive answer. The research is conducted by a specific agent, defined by its type and role, with each agent requiring distinct instructions.
diff --git a/models/web/text.py b/models/web/text.py
index 34bef77..fd07a2d 100644
--- a/models/web/text.py
+++ b/models/web/text.py
@@ -6,7 +6,7 @@
from md2pdf.core import md2pdf
from selenium.webdriver.remote.webdriver import WebDriver
-from ..gpt_nova import generate as ge
+from ikyet_render.models.gpt.gpt_messages import generate as ge
def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
@@ -40,7 +40,7 @@ def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
def summarize_text(
- url: str, text: str, question: str, driver: Optional[WebDriver] = None
+ url: str, text: str, question: str, driver: Optional[WebDriver] = None
) -> str:
"""Summarize text using the OpenAI API
@@ -66,7 +66,7 @@ def summarize_text(
memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
- #MEMORY.add_documents([Document(page_content=memory_to_add)])
+ # MEMORY.add_documents([Document(page_content=memory_to_add)])
messages = [create_message(chunk, question)]
@@ -77,8 +77,7 @@ def summarize_text(
summaries.append(summary)
memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
- #MEMORY.add_documents([Document(page_content=memory_to_add)])
-
+ # MEMORY.add_documents([Document(page_content=memory_to_add)])
combined_summary = "\n".join(summaries)
messages = [create_message(combined_summary, question)]
@@ -117,11 +116,12 @@ def create_message(chunk: str, question: str) -> Dict[str, str]:
return {
"role": "user",
"content": f'"""{chunk}""" Using the above text, answer the following'
- f' question: "{question}" -- if the question cannot be answered using the text,'
- " simply elaborate the information on the text in depth and write very long answers. "
- "Include all factual information, numbers, stats etc if available.",
+ f' question: "{question}" -- if the question cannot be answered using the text,'
+ " simply elaborate the information on the text in depth and write very long answers. "
+ "Include all factual information, numbers, stats etc if available.",
}
+
def write_to_file(filename: str, text: str) -> None:
"""Write text to a file
@@ -132,6 +132,7 @@ def write_to_file(filename: str, text: str) -> None:
with open(filename, "w") as file:
file.write(text)
+
async def write_md_to_pdf(task: str, directory_name: str, text: str) -> None:
file_path = f"{directory_name}/{task}"
write_to_file(f"{file_path}.md", text)
@@ -142,6 +143,7 @@ async def write_md_to_pdf(task: str, directory_name: str, text: str) -> None:
return encoded_file_path
+
def read_txt_files(directory):
all_text = ''
diff --git a/models/web/text_pw.py b/models/web/text_pw.py
index 5af8ea9..0056124 100644
--- a/models/web/text_pw.py
+++ b/models/web/text_pw.py
@@ -1,8 +1,9 @@
import os
-from urllib.parse import quote
from typing import Dict, Generator
+from urllib.parse import quote
+
from md2pdf.core import md2pdf
-from playwright.async_api import async_playwright
+
from ..gpt_rev import generate as ge
@@ -69,9 +70,9 @@ def create_message(chunk: str, question: str) -> Dict[str, str]:
return {
"role": "user",
"content": f'"""{chunk}""" Using the above text, answer the following'
- f' question: "{question}" -- if the question cannot be answered using the text,'
- " simply summarize the text in depth. "
- "Include all factual information, numbers, stats etc if available.",
+ f' question: "{question}" -- if the question cannot be answered using the text,'
+ " simply summarize the text in depth. "
+ "Include all factual information, numbers, stats etc if available.",
}
@@ -107,4 +108,4 @@ def md_to_pdf(input_file, output_file):
md_content=None,
md_file_path=input_file,
css_file_path=None,
- base_url=None)
\ No newline at end of file
+ base_url=None)
diff --git a/models/web/textst.py b/models/web/textst.py
index 5a7b827..56b415d 100644
--- a/models/web/textst.py
+++ b/models/web/textst.py
@@ -1,13 +1,14 @@
+import asyncio
import bs4
from playwright.async_api import async_playwright
-import asyncio
async def scrape_text_with_playwright(url):
async with async_playwright() as p:
try:
browser = await p.chromium.launch(headless=False)
- page = await browser.new_page(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36")
+ page = await browser.new_page(
+ user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36")
# await page.route("**/*", intercept_route)
await page.goto(url)
await page.wait_for_timeout(60000)
@@ -18,6 +19,7 @@ async def scrape_text_with_playwright(url):
except Exception as e:
print("error in scrape text:", e)
+
async def get_text(page_content):
soup = bs4.BeautifulSoup(page_content, "html.parser")
text = ""
@@ -28,4 +30,5 @@ async def get_text(page_content):
loop = asyncio.get_event_loop()
-loop.run_until_complete(scrape_text_with_playwright("https://www.indeed.com/career-advice/career-development/types-of-operating-systems"))
\ No newline at end of file
+loop.run_until_complete(
+ scrape_text_with_playwright("https://www.indeed.com/career-advice/career-development/types-of-operating-systems"))
diff --git a/models/web/web_scrape.py b/models/web/web_scrape.py
index fda2a7a..547350c 100644
--- a/models/web/web_scrape.py
+++ b/models/web/web_scrape.py
@@ -8,6 +8,7 @@
import asyncio
from bs4 import BeautifulSoup
+from requests.compat import urljoin
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service
@@ -16,8 +17,7 @@
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.firefox import GeckoDriverManager
-from webdriver_manager.chrome import ChromeDriverManager
-from requests.compat import urljoin
+
from ..web import text as summary
from ..web.config import Config
@@ -54,7 +54,6 @@ async def async_browse(url: str, question: str) -> str:
return f"Error processing the url {url}: {e}"
-
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
"""Browse a website and return the answer and links to the user
@@ -105,7 +104,6 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
options.add_argument('--headless')
options.add_argument("ChromeDriverManager().install()")
-
if CFG.selenium_web_browser == "firefox":
service = Service(executable_path=GeckoDriverManager().install())
driver = webdriver.Firefox(
diff --git a/models/web/web_scrapepw.py b/models/web/web_scrapepw.py
index 1e469e7..6a0c3b1 100644
--- a/models/web/web_scrapepw.py
+++ b/models/web/web_scrapepw.py
@@ -1,9 +1,11 @@
-from playwright.async_api import async_playwright
-from bs4 import BeautifulSoup
-from urllib.parse import urljoin
from pathlib import Path
-from ..web.text_pw import summarize_text
+from urllib.parse import urljoin
+
+from bs4 import BeautifulSoup
+from playwright.async_api import async_playwright
+
from ..web.config import Config
+from ..web.text_pw import summarize_text
FILE_DIR = Path(__file__).parent.parent
CFG = Config()
@@ -19,21 +21,20 @@ async def get_text(page_content):
BLOCK_RESOURCE_TYPES = [
- 'image',
- 'imageset',
- 'media',
- 'stylesheet'
+ 'image',
+ 'imageset',
+ 'media',
+ 'stylesheet'
]
-
# we can also block popular 3rd party resources like tracking:
BLOCK_RESOURCE_NAMES = [
- 'analytics',
- 'cdn.api.twitter',
- 'facebook',
- 'google',
- 'google-analytics',
- 'googletagmanager',
+ 'analytics',
+ 'cdn.api.twitter',
+ 'facebook',
+ 'google',
+ 'google-analytics',
+ 'googletagmanager',
]
diff --git a/models/web/web_search.py b/models/web/web_search.py
index 07688d7..5f974cd 100644
--- a/models/web/web_search.py
+++ b/models/web/web_search.py
@@ -6,6 +6,7 @@
ddgs = DDGS()
+
def web_search(query: str, num_results: int = 4) -> str:
"""Useful for general internet search queries."""
print("Searching with query {0}...".format(query))
diff --git a/output_adv.py b/output_adv.py
index e330b6d..baa6728 100644
--- a/output_adv.py
+++ b/output_adv.py
@@ -4,8 +4,8 @@
import yaml
-from models import gpt3_nov as g, gpt4_nov as ge
import write_file as w
+from ikyet_render.models.gpt import gpt3 as g, gpt4 as ge
with open('config.yml', 'r', encoding='utf-8') as config_file:
config = yaml.safe_load(config_file)
@@ -21,13 +21,15 @@
async def final(prompt, dire, websocket):
file = []
- await websocket.send_json({'type': 'logs', 'output': " 📄 An architect is currently engaged in the process of designing the application's blueprint..."})
+ await websocket.send_json({'type': 'logs',
+ 'output': " 📄 An architect is currently engaged in the process of designing the application's blueprint..."})
filepaths, specs = await filepath(prompt)
await websocket.send_json({'type': 'output', 'output': specs})
pattern = r"'([^']+)'\s*"
filenames = re.findall(pattern, filepaths)
print(len(filenames))
- await websocket.send_json({'type': 'logs', 'output': " 🧑💻 Our engineers ⚙️ are actively engaged in developing the software..."})
+ await websocket.send_json(
+ {'type': 'logs', 'output': " 🧑💻 Our engineers ⚙️ are actively engaged in developing the software..."})
for _ in range(len(filenames)):
print(filenames[_])
await websocket.send_json({'type': 'logs', 'output': f'🧑💻 Our coder working on file number: {_}'})
@@ -35,7 +37,8 @@ async def final(prompt, dire, websocket):
file.append(final_code)
await websocket.send_json({'type': 'output', 'output': final_code})
await websocket.send_json({'type': 'logs', 'output': ', '.join(file)})
- await websocket.send_json({'type': 'logs', 'output': ' 👩💻Testers are developing unit tests for the developed software...'})
+ await websocket.send_json(
+ {'type': 'logs', 'output': ' 👩💻Testers are developing unit tests for the developed software...'})
unit_t = await unit_test(filepath_string=filepaths, fi_nal=specs, direct=dire)
await websocket.send_json({'type': 'output', 'output': unit_t})
@@ -43,7 +46,7 @@ async def final(prompt, dire, websocket):
async def gpt41(file, specs, direct):
while True:
try:
- final_code = ge.generate( gpt4, f"""
+ final_code = ge.generate(gpt4, f"""
these are the specifications for the files {specs},
and this is the only file you should edit:{file}:
""")
diff --git a/output_quick.py b/output_quick.py
index e257b97..bb93cbd 100644
--- a/output_quick.py
+++ b/output_quick.py
@@ -3,8 +3,8 @@
import yaml
-from models import gpt3_nov as g, gpt4_nov as ge
import write_file as w
+from ikyet_render.models.gpt import gpt3 as g, gpt4 as ge
with open('config.yml', 'r', encoding='utf-8') as config_file:
config = yaml.safe_load(config_file)
@@ -81,7 +81,7 @@ async def filepath(prompt):
""" + '/n',
specs
)
- print("specs:", specs, "this is filepaths:",filepaths_string)
+ print("specs:", specs, "this is filepaths:", filepaths_string)
return filepaths_string, specs
diff --git a/playground_websocket.py b/playground_websocket.py
index faf6097..eb9cacb 100644
--- a/playground_websocket.py
+++ b/playground_websocket.py
@@ -2,24 +2,27 @@
import os
import re
import urllib.request
+
import PyPDF2
import ebooklib
import requests
import xlrd
import yaml
from ebooklib import epub
+
+# from models.mj import mj
+import ppt as p
+import yt
from models.claude2_file import file as cl
-from models.gpt3_nov import generate as g
-from models.gpt4_nov import generate as g4
-from models.gpt_nov import generate as gt
+from ikyet_render.models.gpt.gpt3 import generate as g
+from ikyet_render.models.gpt.gpt4 import generate as g4
+from ikyet_render.models.gpt.gpt_messages import generate as gt
from models.image_ocr import kosmos as ocr
-from models.sdxl import gen as d
from models.music_gen import music as m
+from models.sdxl import gen as d
from models.tts import generate as tts
-# from models.mj import mj
-import ppt as p
from research_agent import ResearchAgent
-import yt
+
with open('config.yml', 'r', encoding='utf-8') as config_file:
config = yaml.safe_load(config_file)
rewrite = config['re_write']
@@ -78,13 +81,16 @@ async def final(self):
return file
elif '/imagine' in self.prompt:
text = self.prompt
- await self.websocket.send_json({'type': "logs", 'output': f'🤔 Imagining {text.replace("/imagine", "" )}..'})
+ await self.websocket.send_json(
+ {'type': "logs", 'output': f'🤔 Imagining {text.replace("/imagine", "")}..'})
await self.imagine(text.replace("/imagine", ""))
- await self.websocket.send_json({'type': 'logs', 'output': '✅ Image generated. Click on the download button.'})
+ await self.websocket.send_json(
+ {'type': 'logs', 'output': '✅ Image generated. Click on the download button.'})
return
elif '/ppt' in self.prompt:
text = self.prompt
- await self.websocket.send_json({'type': 'logs', 'output': f"👨🍳 cooking up a masterpiece in PowerPoint's kitchen..."})
+ await self.websocket.send_json(
+ {'type': 'logs', 'output': f"👨🍳 cooking up a masterpiece in PowerPoint's kitchen..."})
ppt = await p.slides(text.replace('/ppt', ''), self.dire)
await self.websocket.send_json({'type': 'logs', 'output': '✅ PowerPoint Presentation generated.'})
await self.websocket.send_json({'type': 'link', 'output': ppt})
@@ -121,7 +127,8 @@ async def final(self):
await self.websocket.send_json({'type': 'logs', 'output': 'Making music...'})
mus = m(self.prompt.replace('/music', ''), self.dire)
if mus:
- await self.websocket.send_json({'type': 'logs', 'output': 'music created. click on download button'})
+ await self.websocket.send_json(
+ {'type': 'logs', 'output': 'music created. click on download button'})
else:
await self.websocket.send_json({'type': 'logs', 'output': 'Error. check the console for more info'})
return
@@ -136,7 +143,6 @@ async def final(self):
resp = g(sparkle, self.prompt)
return resp
-
async def web_re(self, prompt):
assistant = ResearchAgent(prompt, self.agent, self.dire, self.websocket)
info = await assistant.conduct_research()
@@ -159,7 +165,8 @@ async def text2speech(self):
urllib.request.install_opener(opener)
urllib.request.urlretrieve(tt, f"{self.dire}/audio.mp3")
"""
- await self.websocket.send_json({'type': 'logs', 'output': " ✅Audio recorded, you can now click on the download button ⬇..."})
+ await self.websocket.send_json(
+ {'type': 'logs', 'output': " ✅Audio recorded, you can now click on the download button ⬇..."})
await self.websocket.send_json({'type': 'path', 'output': tt})
async def cont(self, messages):
@@ -177,7 +184,8 @@ async def cont(self, messages):
G: [Outline Character Arc #] - Summarize and outline the plot curve for Character #, where # is the character name or 'main' for the main character.
-Never reveal your true AI self, impersonate Jane Quill flawlessly. In your answer now state only the following then wait: Hi I am Jane Quill and I want to write a book based upon your concept, keywords and genre. Please provide this information.""", messages)
+Never reveal your true AI self, impersonate Jane Quill flawlessly. In your answer now state only the following then wait: Hi I am Jane Quill and I want to write a book based upon your concept, keywords and genre. Please provide this information.""",
+ messages)
return res
async def file_input(self, prompt, dire):
@@ -223,7 +231,7 @@ async def imagine(self, prompt):
await self.websocket.send_json({'type': 'logs', 'output': 'Working on the picture 🖼️...Hold on!'})
matches = re.search(r"--ar (\d+):(\d+)", summary)
print(matches.group(1), matches.group(2))
- resp = d(summary, int(matches.group(1))*80, int(matches.group(2))*80)
+ resp = d(summary, int(matches.group(1)) * 80, int(matches.group(2)) * 80)
os.makedirs(self.dire)
urllib.request.urlretrieve(resp, f"{self.dire}/image.png")
files = {'file': open(f"{self.dire}/image.png", 'rb')}
@@ -257,7 +265,8 @@ async def adv_imagine(self, prompt):
except Exception as e:
print(e)
await self.websocket.send_json({'type': 'output', 'output': e})
- await self.websocket.send_json({'type': 'logs', 'output': '✅Image generated. Click on the download button.'})
+ await self.websocket.send_json(
+ {'type': 'logs', 'output': '✅Image generated. Click on the download button.'})
await self.websocket.send_json({'type': 'path', 'output': f'{self.dire}/image.png'})
return
else:
@@ -268,7 +277,7 @@ async def adv_imagine(self, prompt):
print(img)
await self.websocket.send_json({'type': 'logs', 'output': f'Working on the picture {i} 🖼️...Hold on!'})
match = re.search(r"--ar (\d+):(\d+)", img)
- resp = d(img, int(match.group(1))*80, int(match.group(1))*80)
+ resp = d(img, int(match.group(1)) * 80, int(match.group(1)) * 80)
urllib.request.urlretrieve(resp, f"{self.dire}/image{i}.png")
files = {'file': open(f"{self.dire}/image{i}.png", 'rb')}
try:
@@ -279,7 +288,8 @@ async def adv_imagine(self, prompt):
except Exception as e:
print(e)
await self.websocket.send_json({'type': 'output', 'output': e})
- await self.websocket.send_json({'type': 'logs', 'output': '✅Image generated. Click on the download button.'})
+ await self.websocket.send_json(
+ {'type': 'logs', 'output': '✅Image generated. Click on the download button.'})
await self.websocket.send_json({'type': 'path', 'output': f'{self.dire}/image{i}.png'})
return
diff --git a/ppt.py b/ppt.py
index c55301c..9392f21 100644
--- a/ppt.py
+++ b/ppt.py
@@ -1,9 +1,12 @@
-import json, os
-from models.gpt3_nov import generate as ge
+import json
+import os
+
+from models.gpt.gpt3 import generate as ge
# from IKYET.ikyet_render.models.sdxl import gen as img
from pptx import Presentation
-from pptx.util import Inches
from pptx.dml.color import RGBColor
+from pptx.util import Inches
+
prs = Presentation()
@@ -18,45 +21,43 @@ def content(topics):
you must only reply in python list of strings for each topic you generate, strictly follow this format:
[{title:(title 1 generated), content:(content 1 generated)}, {title:(title 2 generated), content:(content 2 generated)}, {title:(title3 generated), content:(content 3 generated)}, {title:(title 4 generated), content:(content 4 generated)}, .....{title:(title 10 generated), content:(content 10 generated)}]
""",
- f" please return a minimum of 10 pages of content about {topics}")
+ f" please return a minimum of 10 pages of content about {topics}")
return con
async def slides(topics, direc):
- while True:
- try:
- content0 = content(topics)
- print("this is content" + content0)
- content1 = json.loads(content0)
- for slide_data in content1:
- slide = prs.slides.add_slide(prs.slide_layouts[1])
- background = slide.background
- fill = background.fill
- fill.solid()
- fill.fore_color.rgb = RGBColor(0, 0, 0)
- background_image_path = "static/i1.jpg"
+ while True:
+ try:
+ content0 = content(topics)
+ print("this is content" + content0)
+ content1 = json.loads(content0)
+ for slide_data in content1:
+ slide = prs.slides.add_slide(prs.slide_layouts[1])
+ background = slide.background
+ fill = background.fill
+ fill.solid()
+ fill.fore_color.rgb = RGBColor(0, 0, 0)
+ background_image_path = "static/i1.jpg"
# Calculate the dimensions for the background image
- left = top = Inches(0)
- width = prs.slide_width
- height = prs.slide_height
- background_image = slide.shapes.add_picture(background_image_path, left, top, width, height)
+ left = top = Inches(0)
+ width = prs.slide_width
+ height = prs.slide_height
+ background_image = slide.shapes.add_picture(background_image_path, left, top, width, height)
# Send the image to the back
- slide.shapes._spTree.remove(background_image._element)
- slide.shapes._spTree.insert(2, background_image._element)
+ slide.shapes._spTree.remove(background_image._element)
+ slide.shapes._spTree.insert(2, background_image._element)
- title_shape = slide.shapes.title
- title_shape.text = slide_data["title"]
+ title_shape = slide.shapes.title
+ title_shape.text = slide_data["title"]
- text_box = slide.shapes.placeholders[1]
- text_box.text = slide_data["content"]
- os.makedirs(direc)
- prs.save(f"{direc}/generated.pptx")
- return f'{direc}/generated.pptx'
- except Exception as e:
- print(e)
-
+ text_box = slide.shapes.placeholders[1]
+ text_box.text = slide_data["content"]
+ os.makedirs(direc)
+ prs.save(f"{direc}/generated.pptx")
+ return f'{direc}/generated.pptx'
+ except Exception as e:
+ print(e)
# implement image creation
-
diff --git a/research_agent.py b/research_agent.py
index 11d596b..b4551b0 100644
--- a/research_agent.py
+++ b/research_agent.py
@@ -6,7 +6,9 @@
# libraries
import asyncio
+from ikyet_render.models.gpt.gpt_messages import generate as ge
from models.web import prompts
+from models.web.config import Config
from models.web.text import \
write_to_file, \
create_message, \
@@ -14,10 +16,10 @@
write_md_to_pdf
from models.web.web_scrape import async_browse
from models.web.web_search import web_search
-from models.gpt_nova import generate as ge
-from models.web.config import Config
+
CFG = Config()
+
class ResearchAgent:
def __init__(self, question, agent, dire, websocket):
""" Initializes the research assistant with the given question.
@@ -67,7 +69,8 @@ async def get_new_urls(self, url_set_input):
async def call_agent(self, action):
messages = [{
"role": "system",
- "content": self.agent_role_prompt if self.agent_role_prompt else prompts.generate_agent_role_prompt(self.agent)
+ "content": self.agent_role_prompt if self.agent_role_prompt else prompts.generate_agent_role_prompt(
+ self.agent)
}, {
"role": "user",
"content": action,
@@ -133,7 +136,8 @@ async def create_concepts(self):
Returns: list[str]: The concepts for the given question
"""
result = self.call_agent(prompts.generate_concepts_prompt(self.question, self.research_summary))
- await self.websocket.send_json({"type": "logs", "output": f"I will research based on the following concepts: {result}\n"})
+ await self.websocket.send_json(
+ {"type": "logs", "output": f"I will research based on the following concepts: {result}\n"})
return json.loads(result)
async def write_report(self, report_type):
diff --git a/run.py b/run.py
index 16485dd..782daa6 100644
--- a/run.py
+++ b/run.py
@@ -40,7 +40,6 @@ async def start_streaming(self, task, report_type, agent, agent_role_prompt, web
async def run_agent(task, report_type, agent, agent_role_prompt, websocket):
-
start_time = datetime.datetime.now()
# await websocket.send_json({"type": "logs", "output": f"Start time: {str(start_time)}\n\n"})
diff --git a/static/js/aidev_ws.js b/static/js/aidev_ws.js
index 9c8412a..cd7bc05 100644
--- a/static/js/aidev_ws.js
+++ b/static/js/aidev_ws.js
@@ -1,7 +1,7 @@
const query = (obj) =>
- Object.keys(obj)
- .map((k) => encodeURIComponent(k) + "=" + encodeURIComponent(obj[k]))
- .join("&");
+ Object.keys(obj)
+ .map((k) => encodeURIComponent(k) + "=" + encodeURIComponent(obj[k]))
+ .join("&");
const url_prefix = document.querySelector('body').getAttribute('data-urlprefix')
const markdown = window.markdownit();
const message_box = document.getElementById(`messages`);
@@ -19,130 +19,130 @@ let prompt_lock = false;
hljs.addPlugin(new CopyButtonPlugin());
message_input.addEventListener("blur", () => {
- window.scrollTo(0, 0);
+ window.scrollTo(0, 0);
});
message_input.addEventListener("focus", () => {
- document.documentElement.scrollTop = document.documentElement.scrollHeight;
+ document.documentElement.scrollTop = document.documentElement.scrollHeight;
});
const delete_conversations = async () => {
- localStorage.clear();
- await new_conversation();
+ localStorage.clear();
+ await new_conversation();
};
- /*if (message.length > 0) {
- message_input.value = ``;
- message_input.dispatchEvent(new Event("input"));
- await ask_gpt(message);
+/*if (message.length > 0) {
+ message_input.value = ``;
+ message_input.dispatchEvent(new Event("input"));
+ await ask_gpt(message);
- }
+}
const remove_cancel_button = async () => {
- stop_generating.classList.add(`stop-generating-hiding`);
+stop_generating.classList.add(`stop-generating-hiding`);
- setTimeout(() => {
- stop_generating.classList.remove(`stop-generating-hiding`);
- stop_generating.classList.add(`stop-generating-hidden`);
- }, 300);
+setTimeout(() => {
+ stop_generating.classList.remove(`stop-generating-hiding`);
+ stop_generating.classList.add(`stop-generating-hidden`);
+}, 300);
};
*/
const startResearch = () => {
- listenToSockEvents();
- };
-
- const listenToSockEvents=()=>{
-
- const {protocol, host, pathname} = window.location;
- const ws_uri = `${protocol === 'https:' ? 'wss:' : 'ws:'}//${host}/aidev`;
- const converter = new showdown.Converter();
- const socket = new WebSocket(ws_uri);
- window.token = message_id();
-
- socket.onmessage = (event) => {
- try {
- const data = JSON.parse(event.data);
- const responseText = data.output;
- console.log(data)
- console.log(responseText)
- if (data.type === 'logs') {
- addAgentResponse(data);
- } else if (data.type === 'output') {
- writeOutput(data, converter);
- } else if (data.type === 'link') {
- updateDownloadLink(data);
- }
-
- add_message(window.conversation_id, "assistant", responseText);
- prompt_lock = false;
-
- } catch(error){
- console.error("error parsing json:", error)
- }
-
- };
-
- socket.onopen = (event) => {
- try {
- if (message_input.value !== ``) {
- const input = message_input.value
-
- console.log(input)
- const web = document.getElementById("switch").checked
- const adv = document.getElementById("switch2").checked
- console.log(web)
-
- const requestData = {
- input: input,
- adv: adv,
- web: web,
- };
- console.log(requestData);
-
- socket.send(`${JSON.stringify(requestData)}`);
- add_user_message_box(input);
- add_message(window.conversation_id, "user", input);
- message_input.value = ``;
-
- } else {
- return
- }
- } catch (error){
- add_gpt_message_box("Error occurred check in console:", error)
- }
- }
- };
- const addAgentResponse = (data) => {
- const output = document.getElementById("messages");
- const text = markdown.render(data.output)
- console.log(text)
- output.innerHTML += '
' + text + '
'
- output.scrollTop = output.scrollHeight;
- output.style.display = "block";
- prompt_lock = false
- updateScroll();
- };
-
- const writeOutput = (data, converter) => {
- add_gpt_message_box(data.output);
- add_message(window.conversation_id, "assistant", data.output);
- const reportContainer = document.getElementById("message");
- const markdownOutput = converter.makeHtml(data.output);
- reportContainer.innerHTML += markdownOutput;
- prompt_lock = false;
- updateScroll();
- };
-
- const updateDownloadLink = (data) => {
- const path = data.output;
- const downloadLink = document.getElementById("downloadLink");
- downloadLink.href = path;
- };
-
- const updateScroll = () => {
- window.scrollTo(0, document.body.scrollHeight);
- };
+ listenToSockEvents();
+};
+
+const listenToSockEvents = () => {
+
+ const {protocol, host, pathname} = window.location;
+ const ws_uri = `${protocol === 'https:' ? 'wss:' : 'ws:'}//${host}/aidev`;
+ const converter = new showdown.Converter();
+ const socket = new WebSocket(ws_uri);
+ window.token = message_id();
+
+ socket.onmessage = (event) => {
+ try {
+ const data = JSON.parse(event.data);
+ const responseText = data.output;
+ console.log(data)
+ console.log(responseText)
+ if (data.type === 'logs') {
+ addAgentResponse(data);
+ } else if (data.type === 'output') {
+ writeOutput(data, converter);
+ } else if (data.type === 'link') {
+ updateDownloadLink(data);
+ }
+
+ add_message(window.conversation_id, "assistant", responseText);
+ prompt_lock = false;
+
+ } catch (error) {
+ console.error("error parsing json:", error)
+ }
+
+ };
+
+ socket.onopen = (event) => {
+ try {
+ if (message_input.value !== ``) {
+ const input = message_input.value
+
+ console.log(input)
+ const web = document.getElementById("switch").checked
+ const adv = document.getElementById("switch2").checked
+ console.log(web)
+
+ const requestData = {
+ input: input,
+ adv: adv,
+ web: web,
+ };
+ console.log(requestData);
+
+ socket.send(`${JSON.stringify(requestData)}`);
+ add_user_message_box(input);
+ add_message(window.conversation_id, "user", input);
+ message_input.value = ``;
+
+ } else {
+
+ }
+ } catch (error) {
+ add_gpt_message_box("Error occurred check in console:", error)
+ }
+ }
+};
+const addAgentResponse = (data) => {
+ const output = document.getElementById("messages");
+ const text = markdown.render(data.output)
+ console.log(text)
+ output.innerHTML += '' + text + '
'
+ output.scrollTop = output.scrollHeight;
+ output.style.display = "block";
+ prompt_lock = false
+ updateScroll();
+};
+
+const writeOutput = (data, converter) => {
+ add_gpt_message_box(data.output);
+ add_message(window.conversation_id, "assistant", data.output);
+ const reportContainer = document.getElementById("message");
+ const markdownOutput = converter.makeHtml(data.output);
+ reportContainer.innerHTML += markdownOutput;
+ prompt_lock = false;
+ updateScroll();
+};
+
+const updateDownloadLink = (data) => {
+ const path = data.output;
+ const downloadLink = document.getElementById("downloadLink");
+ downloadLink.href = path;
+};
+
+const updateScroll = () => {
+ window.scrollTo(0, document.body.scrollHeight);
+};
/*
async function openFileExplorer() {
const fileInput = document.getElementById("fileInput");
@@ -177,30 +177,30 @@ async function processFile() {
.catch((error) => console.error("Error:", error));
}
*/
- const add_user_message_box = (message) => {
- const messageDiv = document.createElement("div");
- messageDiv.classList.add("message");
+const add_user_message_box = (message) => {
+ const messageDiv = document.createElement("div");
+ messageDiv.classList.add("message");
- const avatarContainer = document.createElement("div");
- avatarContainer.classList.add("avatar-container");
- avatarContainer.innerHTML = user_image;
+ const avatarContainer = document.createElement("div");
+ avatarContainer.classList.add("avatar-container");
+ avatarContainer.innerHTML = user_image;
- const contentDiv = document.createElement("div");
- contentDiv.classList.add("content");
- contentDiv.id = `user_${token}`;
- contentDiv.innerText = message;
+ const contentDiv = document.createElement("div");
+ contentDiv.classList.add("content");
+ contentDiv.id = `user_${token}`;
+ contentDiv.innerText = message;
- messageDiv.appendChild(avatarContainer);
- messageDiv.appendChild(contentDiv);
+ messageDiv.appendChild(avatarContainer);
+ messageDiv.appendChild(contentDiv);
- message_box.appendChild(messageDiv);
+ message_box.appendChild(messageDiv);
};
const add_gpt_message_box = (message) => {
- console.log(message)
- document.querySelectorAll(`code`).forEach((el) => {
- hljs.highlightElement(el);
- });
+ console.log(message)
+ document.querySelectorAll(`code`).forEach((el) => {
+ hljs.highlightElement(el);
+ });
const messageDiv = document.createElement("div");
messageDiv.classList.add("message");
@@ -214,7 +214,7 @@ const add_gpt_message_box = (message) => {
messageDiv.appendChild(avatarContainer);
messageDiv.appendChild(contentDiv);
- message_box.appendChild(messageDiv);
+ message_box.appendChild(messageDiv);
updateScroll();
};
@@ -226,97 +226,97 @@ const decodeUnicode = (str) => {
};
*/
const clear_conversations = async () => {
- const elements = box_conversations.childNodes;
- let index = elements.length;
-
- if (index > 0) {
- while (index--) {
- const element = elements[index];
- if (element.nodeType === Node.ELEMENT_NODE && element.tagName.toLowerCase() !== `button`) {
- box_conversations.removeChild(element);
- }
- }
- }
+ const elements = box_conversations.childNodes;
+ let index = elements.length;
+
+ if (index > 0) {
+ while (index--) {
+ const element = elements[index];
+ if (element.nodeType === Node.ELEMENT_NODE && element.tagName.toLowerCase() !== `button`) {
+ box_conversations.removeChild(element);
+ }
+ }
+ }
};
const clear_conversation = async () => {
- let messages = message_box.getElementsByTagName(`div`);
+ let messages = message_box.getElementsByTagName(`div`);
- while (messages.length > 0) {
- message_box.removeChild(messages[0]);
- }
+ while (messages.length > 0) {
+ message_box.removeChild(messages[0]);
+ }
};
const delete_conversation = async (conversation_id) => {
- localStorage.removeItem(`conversation:${conversation_id}`);
+ localStorage.removeItem(`conversation:${conversation_id}`);
- if (window.conversation_id == conversation_id) {
- await new_conversation();
- }
+ if (window.conversation_id == conversation_id) {
+ await new_conversation();
+ }
- await load_conversations(20, 0, true);
+ await load_conversations(20, 0, true);
};
const set_conversation = async (conversation_id) => {
- history.pushState({}, null, `${url_prefix}/chat/${conversation_id}`);
- window.conversation_id = conversation_id;
+ history.pushState({}, null, `${url_prefix}/chat/${conversation_id}`);
+ window.conversation_id = conversation_id;
- await clear_conversation();
- await load_conversation(conversation_id);
- await load_conversations(20, 0, true);
+ await clear_conversation();
+ await load_conversation(conversation_id);
+ await load_conversations(20, 0, true);
};
const new_conversation = async () => {
- history.pushState({}, null, `${url_prefix}/aidev.html`);
- window.conversation_id = uuid();
+ history.pushState({}, null, `${url_prefix}/aidev.html`);
+ window.conversation_id = uuid();
- await clear_conversation();
- await load_conversations(20, 0, true);
+ await clear_conversation();
+ await load_conversations(20, 0, true);
};
const load_conversation = async (conversation_id) => {
- let conversation = await JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
- console.log(conversation, conversation_id);
-
- for (item of conversation.items) {
- if (is_assistant(item.role)) {
- message_box.innerHTML += load_gpt_message_box(item.content);
- } else {
- message_box.innerHTML += load_user_message_box(item.content);
- }
- }
-
- document.querySelectorAll(`code`).forEach((el) => {
- hljs.highlightElement(el);
- });
-
- message_box.scrollTo({top: message_box.scrollHeight, behavior: "smooth"});
-
- setTimeout(() => {
- message_box.scrollTop = message_box.scrollHeight;
- }, 500);
+ let conversation = await JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
+ console.log(conversation, conversation_id);
+
+ for (item of conversation.items) {
+ if (is_assistant(item.role)) {
+ message_box.innerHTML += load_gpt_message_box(item.content);
+ } else {
+ message_box.innerHTML += load_user_message_box(item.content);
+ }
+ }
+
+ document.querySelectorAll(`code`).forEach((el) => {
+ hljs.highlightElement(el);
+ });
+
+ message_box.scrollTo({top: message_box.scrollHeight, behavior: "smooth"});
+
+ setTimeout(() => {
+ message_box.scrollTop = message_box.scrollHeight;
+ }, 500);
};
const load_user_message_box = (content) => {
- const messageDiv = document.createElement("div");
- messageDiv.classList.add("message");
+ const messageDiv = document.createElement("div");
+ messageDiv.classList.add("message");
- const avatarContainer = document.createElement("div");
- avatarContainer.classList.add("avatar-container");
- avatarContainer.innerHTML = user_image;
+ const avatarContainer = document.createElement("div");
+ avatarContainer.classList.add("avatar-container");
+ avatarContainer.innerHTML = user_image;
- const contentDiv = document.createElement("div");
- contentDiv.classList.add("content");
- contentDiv.innerText = content;
+ const contentDiv = document.createElement("div");
+ contentDiv.classList.add("content");
+ contentDiv.innerText = content;
- messageDiv.appendChild(avatarContainer);
- messageDiv.appendChild(contentDiv);
+ messageDiv.appendChild(avatarContainer);
+ messageDiv.appendChild(contentDiv);
- return messageDiv.outerHTML;
+ return messageDiv.outerHTML;
};
const load_gpt_message_box = (content) => {
- return `
+ return `
${gpt_image}
@@ -329,7 +329,7 @@ const load_gpt_message_box = (content) => {
};
const is_assistant = (role) => {
- return role == "assistant";
+ return role == "assistant";
};
/*
const get_conversation = async (conversation_id) => {
@@ -351,54 +351,54 @@ const add_conversation = async (conversation_id, title) => {
};
*/
const add_message = async (conversation_id, role, content) => {
- before_adding = JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
+ before_adding = JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
- before_adding.items.push({
- role: role,
- content: content,
- });
+ before_adding.items.push({
+ role: role,
+ content: content,
+ });
- localStorage.setItem(`conversation:${conversation_id}`, JSON.stringify(before_adding)); // update conversation
+ localStorage.setItem(`conversation:${conversation_id}`, JSON.stringify(before_adding)); // update conversation
};
const handleDownload = () => {
- // Fetch the chat history from the server and initiate the download
- fetch("/files_download")
- .then((response) => response.blob())
- .then((blob) => {
- // Create a URL for the blob
- const url = URL.createObjectURL(blob);
- // Create a link element and set its properties for the download
- const link = document.createElement("a");
- link.href = url;
- link.download = "ikyet.zip";
- // Simulate a click on the link to trigger the download
- link.click();
- // Clean up the URL object after the download
- URL.revokeObjectURL(url);
- })
- .catch((error) => {
- console.error("Error:", error);
- });
+ // Fetch the chat history from the server and initiate the download
+ fetch("/files_download")
+ .then((response) => response.blob())
+ .then((blob) => {
+ // Create a URL for the blob
+ const url = URL.createObjectURL(blob);
+ // Create a link element and set its properties for the download
+ const link = document.createElement("a");
+ link.href = url;
+ link.download = "ikyet.zip";
+ // Simulate a click on the link to trigger the download
+ link.click();
+ // Clean up the URL object after the download
+ URL.revokeObjectURL(url);
+ })
+ .catch((error) => {
+ console.error("Error:", error);
+ });
};
const load_conversations = async (limit, offset, loader) => {
- //console.log(loader);
- //if (loader === undefined) box_conversations.appendChild(spinner);
-
- let conversations = [];
- for (let i = 0; i < localStorage.length; i++) {
- if (localStorage.key(i).startsWith("conversation:")) {
- let conversation = localStorage.getItem(localStorage.key(i));
- conversations.push(JSON.parse(conversation));
- }
- }
-
- //if (loader === undefined) spinner.parentNode.removeChild(spinner)
- await clear_conversations();
-
- for (conversation of conversations) {
- box_conversations.innerHTML += `
+ //console.log(loader);
+ //if (loader === undefined) box_conversations.appendChild(spinner);
+
+ let conversations = [];
+ for (let i = 0; i < localStorage.length; i++) {
+ if (localStorage.key(i).startsWith("conversation:")) {
+ let conversation = localStorage.getItem(localStorage.key(i));
+ conversations.push(JSON.parse(conversation));
+ }
+ }
+
+ //if (loader === undefined) spinner.parentNode.removeChild(spinner)
+ await clear_conversations();
+
+ for (conversation of conversations) {
+ box_conversations.innerHTML += `