main.py · 116 lines (94 loc) · 3.67 KB
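"""Entry point script.

Wires together the local knowledge_integration, language_proficiency,
algorithm_generation, prompt_generation and complex_solving modules,
summarizes the files in w3/ via the OpenAI API, and then starts an
interactive chat terminal.
"""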
import os
import sys
import threading
from openai import OpenAI
from dotenv import load_dotenv
from knowledge_integration import real_time_update
from language_proficiency import understand_language_syntax
from algorithm_generation import generate_algorithm
from adaptation_merging import merge_languages
from prompt_generation import generate_initial_prompt, refine_prompt
from complex_solving import solve_complex_problem

# Load environment variables from .env file
load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


def interactive_terminal():
    """Simple REPL that forwards user input to the chat model."""
    print("Interactive OpenAI Terminal. Type 'exit' to quit.")
    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            break
        # v1 SDK endpoint is client.chat.completions.create
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "user", "content": user_input}
            ],
            max_tokens=150
        )
        # Message content is an attribute in the v1 SDK, not a dict key
        print("AI:", response.choices[0].message.content)


def generate_summaries():
    """Summarize every non-empty file in the w3/ directory."""
    summaries = {}
    w3_files = os.listdir("w3")
    for file in w3_files:
        file_path = os.path.join("w3", file)
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        if not content.strip():
            continue
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "user", "content": f"Summarize the following content:\n\n{content[:5000]}"}
            ],
            max_tokens=1500
        )
        summary = response.choices[0].message.content
        summaries[file] = summary
    return summaries


def main():
    # Print Python path for debugging
    print("Python path:", sys.path)

    # Real-Time Knowledge Update
    urls = [
        'https://github.com/mastermindml/mastermind',
        'https://github.com/pythaiml/automindx',
        'https://github.com/Professor-Codephreak',
        'https://github.com/augml/lwe-plugin-shell',
        'https://github.com/augml/nicegui'
    ]
    interval = 3600  # Update every hour
    knowledge_update_thread = threading.Thread(target=real_time_update, args=(interval, urls))
    knowledge_update_thread.start()

    # Language Proficiency Example
    code_snippet = "print('Professor Codephreak sends his regards')"
    language = "Python"
    syntax_explanation = understand_language_syntax(code_snippet, language)
    print("Syntax Explanation:", syntax_explanation)

    # Algorithm Generation Example
    problem_statement = "Sort an array of integers."
    language = "Python"
    algorithm = generate_algorithm(problem_statement, language)
    print("Generated Algorithm:", algorithm)

    # Complex Problem Solving Example
    problem_statement = "Develop a sustainable energy solution for urban areas."
    domain = "Engineering"
    solution = solve_complex_problem(problem_statement, domain)
    print("Solution:", solution)

    # Prompt Generation and Refinement Example
    problem_context = "We need to develop a new method for real-time data analysis."
    initial_prompt = generate_initial_prompt(problem_context)
    print("Initial Prompt:", initial_prompt)
    refined_prompt = refine_prompt(initial_prompt)
    print("Refined Prompt:", refined_prompt)

    # Generate and print summaries
    summaries = generate_summaries()
    for file, summary in summaries.items():
        print(f"Summary for {file}:\n{summary}\n")

    # Start interactive terminal
    interactive_terminal()


if __name__ == "__main__":
    # Create necessary directories
    os.makedirs("memory", exist_ok=True)
    os.makedirs("context", exist_ok=True)
    os.makedirs("w3", exist_ok=True)
    main()
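
# Usage sketch (assumes a .env file providing OPENAI_API_KEY and the local
# modules imported above being importable from the working directory):
#   python main.py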