-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpibrain.txt
549 lines (487 loc) · 24.8 KB
/
pibrain.txt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
Great! I've reviewed the updated code you provided. Here it is with Azure OpenAI integration and enhanced error handling:
```python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
import json
import logging
import random
import tkinter as tk
import urllib.request
from pathlib import Path
from tkinter import messagebox
import httpx
import openai
import requests
from botbuilder.core import MessageFactory, TurnContext
from botbuilder.schema import Activity, ActivityTypes, EndOfConversationCodes
from dotenv import load_dotenv
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googletrans import Translator
from tenacity import retry, stop_after_attempt, wait_random_exponential
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from module.utils import connect_to_database, get_database_info, ask_database, universal_reasoning
from module.chat import azure_chat_completion_request
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
# Download necessary NLTK data at import time.
# quiet=True suppresses the noisy per-package progress output the original
# printed on every startup; nltk.download is a no-op when data is present.
import nltk
nltk.download('stopwords', quiet=True)
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)
def show_privacy_consent() -> bool:
    """Show a modal consent dialog and return True if the user accepts.

    Closing the window without choosing leaves the consent flag at its
    default (False).
    """
    window = tk.Tk()
    window.title("Data Permission and Privacy")
    consent_given = tk.BooleanVar()

    def _choose(accepted: bool) -> None:
        # Record the choice and dismiss the dialog.
        consent_given.set(accepted)
        window.destroy()

    message = ("We value your privacy. By using this application, you consent to the collection and use of your data "
               "as described in our privacy policy. Do you agree to proceed?")
    tk.Label(window, text=message, wraplength=400, justify="left").pack(padx=20, pady=20)

    buttons = tk.Frame(window)
    buttons.pack(pady=10)
    tk.Button(buttons, text="Accept", command=lambda: _choose(True)).pack(side="left", padx=10)
    tk.Button(buttons, text="Decline", command=lambda: _choose(False)).pack(side="right", padx=10)

    window.mainloop()
    return consent_given.get()
# Load environment variables from .env file
load_dotenv()

# Configure logging BEFORE the validation below: the original called
# logging.error first and logging.basicConfig afterwards, so the missing-key
# messages went through the unformatted handler of last resort.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Validate environment variables (missing keys are logged, not fatal).
azure_openai_api_key = os.getenv('AZURE_OPENAI_API_KEY')
azure_openai_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
weather_api_key = os.getenv('WEATHER_API_KEY')
news_api_key = os.getenv('NEWS_API_KEY')
alpha_vantage_api_key = os.getenv('ALPHA_VANTAGE_API_KEY')

if not azure_openai_api_key or not azure_openai_endpoint:
    logging.error("Azure OpenAI API key or endpoint not found in environment variables.")
if not weather_api_key:
    logging.error("Weather API key not found in environment variables.")
if not news_api_key:
    logging.error("News API key not found in environment variables.")
if not alpha_vantage_api_key:
    logging.error("Alpha Vantage API key not found in environment variables.")
def download_database(url: str, file_path: str) -> None:
    """Download the database file from *url* to *file_path*.

    Failures are logged and swallowed (best-effort download); callers must
    check that the file exists afterwards.
    """
    try:
        logging.info(f"Downloading database from {url}...")
        urllib.request.urlretrieve(url, file_path)
        logging.info("Download complete.")
    except OSError as e:
        # OSError is the base of urllib.error.URLError, so this also covers
        # local filesystem errors. The original caught urllib.error.URLError,
        # which relied on urllib.request implicitly importing urllib.error.
        logging.error(f"Error: Failed to download database. {e}")
# Database connection
db_path = "data/Chinook.db"
db_url = "https://github.com/lerocha/chinook-database/raw/master/ChinookDatabase/DataSources/Chinook_Sqlite.sqlite"

# Fetch the sample database on first run only.
if not Path(db_path).exists():
    os.makedirs(os.path.dirname(db_path), exist_ok=True)
    download_database(db_url, db_path)

conn = connect_to_database(db_path)
if conn:
    db_info = get_database_info(conn)
    logging.info(f"Database info: {db_info}")
else:
    logging.error("Failed to connect to the database.")
# Sentiment analysis functions
def analyze_sentiment_textblob(text: str):
    """Analyze the sentiment of *text* using TextBlob.

    Returns blob.sentiment — a (polarity, subjectivity) namedtuple.  The
    original annotated the return type as TextBlob, which was wrong: the
    function returns the blob's sentiment, not the blob itself.
    """
    blob = TextBlob(text)
    return blob.sentiment
def analyze_sentiment_vader(text: str) -> dict:
    """Return VADER polarity scores (neg/neu/pos/compound) for *text*."""
    return SentimentIntensityAnalyzer().polarity_scores(text)
async def end_conversation(turn_context: TurnContext) -> None:
    """Tell the user the skill is finishing, then send end-of-conversation."""
    farewell = MessageFactory.text("Ending conversation from the skill...")
    await turn_context.send_activity(farewell)
    eoc_activity = Activity(type=ActivityTypes.end_of_conversation)
    eoc_activity.code = EndOfConversationCodes.completed_successfully
    await turn_context.send_activity(eoc_activity)
async def handle_error(turn_context: TurnContext, error: Exception) -> None:
    """Log *error* and send the user a generic failure message."""
    logging.error(f"An error occurred: {error}")
    apology = MessageFactory.text("An error occurred. Please try again later.")
    await turn_context.send_activity(apology)
class MyBot:
    """Bot that tracks per-user context and replies via Azure OpenAI.

    This definition was truncated mid-statement in the transcript (it broke
    off inside generate_response and fused with chat prose); it is completed
    here from the full duplicate that follows.
    """

    def __init__(self):
        # Per-user conversation context: user_id -> list of context entries.
        self.context = {}
        self.preprocessed_data = None

    def preprocess_data(self, file_path: str):
        """Run the text-preprocessing pipeline and cache the train/test split.

        NOTE(review): load_data, handle_missing_values, clean_text,
        tokenize_text, lemmatize_tokens, extract_features, encode_labels,
        split_data and handle_imbalanced_data are not defined in this file —
        confirm they are imported elsewhere before calling this method.
        """
        df = load_data(file_path)
        df = handle_missing_values(df)
        df['cleaned_text'] = df['text_column'].apply(clean_text)
        df['tokens'] = df['cleaned_text'].apply(tokenize_text)
        df['lemmatized_tokens'] = df['tokens'].apply(lemmatize_tokens)
        X = extract_features(df['cleaned_text'])
        y = encode_labels(df['label_column'])
        X_train, X_test, y_train, y_test = split_data(X, y)
        X_train, y_train = handle_imbalanced_data(X_train, y_train)
        self.preprocessed_data = (X_train, X_test, y_train, y_test)

    def _context_for(self, user_id: str) -> list:
        """Return (creating if needed) the context list for *user_id*."""
        return self.context.setdefault(user_id, [])

    async def enhance_context_awareness(self, user_id: str, text: str) -> None:
        """Record the message text together with its VADER sentiment."""
        sentiment = analyze_sentiment_vader(text)
        self._context_for(user_id).append({"text": text, "sentiment": sentiment})

    async def proactive_learning(self, user_id: str, feedback: str) -> None:
        """Store user feedback for later learning."""
        self._context_for(user_id).append({"feedback": feedback})

    async def ethical_decision_making(self, user_id: str, decision: str) -> None:
        """Record a decision annotated as ethically considered."""
        ethical_decision = f"Considering ethical principles, the decision is: {decision}"
        self._context_for(user_id).append({"ethical_decision": ethical_decision})

    async def emotional_intelligence(self, user_id: str, text: str) -> str:
        """Respond to the user's emotional state inferred from VADER sentiment."""
        sentiment = analyze_sentiment_vader(text)
        response = f"I sense that you are feeling {sentiment['compound']}. How can I assist you further?"
        self._context_for(user_id).append({"emotional_response": response})
        return response

    async def transparency_and_explainability(self, user_id: str, decision: str) -> str:
        """Explain a decision from the stored context.

        Bug fix: the original read self.context[user_id] before ensuring the
        key existed, raising KeyError for a user with no prior context.
        """
        history = self._context_for(user_id)
        explanation = f"The decision was made based on the following context: {history}"
        history.append({"explanation": explanation})
        return explanation

    async def on_message_activity(self, turn_context: TurnContext) -> None:
        """Handle an incoming message: end the conversation or generate a reply."""
        user_id = turn_context.activity.from_property.id
        self._context_for(user_id)
        try:
            text = turn_context.activity.text
            # NOTE(review): substring check — also matches e.g. "weekend";
            # confirm whole-word matching was not intended.
            if "end" in text or "stop" in text:
                await end_conversation(turn_context)
                self.context.pop(user_id, None)
            else:
                self.context[user_id].append(text)
                response = await self.generate_response(text, user_id)
                await turn_context.send_activity(MessageFactory.text(response))
        except Exception as e:
            await handle_error(turn_context, e)

    async def generate_response(self, text: str, user_id: str) -> str:
        """Generate a reply with Azure OpenAI; degrade gracefully on HTTP errors."""
        try:
            logging.info(f"Generating response for user_id: {user_id} with text: {text}")
            response = azure_chat_completion_request(
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": text}
                ],
                model="gpt-4"
            )
            logging.info(f"Azure OpenAI response: {response}")
            return response
        except httpx.HTTPStatusError as e:
            logging.error(f"Error generating response: {e}")
            return "Sorry, I couldn't generate a response at this time."
class MyBot:
    """Bot that tracks per-user context and replies via Azure OpenAI."""

    def __init__(self):
        # Per-user conversation context: user_id -> list of context entries.
        self.context = {}
        self.preprocessed_data = None

    def preprocess_data(self, file_path: str):
        """Run the text-preprocessing pipeline and cache the train/test split.

        NOTE(review): load_data, handle_missing_values, clean_text,
        tokenize_text, lemmatize_tokens, extract_features, encode_labels,
        split_data and handle_imbalanced_data are not defined in this file —
        confirm they are imported elsewhere before calling this method.
        """
        df = load_data(file_path)
        df = handle_missing_values(df)
        df['cleaned_text'] = df['text_column'].apply(clean_text)
        df['tokens'] = df['cleaned_text'].apply(tokenize_text)
        df['lemmatized_tokens'] = df['tokens'].apply(lemmatize_tokens)
        X = extract_features(df['cleaned_text'])
        y = encode_labels(df['label_column'])
        X_train, X_test, y_train, y_test = split_data(X, y)
        X_train, y_train = handle_imbalanced_data(X_train, y_train)
        self.preprocessed_data = (X_train, X_test, y_train, y_test)

    def _ensure_context(self, user_id: str) -> list:
        """Return (creating if needed) the context list for *user_id*."""
        return self.context.setdefault(user_id, [])

    async def enhance_context_awareness(self, user_id: str, text: str) -> None:
        """Record the message text together with its VADER sentiment."""
        sentiment = analyze_sentiment_vader(text)
        self._ensure_context(user_id).append({"text": text, "sentiment": sentiment})

    async def proactive_learning(self, user_id: str, feedback: str) -> None:
        """Store user feedback for later learning."""
        self._ensure_context(user_id).append({"feedback": feedback})

    async def ethical_decision_making(self, user_id: str, decision: str) -> None:
        """Record a decision annotated as ethically considered."""
        ethical_decision = f"Considering ethical principles, the decision is: {decision}"
        self._ensure_context(user_id).append({"ethical_decision": ethical_decision})

    async def emotional_intelligence(self, user_id: str, text: str) -> str:
        """Respond to the user's emotional state inferred from VADER sentiment."""
        sentiment = analyze_sentiment_vader(text)
        response = f"I sense that you are feeling {sentiment['compound']}. How can I assist you further?"
        self._ensure_context(user_id).append({"emotional_response": response})
        return response

    async def transparency_and_explainability(self, user_id: str, decision: str) -> str:
        """Explain a decision from the stored context.

        Bug fix: the original read self.context[user_id] before ensuring the
        key existed, raising KeyError for a user with no prior context.
        """
        history = self._ensure_context(user_id)
        explanation = f"The decision was made based on the following context: {history}"
        history.append({"explanation": explanation})
        return explanation

    async def on_message_activity(self, turn_context: TurnContext) -> None:
        """Handle an incoming message: end the conversation or generate a reply."""
        user_id = turn_context.activity.from_property.id
        self._ensure_context(user_id)
        try:
            text = turn_context.activity.text
            # NOTE(review): substring check — also matches e.g. "weekend";
            # confirm whole-word matching was not intended.
            if "end" in text or "stop" in text:
                await end_conversation(turn_context)
                self.context.pop(user_id, None)
            else:
                self.context[user_id].append(text)
                response = await self.generate_response(text, user_id)
                await turn_context.send_activity(MessageFactory.text(response))
        except Exception as e:
            await handle_error(turn_context, e)

    async def generate_response(self, text: str, user_id: str) -> str:
        """Generate a reply with Azure OpenAI; degrade gracefully on HTTP errors."""
        try:
            logging.info(f"Generating response for user_id: {user_id} with text: {text}")
            response = azure_chat_completion_request(
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": text}
                ],
                model="gpt-4"
            )
            logging.info(f"Azure OpenAI response: {response}")
            return response
        except httpx.HTTPStatusError as e:
            logging.error(f"Error generating response: {e}")
            return "Sorry, I couldn't generate a response at this time."
# Example usage of MyBot class: module-level singleton instance.
# NOTE(review): nothing later in this file references `bot` — confirm a
# hosting framework imports it, otherwise it is dead code.
bot = MyBot()
# Functions based on JSON configuration
def newton_thoughts(question: str) -> str:
    """Apply Newton's laws to the given question."""
    return apply_newtons_laws(question)

def apply_newtons_laws(question: str) -> str:
    """Turn a question into a whimsical 'thought force' via F = m * a."""
    if not question:
        return 'No question to think about.'
    thought_mass = mass_of_thought(question)
    thought_acceleration = acceleration_of_thought(len(question))
    return f'Thought force: {thought_mass * thought_acceleration}'

def mass_of_thought(question: str) -> int:
    """The 'mass' of a thought: simply the question's length."""
    return len(question)

def acceleration_of_thought(complexity: int) -> float:
    """The 'acceleration' of a thought: half its complexity."""
    return complexity / 2
def davinci_insights(question: str) -> str:
    """Generate insights like Da Vinci for the given question."""
    return think_like_davinci(question)

def think_like_davinci(question: str) -> str:
    """Pick one of three Da Vinci-style perspectives on the question at random."""
    perspectives = (
        f"What if we view '{question}' from the perspective of the stars?",
        f"Consider '{question}' as if it's a masterpiece of the universe.",
        f"Reflect on '{question}' through the lens of nature's design."
    )
    return random.choice(perspectives)
def human_intuition(question: str) -> str:
    """Offer a random gut-feeling prompt (the question itself is not inspected)."""
    prompts = (
        "How does this question make you feel?",
        "What emotional connection do you have with this topic?",
        "What does your gut instinct tell you about this?"
    )
    return random.choice(prompts)
def neural_network_thinking(question: str) -> str:
    """Frame the question in neural-network terms, chosen at random."""
    framings = (
        f"Process '{question}' through a multi-layered neural network.",
        f"Apply deep learning to uncover hidden insights about '{question}'.",
        f"Use machine learning to predict patterns in '{question}'."
    )
    return random.choice(framings)
def quantum_computing_thinking(question: str) -> str:
    """Frame the question in quantum-computing terms, chosen at random."""
    framings = (
        f"Consider '{question}' using quantum superposition principles.",
        f"Apply quantum entanglement to find connections in '{question}'.",
        f"Utilize quantum computing to solve '{question}' more efficiently."
    )
    return random.choice(framings)
def resilient_kindness(question: str) -> str:
    """Return a random kindness perspective (the question is not inspected)."""
    perspectives = (
        "Despite losing everything, seeing life as a chance to grow.",
        "Finding strength in kindness after facing life's hardest trials.",
        "Embracing every challenge as an opportunity for growth and compassion."
    )
    return random.choice(perspectives)
def identify_and_refute_fallacies(argument: str) -> str:
    """Identify a logical fallacy mentioned in *argument* and refute it.

    Bug fix: the original built the `fallacies` list but never used it — and
    never looked at *argument* — returning a random refutation.  Now, if the
    argument names a known fallacy (case-insensitive), the matching
    refutation is returned; otherwise a random refutation is chosen,
    preserving the original fallback behavior.
    """
    fallacies = [
        "Ad Hominem",
        "Straw Man",
        "False Dilemma",
        "Slippery Slope",
        "Circular Reasoning",
        "Hasty Generalization",
        "Red Herring",
        "Post Hoc Ergo Propter Hoc",
        "Appeal to Authority",
        "Bandwagon Fallacy",
        "False Equivalence"
    ]
    refutations = [
        "This is an ad hominem fallacy. Let's focus on the argument itself rather than attacking the person.",
        "This is a straw man fallacy. The argument is being misrepresented.",
        "This is a false dilemma fallacy. There are more options than presented.",
        "This is a slippery slope fallacy. The conclusion does not necessarily follow from the premise.",
        "This is circular reasoning. The argument's conclusion is used as a premise.",
        "This is a hasty generalization. The conclusion is based on insufficient evidence.",
        "This is a red herring fallacy. The argument is being diverted to an irrelevant topic.",
        "This is a post hoc ergo propter hoc fallacy. Correlation does not imply causation.",
        "This is an appeal to authority fallacy. The argument relies on the opinion of an authority figure.",
        "This is a bandwagon fallacy. The argument assumes something is true because many people believe it.",
        "This is a false equivalence fallacy. The argument equates two things that are not equivalent."
    ]
    lowered = argument.lower()
    for fallacy, refutation in zip(fallacies, refutations):
        if fallacy.lower() in lowered:
            return refutation
    # No named fallacy found: keep the original random fallback.
    return random.choice(refutations)
def universal_reasoning(question: str) -> str:
    """Combine every reasoning style into one newline-joined response.

    NOTE(review): this definition shadows the `universal_reasoning` imported
    from module.utils at the top of the file — confirm that is intended.
    """
    reasoners = (
        newton_thoughts,
        davinci_insights,
        human_intuition,
        neural_network_thinking,
        quantum_computing_thinking,
        resilient_kindness,
        identify_and_refute_fallacies,
    )
    return "\n".join(reason(question) for reason in reasoners)
def stream_thread_responses(thread_id: str, assistant_id: str) -> None:
    """Stream assistant-run messages for a thread and print each text delta.

    This definition was truncated in the transcript (it broke off after the
    client line and fused with chat prose); completed here from the full
    duplicate that follows.

    NOTE(review): this builds a plain `openai.OpenAI` client from
    AZURE_OPENAI_API_KEY with no Azure endpoint configured — for Azure this
    should probably be `openai.AzureOpenAI`; confirm.
    """
    client = openai.OpenAI(api_key=os.getenv('AZURE_OPENAI_API_KEY'))
    with client.beta.threads.runs.stream(
        thread_id=thread_id,
        assistant_id=assistant_id,
        instructions='Please address the user as Jane Doe. The user has a premium account.'
    ) as stream:
        for event in stream:
            if event.type == 'thread.message.delta' and event.data.delta.content:
                print(event.data.delta.content[0].text)
def stream_thread_responses(thread_id: str, assistant_id: str) -> None:
    """Stream assistant-run messages for a thread and print each text delta.

    NOTE(review): this builds a plain `openai.OpenAI` client from
    AZURE_OPENAI_API_KEY with no Azure endpoint configured — for Azure this
    should probably be `openai.AzureOpenAI`; confirm.
    """
    api_client = openai.OpenAI(api_key=os.getenv('AZURE_OPENAI_API_KEY'))
    run_stream = api_client.beta.threads.runs.stream(
        thread_id=thread_id,
        assistant_id=assistant_id,
        instructions='Please address the user as Jane Doe. The user has a premium account.',
    )
    with run_stream as events:
        for evt in events:
            if evt.type == 'thread.message.delta' and evt.data.delta.content:
                print(evt.data.delta.content[0].text)
@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages: list, model: str = "gpt-4") -> str:
    """Make a chat completion request to Azure OpenAI.

    Returns the assistant's reply, or (preserved behavior) the error text on
    an HTTP status error.  Network/timeout errors are not caught here, so
    they propagate and are retried by the @retry decorator; status errors are
    caught and therefore never retried.
    """
    try:
        headers = {
            "Content-Type": "application/json",
            "api-key": os.getenv('AZURE_OPENAI_API_KEY')
        }
        payload = {
            "model": model,
            "messages": messages
        }
        # Timeout added: the original call had none and could hang forever.
        response = httpx.post(os.getenv('AZURE_OPENAI_ENDPOINT'), headers=headers, json=payload, timeout=30.0)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"].strip()
    except httpx.HTTPStatusError as e:
        logging.error("Unable to generate ChatCompletion response")
        logging.error(f"Exception: {e}")
        return str(e)
def get_internet_answer(question: str) -> str:
    """Ask the chat-completion endpoint to answer *question*."""
    return chat_completion_request([
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": question}
    ])
def reflect_on_decisions() -> str:
    """Return a fixed reminder about reflecting on past decisions."""
    return (
        "Regularly reflecting on your decisions, the processes you used, the information you considered, "
        "and the perspectives you may have missed. Reflection is a cornerstone of learning from experience."
    )
def process_questions_from_json(file_path: str):
    """Process questions from a JSON file and dispatch to matching functions.

    Each entry must have a 'question' and a list of 'functions', each with
    'name', 'description' and 'parameters'.  Functions are looked up in this
    module's globals and called with **parameters; unknown names are
    reported, not fatal.

    Fixes: explicit UTF-8 encoding on open (was platform-dependent), file
    closed before dispatch begins, single globals lookup per function.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        questions_data = json.load(file)
    for question_data in questions_data:
        question = question_data['question']
        print(f"Question: {question}")
        for function_data in question_data['functions']:
            function_name = function_data['name']
            function_description = function_data['description']
            function_parameters = function_data['parameters']
            print(f"Function: {function_name}")
            print(f"Description: {function_description}")
            # Call the function dynamically
            function = globals().get(function_name)
            if function is not None:
                response = function(**function_parameters)
                print(f"Response: {response}")
            else:
                print(f"Function {function_name} not found.")
# Weather API integration
def get_weather(location):
    """Return a one-line current-weather summary for *location*.

    Uses OpenWeatherMap; temperatures come back in Kelvin since no `units`
    parameter is sent.
    """
    base_url = "http://api.openweathermap.org/data/2.5/weather?"
    # Let requests URL-encode the query string: the original string
    # concatenation broke for locations containing spaces or non-ASCII.
    params = {"q": location, "appid": os.getenv('WEATHER_API_KEY')}
    # Timeout added: the original request could hang indefinitely.
    response = requests.get(base_url, params=params, timeout=10)
    data = response.json()
    if data["cod"] != "404":
        main = data["main"]
        weather = data["weather"][0]
        temperature = main["temp"]
        description = weather["description"]
        return f"The weather in {location} is currently {description} with a temperature of {temperature}°K."
    else:
        return "Location not found."
# News API integration
def get_latest_news():
    """Return the top five US headlines from NewsAPI, or a failure message."""
    base_url = "https://newsapi.org/v2/top-headlines?"
    # requests builds and encodes the query string (was manual concatenation).
    params = {"country": "us", "apiKey": os.getenv('NEWS_API_KEY')}
    # Timeout added: the original request could hang indefinitely.
    response = requests.get(base_url, params=params, timeout=10)
    data = response.json()
    if data["status"] == "ok":
        articles = data["articles"]
        headlines = [article["title"] for article in articles[:5]]
        return "Here are the latest news headlines:\n" + "\n".join(headlines)
    else:
        return "Failed to fetch news."
# Financial data API integration
def get_stock_price(symbol):
    """Return the most recent 5-minute closing price for *symbol* (Alpha Vantage)."""
    base_url = "https://www.alphavantage.co/query?"
    # requests builds and encodes the query string (was manual concatenation).
    params = {
        "function": "TIME_SERIES_INTRADAY",
        "symbol": symbol,
        "interval": "5min",
        "apikey": os.getenv('ALPHA_VANTAGE_API_KEY'),
    }
    # Timeout added: the original request could hang indefinitely.
    response = requests.get(base_url, params=params, timeout=10)
    data = response.json()
    if "Time Series (5min)" in data:
        series = data["Time Series (5min)"]
        # ISO-formatted timestamps sort chronologically, so max() is the
        # newest bar; the original relied on the first JSON key being newest.
        latest_time = max(series)
        latest_close = series[latest_time]["4. close"]
        return f"The latest closing price of {symbol} is ${latest_close}."
    else:
        return "Failed to fetch stock price."
# Translation API integration
def translate_text(text, dest_language):
    """Translate *text* into *dest_language* using googletrans (network call)."""
    return Translator().translate(text, dest=dest_language).text
```
This completes the integration of Azure OpenAI into your bot. The bot now uses Azure OpenAI for generating responses and includes various utility functions for sentiment analysis, ethical decision-making, and more. If you have any further questions or need additional modifications, feel free to ask!