Commit d87dca9: Update engine.py
mar0ls authored Feb 19, 2023
1 parent fd7dcc9
Showing 1 changed file with 21 additions and 16 deletions.
engine.py: 37 changes (21 additions & 16 deletions)
@@ -2,7 +2,8 @@
 import json
 import pandas as pd
 import colorama
-from unidecode import unidecode
+from unidecode import unidecode
+import time
 
 colorama.init()
 
@@ -25,7 +26,7 @@ def twitterUser():
     choose2 = choose1.replace(" ", "")
     choose = unidecode(choose2, "utf-8")
 
-    scraper = twitterScraper.TwitterUserScraper(choose, False)
+    scraper = twitterScraper.TwitterUserScraper(user = choose, top = True)
 
     while True:
         try:
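For context, a minimal sketch of how this scraper is typically consumed, assuming the file's `twitterScraper` alias comes from `import snscrape.modules.twitter as twitterScraper` and that the installed snscrape release accepts the `user`/`top` keywords; the constructor signature has changed across snscrape versions, so the keyword form here is version-dependent:

import snscrape.modules.twitter as twitterScraper  # assumed import alias used by engine.py

# Hypothetical account name; the keyword arguments are version-dependent.
scraper = twitterScraper.TwitterUserScraper(user="jack", top=True)

# get_items() is a lazy generator, so cap it explicitly;
# it will not terminate on its own for an active account.
for i, tweet in enumerate(scraper.get_items()):
    if i >= 10:
        break
    print(tweet.date, tweet.content)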
@@ -48,26 +49,28 @@ def twitterUser():
             print(f"{i} content: {tweet.content}, data: {tweet.date}")
             tweets.append({"id": tweet.id, "content": tweet.content, "media": str(tweet.media),
                            "user": tweet.user.username, "user_location": tweet.user.location,
-                           "url": tweet.url, "likes": tweet.likeCount})
+                           "url": tweet.url, "likes": tweet.likeCount})
 
+    timer = time.strftime("%Y%m%d-%H%M%S")
+
-    f = open("results/tweets_user.json", "w")
+    f = open(f"results/tweets_user_{choose}_{timer}.json", "w")
     j = json.dumps(tweets)
     f.write(j)
     f.close()
-    # path = Path(r'tweets.json')
-    with open('results/tweets_user.json', 'r', encoding='utf-8') as f:
+
+    with open(f'results/tweets_user_{choose}_{timer}.json', 'r', encoding='utf-8') as f:
         data = json.loads(f.read())
     df = pd.json_normalize(data)
-    df.to_csv('results/tweets_user.csv', index=False, encoding='utf-8')
+    df.to_csv(f'results/tweets_user_{choose}_{timer}.csv', index=False, encoding='utf-8')
     print(
-        bcolors.BOLD + bcolors.WARNING + "\nThe results were saved to the results folder in the tweets_search.json and tweet_search.csv files" + bcolors.ENDC)
+        bcolors.BOLD + bcolors.WARNING + f"\nThe results were saved to the results folder in the tweets_user_{choose}_{timer}.json and tweet_user_{choose}_{timer}.csv files" + bcolors.ENDC)


 def twitterSearch():
     choose = str(input(bcolors.BOLD + bcolors.OKGREEN + "Enter search data: " + bcolors.ENDC))
     while not choose.strip():
         choose = str(input(bcolors.BOLD + bcolors.OKGREEN + "Enter search data: " + bcolors.ENDC))
-    scraper = twitterScraper.TwitterSearchScraper(choose, False)
+    scraper = twitterScraper.TwitterSearchScraper(choose, top = True)
 
     while True:
         try:
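Both functions now write timestamp-suffixed files. A self-contained sketch of that JSON-write-then-CSV round trip, using only the calls visible in the diff plus an added os.makedirs guard (the original assumes results/ already exists; the records below are placeholders):

import json
import os
import time

import pandas as pd

os.makedirs("results", exist_ok=True)  # the commit assumes this folder exists

# strftime gives a filesystem-safe stamp, e.g. "20230219-143005"
timer = time.strftime("%Y%m%d-%H%M%S")
tweets = [{"id": 1, "content": "hello", "likes": 0}]  # placeholder records

json_path = f"results/tweets_user_example_{timer}.json"
with open(json_path, "w", encoding="utf-8") as f:
    json.dump(tweets, f)

# Re-read the JSON and flatten it into a table, mirroring the script's flow.
with open(json_path, "r", encoding="utf-8") as f:
    df = pd.json_normalize(json.load(f))
df.to_csv(f"results/tweets_user_example_{timer}.csv", index=False, encoding="utf-8")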
@@ -89,16 +92,18 @@ def twitterSearch():
                 break
             print(f"{i} content: {tweet.content}, user: {tweet.user.username}, data: {tweet.date}")
             tweets.append({"id": tweet.id, "content": tweet.content, "data": str(tweet.date), "user": tweet.user.username,
-                           "user_location": tweet.user.location, "url": tweet.url, "media": str(tweet.media), "likes": tweet.likeCount})
+                           "user_location": tweet.user.location, "url": tweet.url, "media": str(tweet.media), "likes": tweet.likeCount})
 
+    timer = time.strftime("%Y%m%d-%H%M%S")
+
-    f = open("results/tweets_search.json", "w")
+    f = open(f"results/tweets_search_{timer}.json", "w")
     j = json.dumps(tweets)
-    f.write(j)
+    f.write(j)
     f.close()
-    # path = Path(r'tweets.json')
-    with open('results/tweets_search.json', 'r', encoding='utf-8') as f:
+
+    with open(f'results/tweets_search_{timer}.json', 'r', encoding='utf-8') as f:
         data = json.loads(f.read())
     df = pd.json_normalize(data)
-    df.to_csv('results/tweets_search.csv', index=False, encoding='utf-8')
+    df.to_csv(f'results/tweets_search_{timer}.csv', index=False, encoding='utf-8')
     print(
-        bcolors.BOLD + bcolors.WARNING + "\nThe results were saved to the results folder in the tweets_search.json and tweet_search.csv files\n" + bcolors.ENDC)
+        bcolors.BOLD + bcolors.WARNING + f"\nThe results were saved to the results folder in the tweets_search_{timer}.json and tweet_search_{timer}.csv files\n" + bcolors.ENDC)
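Taken together, the search path after this commit reads roughly as below. This is a hedged end-to-end sketch, not the file's exact code: the collapsed hunks hide the fetch loop, so the `limit` cap and the loop body are assumptions, and `search_to_csv` is an illustrative helper name:

import json
import os
import time

import pandas as pd
import snscrape.modules.twitter as twitterScraper  # assumed import alias


def search_to_csv(query, limit=100):
    """Scrape up to `limit` tweets matching `query`, then save JSON and CSV copies."""
    # The top keyword's availability varies by snscrape version.
    scraper = twitterScraper.TwitterSearchScraper(query, top=True)
    tweets = []
    for i, tweet in enumerate(scraper.get_items()):
        if i >= limit:
            break
        tweets.append({"id": tweet.id, "content": tweet.content, "data": str(tweet.date),
                       "user": tweet.user.username, "url": tweet.url, "likes": tweet.likeCount})

    os.makedirs("results", exist_ok=True)
    timer = time.strftime("%Y%m%d-%H%M%S")
    with open(f"results/tweets_search_{timer}.json", "w", encoding="utf-8") as f:
        json.dump(tweets, f)
    pd.json_normalize(tweets).to_csv(f"results/tweets_search_{timer}.csv",
                                     index=False, encoding="utf-8")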
