Skip to content

Commit

Permalink
Version 3.0
Browse files Browse the repository at this point in the history
  • Loading branch information
amadejpapez authored Feb 5, 2021
1 parent 2bbcc95 commit a643792
Showing 1 changed file with 45 additions and 35 deletions.
80 changes: 45 additions & 35 deletions ApplSec.py
Original file line number Diff line number Diff line change
@@ -1,61 +1,71 @@
import re
import requests
import httplib2
from datetime import date
from bs4 import BeautifulSoup, SoupStrainer


# Fetch Apple's security-updates index page (HT201222) as text.
updatesPage = requests.get("https://support.apple.com/en-us/HT201222").text

# Today's date in the short format used on the page, e.g. "5 Feb 2021".
# NOTE: this was hard-coded to "1 Feb" (a testing leftover, with the real
# computation commented out) — restored to the actual current date.
currentDate = str(date.today().day) + " " + date.today().strftime("%B")[0:3] + " " + str(date.today().year)


# checks if there was a new version added today or not
wasUpdated = currentDate in updatesPage


if wasUpdated:
    # A new advisory was published today: collect every link on the page.
    # Reuse the page text already downloaded above instead of fetching the
    # same URL a second time through httplib2.
    allLinks = []
    today = date.today()
    # Two date formats Apple uses, e.g. "5 Feb 2021" and "February 5, 2021".
    currentDateFormatOne = str(today.day) + " " + today.strftime("%B")[0:3] + " " + str(today.year)
    currentDateFormatTwo = today.strftime("%B") + " " + str(today.day) + ", " + str(today.year)

    # Collect the href of every <a> tag on the index page.
    for link in BeautifulSoup(updatesPage, features="html.parser", parse_only=SoupStrainer("a")):
        if link.has_attr("href"):
            allLinks.append(link["href"])

    # get only the new links: the first 22 links are page navigation; one
    # new advisory link is expected per occurrence of today's date
    # (assumption carried over from the original — TODO confirm the offset)
    searchResults = len(re.findall(currentDate, updatesPage)) + 22
    newLinks = allLinks[22:searchResults]
    newHeader = []
    numberCVE = []
newHeader = []
numberCVE = []

# scrape new links and gather info from the links
newHeader = []
numberCVE = []
def getData(link):
    # Scrape each advisory page in *link*: append the page title to the
    # module-level list newHeader and the CVE count to numberCVE.
    # The two lists stay index-aligned (one append to each per page).
    for x in link:
        newPage = requests.get(x)
        soup = BeautifulSoup(newPage.content, "html.parser")

        # get the title of the new version — the second <h2> on the page,
        # with any HTML tags stripped out
        allHeaders = soup.find_all("h2")
        newHeader.append(re.sub("<[^>]*?>", "", str(allHeaders[1])))

        # get the number of CVEs on the page; the "- 1" discounts one
        # extra "CVE" occurrence (presumably page chrome — TODO confirm)
        numberCVE.append(len(re.findall("CVE", str(soup))) - 1)

    # if there is macOS in the header take only the first part, not the
    # full title. Use enumerate instead of newHeader.index(x): index()
    # returns the FIRST match, so duplicate titles would repeatedly
    # rewrite the same (possibly wrong) entry.
    for i, title in enumerate(newHeader):
        if "macOS" in title:
            newHeader[i] = title.split(",", 1)[0]


allLinks = []
for link in BeautifulSoup(updatesPage, features="html.parser", parse_only=SoupStrainer("a")):
    # get all the links from the page
    if link.has_attr("href"):
        allLinks.append(link["href"])


if currentDateFormatOne in updatesPage:
    # get only the new links from the page: skip the first 22 navigation
    # links, then take one link per occurrence of today's date
    newLinks = allLinks[22:len(re.findall(currentDateFormatOne, updatesPage)) + 22]
    getData(newLinks)

    # print results — pair each title with its CVE count via zip instead
    # of popping numberCVE while iterating the parallel list. (Removed a
    # dead assignment: results was set to "PATCH TIME!\n" and immediately
    # overwritten.)
    results = "RELEASED TODAY:\n"
    for title, cves in zip(newHeader, numberCVE):
        results += title + " released with " + str(cves) + " security fixes\n"
    # Preserve the original consume-as-you-print contract: the entries
    # reported here are removed so later code sees an emptied numberCVE.
    del numberCVE[:len(newHeader)]
    print(results)


updatedLinks = []
# check if the last 20 update pages got any changes today; the search
# string is loop-invariant, so build it once outside the loop
search = "Entry added " + str(currentDateFormatTwo)
for x in allLinks[22:42]:
    page = requests.get(x).text
    if search in page:
        updatedLinks.append(x)

if updatedLinks:
    # Start from a clean slate: clear BOTH parallel lists so stale counts
    # from the "released today" pass cannot misalign with the new titles
    # (the original cleared only newHeader).
    newHeader.clear()
    numberCVE.clear()
    getData(updatedLinks)

    # print results — message fixed: the original printed the ungrammatical
    # "X was released N security fixes"
    results = "UPDATED TODAY:\n"
    for title, cves in zip(newHeader, numberCVE):
        results += title + " was updated with " + str(cves) + " security fixes\n"
    numberCVE.clear()  # consumed, matching the pop-based original behavior
    print(results)

0 comments on commit a643792

Please sign in to comment.