cloakquest3r.py
import socket
import sys
import ssl
import os
import requests
import urllib.request
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from colorama import init, Fore
import threading
import time
from bs4 import BeautifulSoup
import configparser
from urllib.parse import urlparse
twitter_url = 'https://spyboy.in/twitter'
discord = 'https://spyboy.in/Discord'
website = 'https://spyboy.in/'
blog = 'https://spyboy.blog/'
github = 'https://github.com/spyboy-productions/CloakQuest3r'
VERSION = '1.0.5'
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
Y = '\033[33m' # yellow
banner = r'''
___ _ _ ____ _ _____
/ __\ | ___ __ _| | __ /___ \_ _ ___ ___| |_|___ / _ __
/ / | |/ _ \ / _` | |/ /// / / | | |/ _ \/ __| __| |_ \| '__|
/ /___| | (_) | (_| | </ \_/ /| |_| | __/\__ \ |_ ___) | |
\____/|_|\___/ \__,_|_|\_\___,_\ \__,_|\___||___/\__|____/|_|
 Uncover the true IP address of websites safeguarded by Cloudflare & others.
'''
init()  # Initialize colorama so ANSI colors also work on Windows.
def print_banners():
    """Print the program banner, version, and project links."""
print(f'{R}{banner}{W}\n')
print(f'{G}[+] {Y}Version : {W}{VERSION}')
print(f'{G}[+] {Y}Created By : {W}Spyboy')
print(f'{G} \u2514\u27A4 {Y}Twitter : {W}{twitter_url}')
print(f'{G} \u2514\u27A4 {Y}Discord : {W}{discord}')
print(f'{G} \u2514\u27A4 {Y}Website : {W}{website}')
print(f'{G} \u2514\u27A4 {Y}Blog : {W}{blog}')
print(f'{G} \u2514\u27A4 {Y}Github : {W}{github}\n')
def is_using_cloudflare(domain):
    """Return True if response headers suggest the site is behind Cloudflare."""
    try:
        response = requests.head(f"https://{domain}", timeout=5)
        headers = response.headers
        # Cloudflare normally identifies itself in the Server header.
        if "server" in headers and "cloudflare" in headers["server"].lower():
            return True
        # Proxied responses typically carry a CF-Ray trace ID.
        if "cf-ray" in headers:
            return True
        # Fallback: any header literally named "cloudflare".
        if "cloudflare" in headers:
            return True
    except requests.exceptions.RequestException:
        # ConnectionError is a subclass of RequestException, so one clause suffices.
        pass
    return False
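# Example usage (a sketch; "example.com" is a placeholder target):
#   is_using_cloudflare("example.com")  # -> True or False based on headers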
def detect_web_server(domain):
    """Return the target's Server response header, or "UNKNOWN" if absent."""
    try:
        response = requests.head(f"https://{domain}", timeout=5)
        server_header = response.headers.get("Server")
        if server_header:
            return server_header.strip()
    except requests.exceptions.RequestException:
        pass
    return "UNKNOWN"
wordlist_url = "https://github.com/danielmiessler/SecLists/raw/master/Discovery/DNS/subdomains-top1million-5000.txt"
default_wordlist = "wordlist.txt"
updated_wordlist = "wordlist.txt"
def download_wordlist(wordlist_path):
print(f"\n{Fore.GREEN}[+] {C}Downloading an updated wordlist from {Fore.GREEN}SecLists{Fore.RESET}")
try:
urllib.request.urlretrieve(wordlist_url, wordlist_path)
print(f"{Fore.GREEN}[+] {C}Wordlist downloaded successfully as {Fore.GREEN}{wordlist_path}{Fore.RESET}")
except Exception as e:
print(f"{Fore.RED}[!] {C}Error downloading wordlist: {Fore.RED}{e}{Fore.RESET}")
print(f"{Fore.GREEN}[+] {C}Using the existing wordlist {Fore.GREEN}{updated_wordlist}{Fore.RESET}")
return updated_wordlist
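# The SecLists file holds one candidate subdomain per line (e.g. "www",
# "mail", "ftp"); a custom wordlist must follow the same format.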
def get_ssl_certificate_info(host):
    """Fetch the leaf certificate from host:443 and summarize its key fields."""
    try:
        context = ssl.create_default_context()
        with context.wrap_socket(socket.socket(), server_hostname=host) as sock:
            sock.connect((host, 443))
            certificate_der = sock.getpeercert(True)
        certificate = x509.load_der_x509_certificate(certificate_der, default_backend())
        common_name = certificate.subject.get_attributes_for_oid(x509.NameOID.COMMON_NAME)[0].value
        issuer = certificate.issuer.get_attributes_for_oid(x509.NameOID.COMMON_NAME)[0].value
        validity_start = certificate.not_valid_before
        validity_end = certificate.not_valid_after
        # On cryptography >= 42, prefer the timezone-aware properties:
        # validity_start = certificate.not_valid_before_utc
        # validity_end = certificate.not_valid_after_utc
        return {
            "Common Name": common_name,
            "Issuer": issuer,
            "Validity Start": validity_start,
            "Validity End": validity_end,
        }
    except Exception as e:
        print(f"{Fore.RED}Error extracting SSL certificate information: {e}{Fore.RESET}")
        return None
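# Shape of the returned dict (values here are illustrative placeholders):
#   {"Common Name": "example.com", "Issuer": "R3",
#    "Validity Start": datetime(...), "Validity End": datetime(...)}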
def find_subdomains_with_ssl_analysis(domain, wordlist_path=None, timeout=20):
subdomains_found = []
subdomains_lock = threading.Lock()
def check_subdomain(subdomain):
subdomain_url = f"https://{subdomain}.{domain}"
try:
response = requests.get(subdomain_url, timeout=timeout)
if response.status_code == 200:
with subdomains_lock:
subdomains_found.append(subdomain_url)
print(f"{Fore.GREEN}Subdomain Found \u2514\u27A4: {subdomain_url}{Fore.RESET}")
        except requests.exceptions.RequestException:
            # DNS failures and connection errors are expected for most
            # candidates; silently skip them.
            pass
    if wordlist_path is None:
        answer = input(f"\n{Fore.CYAN}> Do you have a custom wordlist for subdomain scanning? {Fore.GREEN}(yes/no): ").lower()
        if answer == "yes":
            wordlist_path = input(f"\n{Fore.CYAN}> Enter the path to your custom wordlist: {Fore.GREEN}")
        else:
            wordlist_path = default_wordlist
    with open(wordlist_path, "r") as file:
        # One candidate per line; skip blanks so we never probe "https://.domain".
        subdomains = [line.strip() for line in file if line.strip()]
print(f"\n{Fore.YELLOW}Starting threads...")
start_time = time.time()
threads = []
for subdomain in subdomains:
thread = threading.Thread(target=check_subdomain, args=(subdomain,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
end_time = time.time()
elapsed_time = end_time - start_time
print(f"\n{G} \u2514\u27A4 {C}Total Subdomains Scanned:{W} {len(subdomains)}")
print(f"{G} \u2514\u27A4 {C}Total Subdomains Found:{W} {len(subdomains_found)}")
print(f"{G} \u2514\u27A4 {C}Time taken:{W} {elapsed_time:.2f} seconds")
real_ips = []
    for subdomain_url in subdomains_found:
        # Strip the scheme to get the bare hostname.
        host = urlparse(subdomain_url).netloc
        if host:
            real_ip = get_real_ip(host)
            if real_ip:
                real_ips.append((host, real_ip))
                print(f"\n{Fore.YELLOW}[+] {Fore.CYAN}Real IP Address of {Fore.GREEN}{host}:{Fore.RED} {real_ip}")
ssl_info = get_ssl_certificate_info(host)
if ssl_info:
print(f"{Fore.RED} [+] {Fore.CYAN}SSL Certificate Information:")
for key, value in ssl_info.items():
print(f"{Fore.RED} \u2514\u27A4 {Fore.CYAN}{key}:{W} {value}")
if not real_ips:
print(f"{R}No real IP addresses found for subdomains.")
else:
print("\nTask Complete!!\n")
def get_real_ip(host):
    """Resolve host to an IPv4 address; return None if resolution fails."""
    try:
        return socket.gethostbyname(host)
    except socket.gaierror:
        return None
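# Example usage (the IP shown is a documentation-range placeholder):
#   get_real_ip("example.com")  # -> "203.0.113.7" on success, None on failure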
# Read the SecurityTrails API key from config.ini, creating a template on first run.
def read_config():
    config = configparser.ConfigParser()
    if not os.path.exists('config.ini'):
        # Create a template config file holding a placeholder key.
        config["DEFAULT"] = {
            "securitytrails_api_key": "your_api_key"}
        with open('config.ini', 'w') as configfile:
            config.write(configfile)
        print(f"\n[!] {Fore.RED}Please add your {C}SecurityTrails{Fore.RED} API Key in config.ini file{Fore.RESET}")
        return None
    config.read('config.ini')
    APIKEY = config['DEFAULT']['securitytrails_api_key']
    # Treat the unchanged placeholder as "no key configured".
    if APIKEY == "your_api_key":
        return None
    return APIKEY
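# config.ini written on first run (replace the placeholder with a real key):
#   [DEFAULT]
#   securitytrails_api_key = your_api_key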
def securitytrails_historical_ip_address(domain):
    api_key = read_config()
    if api_key:
        url = f"https://api.securitytrails.com/v1/history/{domain}/dns/a"
        headers = {
            "accept": "application/json",
            "APIKEY": api_key}
        try:
            response = requests.get(url, headers=headers)
            data = response.json()
            print(f"\n{Fore.GREEN}[+] {Fore.YELLOW}Historical IP Address Info from {C}SecurityTrails{Y} for {Fore.GREEN}{domain}:{W}")
            for record in data['records']:
                ip = record["values"][0]["ip"]
                first_seen = record["first_seen"]
                last_seen = record["last_seen"]
                organizations = record["organizations"][0]
                print(f"\n{R} [+] {C}IP Address: {R}{ip}{W}")
                print(f"{Y} \u2514\u27A4 {C}First Seen: {G}{first_seen}{W}")
                print(f"{Y} \u2514\u27A4 {C}Last Seen: {G}{last_seen}{W}")
                print(f"{Y} \u2514\u27A4 {C}Organizations: {G}{organizations}{W}")
        except Exception:
            print(f"{Fore.RED}Error extracting Historical IP Address information from SecurityTrails{Fore.RESET}")
    else:
        print(f"\n{Fore.RED}Please add your {C}SecurityTrails{Fore.RED} API Key in config.ini file{Fore.RESET}")
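# Fields read from the SecurityTrails A-record history response (shape
# inferred from the parsing above, not the full documented schema):
#   {"records": [{"values": [{"ip": ...}], "first_seen": ...,
#                 "last_seen": ..., "organizations": [...]}]}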
def get_domain_historical_ip_address(domain):
    try:
        url = f"https://viewdns.info/iphistory/?domain={domain}"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        }
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        # ViewDNS renders the IP history as a table with border="1";
        # the first two rows are headers, so skip them.
        table = soup.find('table', {'border': '1'})
        if table:
            rows = table.find_all('tr')[2:]
            print(f"\n{Fore.GREEN}[+] {Fore.YELLOW}Historical IP Address Info from {C}Viewdns{Y} for {Fore.GREEN}{domain}:{W}")
            for row in rows:
                columns = row.find_all('td')
                ip_address = columns[0].text.strip()
                location = columns[1].text.strip()
                owner = columns[2].text.strip()
                last_seen = columns[3].text.strip()
                print(f"\n{R} [+] {C}IP Address: {R}{ip_address}{W}")
                print(f"{Y} \u2514\u27A4 {C}Location: {G}{location}{W}")
                print(f"{Y} \u2514\u27A4 {C}Owner: {G}{owner}{W}")
                print(f"{Y} \u2514\u27A4 {C}Last Seen: {G}{last_seen}{W}")
    except Exception:
        # Network errors or layout changes on ViewDNS are silently ignored.
        pass
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python3 cloakquest3r.py <domain>")
        sys.exit(1)
    domain = sys.argv[1]
    # Extract the bare domain if a full URL was provided.
    parsed_url = urlparse(domain)
    if parsed_url.scheme:
        domain = parsed_url.netloc
print_banners()
CloudFlare_IP = get_real_ip(domain)
print(f"\n{Fore.GREEN}[!] {C}Checking if the website uses Cloudflare{Fore.RESET}\n")
if is_using_cloudflare(domain):
print(f"\n{R}Target Website: {W}{domain}")
print(f"{R}Visible IP Address: {W}{CloudFlare_IP}\n")
get_domain_historical_ip_address(domain)
securitytrails_historical_ip_address(domain)
print(f"\n{Fore.GREEN}[+] {Fore.YELLOW}Scanning for subdomains.{Fore.RESET}")
if "wordlist_path" not in locals():
download_wordlist(default_wordlist)
find_subdomains_with_ssl_analysis(domain)
else:
print(f"{Fore.RED}- Website is not using Cloudflare.")
technology = detect_web_server(domain)
print(f"\n{Fore.GREEN}[+] {C}Website is using: {Fore.GREEN} {technology}")
proceed = input(f"\n{Fore.YELLOW}> Do you want to proceed? {Fore.GREEN}(yes/no): ").lower()
if proceed == "yes":
print(f"\n{R}Target Website: {W}{domain}")
print(f"{R}Visible IP Address: {W}{CloudFlare_IP}\n")
get_domain_historical_ip_address(domain)
securitytrails_historical_ip_address(domain)
print(f"{Fore.GREEN}[+] {Fore.YELLOW}Scanning for subdomains.{Fore.RESET}")
if "wordlist_path" not in locals():
download_wordlist(default_wordlist)
find_subdomains_with_ssl_analysis(domain)
else:
print(f"{R}Operation aborted. Exiting...{W}")