Commit

v1.03 Commit 1
Version 1.1.0 Changelog:

- Added logging functionality
- Changed Browser to use Chrome user agents (see the sketch after this list)
- Changed Search Link structure
- Increased Browser scroll speed by 33%
- Increased RECAPTCHAV2 Solver speed by 29.58% - 35.16%
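
The diff below swaps random_firefox_ua() for random_chrome_ua() in src/main.py, but the utilities module that defines it is not part of this commit. A minimal sketch of what such a helper might look like, assuming a hand-maintained pool of Chrome user-agent strings (the pool contents and the function body are illustrative, not the project's actual code):

# Hypothetical sketch of random_chrome_ua(); the real src/utilities module is not shown in this diff.
import random

CHROME_USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
]

def random_chrome_ua() -> str:
    # Pick one Chrome user agent at random per browser session.
    return random.choice(CHROME_USER_AGENTS)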

Todo:

- Improve searches so they don't hit a captcha 100% of the time (try a TLS client with a Safari 16 fingerprint; see the sketch below)
- If the above doesn't work, make the search go to google.com first and then submit the request via the search bar to get a proper results URL
- Finish adding logging
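
A hedged sketch of the two captcha-avoidance ideas above; neither is implemented in this commit. The first assumes the third-party tls-client package (pip install tls-client) and its safari_16_0 client identifier; the second uses plain Selenium to submit the query through Google's own search bar. Both function names are hypothetical, and the project's own browser wrapper would replace the raw webdriver here.

import tls_client  # third-party: pip install tls-client
from urllib.parse import urlencode
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys


def search_via_tls(query: str) -> str:
    # Idea 1: request the results page with a Safari 16 TLS fingerprint
    # instead of the default Python TLS stack.
    session = tls_client.Session(
        client_identifier="safari_16_0",
        random_tls_extension_order=True,
    )
    url = "https://www.google.com/search?" + urlencode({"q": query})
    return session.get(url).text


def search_via_searchbar(driver: webdriver.Chrome, query: str) -> str:
    # Idea 2: load google.com first, then type the query into the search
    # bar so Google itself generates a proper results URL.
    driver.get("https://www.google.com")
    search_box = driver.find_element(By.NAME, "q")
    search_box.send_keys(query + Keys.ENTER)
    return driver.page_source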
CallahanVentures committed Jun 30, 2024
1 parent a11ade2 commit 7487d36
Showing 9 changed files with 133 additions and 93 deletions.
5 changes: 1 addition & 4 deletions .gitignore
@@ -3,7 +3,4 @@
 src/config.ini
 src/proxies.txt
 src/queries.txt
-src/vulnerables.txt
-src/proxies.txt
-src/vulnerables.txt
-src/proxies.txt
+src/vulnerables.txt
40 changes: 26 additions & 14 deletions src/main.py
@@ -6,6 +6,7 @@
 from utilities.browser import *
 from utilities.colored import print_red, print_blue, print_green
 from utilities.config import Config, load_config
+from utilities.logger import Logger
 from utilities.query import *
 from utilities.scanner import check_links_for_keywords
 from utilities.search import *
@@ -20,35 +21,39 @@
 def main() -> None:
     show_branding()
     backup_last_session()
+    logger = Logger()
     config: Optional[Config] = load_config()
     if config is None:
+        logger.logCritical("Error loading configuration file during runtime.")
         handle_failure_point_and_exit("main.py", "loading config file")
 
-    # Load queries from queries.txt
-    queries = load_queries()
+    # Loads queries from queries.txt
+    queries: List[str] = load_queries()
 
-    # Function to process each query
+    # Processes each query
     def process_query(query_string: str) -> Tuple[str, List[str]]:
-        USER_AGENT = random_firefox_ua()
+        USER_AGENT: str = random_chrome_ua()
         try:
             browser = None
-            retries = 5
+            retries: int = 5
 
             for _ in range(retries):
                 browser = initialize_browser(use_proxy=True, proxy_type=config.proxy_type, user_agent=USER_AGENT)
                 if browser is not None:
                     break
-                print_red(f"Failed to initialize browser, retrying...")
+                print_red("Failed to initialize browser, retrying...")
+                logger.logInfo("Failed to initialize browser, retrying...")
 
             if browser is None:
                 return query_string, []
 
-            thread_id = f"Thread {get_thread_id()}"
+            thread_id: str = f"Thread {get_thread_id()}"
             print_blue(f"[{thread_id}]: Processing query: {query_string}")
+            logger.logInfo(f"[{thread_id}]: Processing query: {query_string}")
 
             # Part one
-            first_search_operator = get_first_operator(query_string)
-            first_dork_decoded = str(b"\x12\x0c", "utf-8") + 'gws-wiz-serp"E' + first_search_operator
+            first_search_operator: str = get_first_operator(query_string)
+            first_dork_decoded: str = str(b"\x12\x0c", "utf-8") + 'gws-wiz-serp"E' + first_search_operator
 
             # Part two
             inurl = inurl_queries(query_string)
@@ -68,37 +73,46 @@ def process_query(query_string: str) -> Tuple[str, List[str]]:
                 first_dork_decoded, operators_string_decoded, first_search_operator
             )  # Google search location profile
             search_link = generate_search_link(query_string, gs_lp_string)
             print(search_link)
 
             # Make search request and process results
             response_text = get_search_response(browser, search_link, thread_id)
 
             if response_text == "swap proxy":
                 print_blue(f"[{thread_id}]: Swapping proxy for query: {query_string}")
+                logger.logInfo(f"[{thread_id}]: Swapping proxy for query: {query_string}")
 
                 status_and_browser = swap_proxy(browser, PROXY_TYPE=config.proxy_type, current_url=browser.current_url)
 
                 if not len(status_and_browser) == 2:  # swap_proxy returns either [bool(True), new_driver] or [bool(False)]
-                    handle_failure_point("Unable to swap proxy, exiting application with links parsed")
+                    handle_failure_point("Unable to swap proxy, closing current thread.")
+                    logger.logCritical("Unable to swap proxy, closing current thread.")
                     return "break"
 
                 else:  # if length of status_and_browser is 2 it has a value of [True, new_driver]
                     browser = status_and_browser[1]
 
+                solver = RecaptchaSolver(browser, thread_id)
+                if not solver.solveCaptcha():
+                    browser.quit()
+                    print_red(f"[{thread_id}]: Failed to solve captcha.")
+                    logger.logCritical(f"[{thread_id}]: Failed to solve captcha.")
 
             if response_text is None:
                 return query_string, "break"
 
             extracted_hrefs = extract_hrefs(response_text)
             cleaned_links = clean_hrefs(extracted_hrefs, config.excluded_domains)
             print_green(f"[{thread_id}]: Finished processing query: {query_string} with {len(cleaned_links)} unique links")
+            logger.logInfo(f"[{thread_id}]: Finished processing query: {query_string} with {len(cleaned_links)} unique links")
             return query_string, cleaned_links
 
         except Exception as e:
             if "GetHandleVerifier" in str(e):
                 print_red(f"[{thread_id}]: Exception while processing query: {query_string}: browser instance was closed")
+                logger.logCritical(f"[{thread_id}]: Exception while processing query: {query_string}: browser instance was closed")
             return query_string, []
 
         finally:
             if browser is not None:
                 close_browser(browser)
@@ -117,6 +131,7 @@ def process_query(query_string: str) -> Tuple[str, List[str]]:
                 all_cleaned_links.extend(cleaned_links)
             except Exception as exc:
                 print_red(f"Query '{query}' generated an exception: {exc}")
+                logger.logError(f"Query '{query}' generated an exception: {exc}")
 
     # Ensure all_cleaned_links has unique links
     unique_cleaned_links = list(set(all_cleaned_links))
@@ -129,7 +144,4 @@ def process_query(query_string: str) -> Tuple[str, List[str]]:
 
 
 if __name__ == "__main__":
-    try:
-        main()
-    except KeyboardInterrupt:
-        quit()
+    main()
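
src/main.py now calls logger.logInfo, logger.logError, and logger.logCritical, but src/utilities/logger.py itself is not shown in this view. A minimal sketch consistent with those call sites, assuming it wraps Python's standard logging module (the class internals and the log filename are assumptions):

import logging

class Logger:
    def __init__(self, filename: str = "session.log") -> None:
        # "session.log" is an assumed default; the real filename is not shown.
        logging.basicConfig(
            filename=filename,
            level=logging.INFO,
            format="%(asctime)s [%(levelname)s] %(message)s",
        )
        self._log = logging.getLogger("gsp-pro")

    def logInfo(self, message: str) -> None:
        self._log.info(message)

    def logError(self, message: str) -> None:
        self._log.error(message)

    def logCritical(self, message: str) -> None:
        self._log.critical(message)
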
3 changes: 1 addition & 2 deletions src/utilities/backup.py
@@ -68,7 +68,6 @@ def backup_last_session() -> None:
     except Exception as e:
         handle_generic_error(location, task, e)
 
-
 def backup_old_session_files(old_links_files:List[str], old_vulnerables_files:List[str], backup_folder:str) -> None:
     current_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
 
@@ -104,4 +103,4 @@ def backup_old_session_files(old_links_files:List[str], old_vulnerables_files:List[str], backup_folder:str) -> None:
         destination_path = os.path.join(os.getcwd(), backup_folder, new_file)
         shutil.move(temp_file, destination_path)
 
-    os.remove(source_path)
+    os.remove(source_path)
4 changes: 3 additions & 1 deletion src/utilities/branding.py
@@ -35,13 +35,15 @@
 
 
 application_name = "GSP Pro"
-application_version = "v1.0.0"
+application_version = "v1.1.0"
 heart_emoji = emoji.emojize(":blue_heart:")
 application_credits = f"""Developed with {heart_emoji} by Callahan Ventures LLC - https://callahanventures.com/
 Special thanks to:
 sarperavci (https://github.com/sarperavci/GoogleRecaptchaBypass)
 verq (https://github.com/cordlesscoder/)
 For providing a blueprint on solving RECAPTCHAV2"""
 
 application_features = {
