diff --git a/Cricket_info.py b/Cricket_info.py
index 29f2649..77ec2bc 100644
--- a/Cricket_info.py
+++ b/Cricket_info.py
@@ -7,7 +7,7 @@
# All rights reserved.
-import urllib.request
+import aiohttp
from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
from bs4 import BeautifulSoup
from pyrogram import filters
@@ -23,15 +23,15 @@
)
async def _(client, message):
score_page = "http://static.cricinfo.com/rss/livescores.xml"
- page = urllib.request.urlopen(score_page)
+ async with aiohttp.ClientSession() as session:
+ async with session.get(score_page) as resp:
+ page = await resp.text()
soup = BeautifulSoup(page, "html.parser")
result = soup.find_all("description")
- Sed = ""
- for match in result:
- Sed += match.get_text() + "\n\n"
+ Sed = "".join(match.get_text() + "\n\n" for match in result)
await edit_or_reply(
message,
- f"Match information gathered successful\n\n\n{Sed}",
+ f"Match information Gathered Successfully\n\n\n{Sed}",
parse_mode="html",
)
diff --git a/advice.py b/advice.py
new file mode 100644
index 0000000..9d7a103
--- /dev/null
+++ b/advice.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
+import requests
+
+@friday_on_cmd(
+ ["advice"],
+ cmd_help={
+ "help": "Gives You Simple Advice",
+ "example": "{ch}advice",
+ },
+)
+async def advice(client, message):
+ engine = message.Engine
+ pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
+ r = requests.get("https://api.adviceslip.com/advice")
+ await pablo.edit(r.json()["slip"]["advice"])
diff --git a/amazon_watch.py b/amazon_watch.py
deleted file mode 100644
index 869396a..0000000
--- a/amazon_watch.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
-#
-# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
-# and is released under the "GNU v3.0 License Agreement".
-# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
-#
-# All rights reserved.
-
-import requests
-from apscheduler.schedulers.asyncio import AsyncIOScheduler
-from bs4 import BeautifulSoup
-from main_startup import Friday
-from main_startup.config_var import Config
-from main_startup.core.decorators import friday_on_cmd
-from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
-from xtraplugins.dB.amazon_price_tracker_db import (
- add_amazon_tracker,
- get_all_amazon_trackers,
- is_amazon_tracker_in_db,
- rmamazon_tracker,
-)
-
-
-@friday_on_cmd(
- ["atl", "amazontrack"],
- is_official=False,
- cmd_help={
- "help": "Add Amazon Product To Tracking List!",
- "example": "{ch}atl (amazon-url)",
- },
-)
-async def add_to_db(client, message):
- aurl = await edit_or_reply(message, "`Processing..`")
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
- }
- url = get_text(message)
- page = requests.get(url, headers=headers)
- soup = BeautifulSoup(page.content, "html.parser")
- try:
- title = soup.find(id="productTitle").get_text()
- price = soup.find(id="priceblock_ourprice").get_text()
- title = title.strip()
- price = price[2:].split(",")
- except BaseException:
- await aurl.edit("`Url is Invalid!`")
- return
- price = round(float("".join(price)))
- if await is_amazon_tracker_in_db(str(url)):
- await aurl.edit("`Tracker Already Found In DB. Whats Point in Adding Again?`")
- return
- await add_amazon_tracker(url, price)
- await aurl.edit(
- f"**Added To TrackList** \n**Title :** `{title}` \n**Price :** `{price}`"
- )
-
-
-@friday_on_cmd(
- ["rmlt", "rmamazontrack"],
- is_official=False,
- cmd_help={
- "help": "Remove Amazon Product From Tracking List!",
- "example": "{ch}rmlt (amazon-url)",
- },
-)
-async def rm_from_db(client, message):
- rmurl = await edit_or_reply(message, "`Processing..`")
- url = get_text(message)
- if not await is_amazon_tracker_in_db(str(url)):
- await rmurl.edit("`This Url Was Not Found In My DB!`")
- return
- await rmamazon_tracker(str(url))
- await rmurl.edit("`Removed This Product From DB!`")
- return
-
-
-async def track_amazon():
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
- }
- kk = await get_all_amazon_trackers()
- if len(kk) == 0:
- return
- for ujwal in kk:
- page = requests.get(ujwal["amazon_link"], headers=headers)
- soup = BeautifulSoup(page.content, "html.parser")
- title = soup.find(id="productTitle").get_text()
- price = soup.find(id="priceblock_ourprice").get_text()
- title = title.strip()
- price = price[2:].split(",")
- price = round(float("".join(price)))
- if int(price) > int(ujwal["price"]):
- await Friday.send_message(
- Config.LOG_GRP,
- f"#Tracker - Price Reduced \nProduct Name : {title} \nCurrent price : {price}",
- )
- await rmamazon_tracker(str(ujwal["amazon_link"]))
- else:
- pass
-
-
-scheduler = AsyncIOScheduler(timezone="Asia/Kolkata")
-scheduler.add_job(track_amazon, trigger="cron", hour=13, minute=35)
-scheduler.start()
\ No newline at end of file
diff --git a/amazonsearch.py b/amazonsearch.py
index d5a0cf6..0d85743 100644
--- a/amazonsearch.py
+++ b/amazonsearch.py
@@ -6,11 +6,13 @@
#
# All rights reserved.
-import requests
+import aiohttp
from main_startup.core.decorators import friday_on_cmd
from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
+
+
@friday_on_cmd(
["amsearch"],
cmd_help={
@@ -26,7 +28,12 @@ async def _am_search_by_lackhac(client,message):
await msg_.edit("`Please, Give Input!`")
return
product = ""
- r = requests.get(f"https://amznsearch.vercel.app/api/?query={query}").json()
+ url = f"https://amznsearch.vercel.app/api/?query={query}"
+ async with aiohttp.ClientSession() as session:
+        async with session.get(url) as resp:
+            r = await resp.json()
+ if not r:
+ return await msg_.edit("`No Results Found!`")
for products in r:
link = products['productLink']
name = products['productName']
diff --git a/any_dl.py b/any_dl.py
new file mode 100644
index 0000000..1f5153e
--- /dev/null
+++ b/any_dl.py
@@ -0,0 +1,283 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+import time
+import aiohttp
+from main_startup.helper_func.basic_helpers import edit_or_reply, humanbytes, time_formatter
+from .helper_files.dl_ import AnyDL
+from fsplit.filesplit import Filesplit
+import os
+import re
+import pathlib
+import uuid
+from pyrogram.errors import FloodWait, MessageNotModified
+import math
+from main_startup.config_var import Config
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.core.startup_helpers import run_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text, progress
+
+async def download_file(message, url, file_name):
+ c_ = time.time()
+ with open(file_name, mode='wb') as f:
+ async with aiohttp.ClientSession() as session:
+ async with session.get(url) as r:
+ total_length = r.headers.get('content-length') or r.headers.get("Content-Length")
+ if total_length is None:
+ f.write(await r.read())
+ else:
+ total_length = int(total_length)
+ dl = 0
+ async for chunk in r.content.iter_chunked(max(total_length // 500, (1024*1024)*2)):
+ dl += len(chunk)
+ e_ = time.time()
+ diff = e_ - c_
+ percentage = dl * 100 / total_length
+ speed = dl / diff
+ elapsed_time = round(diff) * 1000
+ time_to_completion = round((total_length - dl) / speed) * 1000
+ estimated_total_time = elapsed_time + time_to_completion
+ f.write(chunk)
+ progress_str = "{0}{1} {2}%\n".format(
+ "".join(
+ [
+ "▰"
+ for _ in range(math.floor(percentage / 10))
+ ]
+ ),
+ "".join(
+ [
+ "▱"
+ for _ in range(
+ 10 - math.floor(percentage / 10)
+ )
+ ]
+ ),
+ round(percentage, 2),
+ )
+
+ r_ = f"Downloading This File \nFile : {file_name} \nFile Size : {humanbytes(total_length)} \nDownloaded : {humanbytes(dl)} \n{progress_str} \n\nSpeed : {humanbytes(round(speed))}/ps \nETA : {time_formatter(estimated_total_time)}"
+ try:
+ await message.edit(r_)
+ except MessageNotModified:
+ pass
+ return file_name
+
+image_ext = tuple([".jpg", ".png", ".jpeg"])
+vid_ext = tuple([".mp4", ".mkv"])
+sticker_ext = tuple([".webp", ".tgs"])
+song_ext = tuple([".mp3", ".wav", ".m4a"])
+
+async def upload_file(client, reply_message, message, file_path, caption):
+ rndm = uuid.uuid4().hex
+ siz_e = os.stat(file_path).st_size
+ if siz_e > 2040108421:
+ list_ = []
+ await message.edit("`File Size More Than 2GB. Telegram Won't Allow This. Splitting Files.`")
+ fs = Filesplit()
+ if not os.path.exists(f"./splitted_{rndm}"):
+ os.makedirs(f"./splitted_{rndm}")
+ fs.split(
+ file=file_path,
+ split_size=2040108421,
+ output_dir=f"./splitted_{rndm}",
+ )
+ file_list(f"./splitted_{rndm}", list_)
+ for oof in list_:
+            if os.path.basename(oof) == "fs_manifest.csv":
+                continue
+ await send_file(client, reply_message, oof, caption, message)
+ else:
+ await send_file(client, reply_message, file_path, caption, message)
+ return await message.delete()
+
+async def send_file(client, r_msg, file, capt, e_msg):
+ c_time = time.time()
+ file_name = os.path.basename(file)
+ send_as_thumb = bool(os.path.exists("./main_startup/Cache/thumb.jpg"))
+ if file.endswith(image_ext):
+        await r_msg.reply_photo(
+ file,
+ quote=True,
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ elif file.endswith(vid_ext):
+ if send_as_thumb:
+ await r_msg.reply_video(
+ file,
+ quote=True,
+ thumb="./main_startup/Cache/thumb.jpg",
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ else:
+ await r_msg.reply_video(
+ file,
+ quote=True,
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ elif file.endswith(".gif"):
+ if send_as_thumb:
+ await r_msg.reply_animation(
+ file,
+ quote=True,
+ thumb="./main_startup/Cache/thumb.jpg",
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ else:
+ await r_msg.reply_animation(
+ file,
+ quote=True,
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ elif file.endswith(song_ext):
+ if send_as_thumb:
+ await r_msg.reply_audio(
+ file,
+ quote=True,
+ thumb="./main_startup/Cache/thumb.jpg",
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ else:
+ await r_msg.reply_audio(
+ file,
+ quote=True,
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ elif file.endswith(sticker_ext):
+ await r_msg.reply_sticker(
+ file,
+ quote=True,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ elif send_as_thumb:
+ await r_msg.reply_document(
+ file,
+ quote=True,
+ thumb="./main_startup/Cache/thumb.jpg",
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+ else:
+ await r_msg.reply_document(
+ file,
+ quote=True,
+ caption=capt,
+ progress=progress,
+ progress_args=(e_msg, c_time, f"`Uploading {file_name}!`", file_name),
+ )
+
+def file_list(path, lisT):
+ pathlib.Path(path)
+ for filepath in pathlib.Path(path).glob("**/*"):
+ lisT.append(filepath.absolute())
+ return lisT
+
+@friday_on_cmd(
+ ["udl", "any_dl"],
+ cmd_help={
+ "help": "Download Files From Anonfiles, Mega, MediaFire. If Its Direct Link Make Sure To Give File Name",
+ "example": "{ch}udl (file url as input) if url in supported sites else {ch}udl (file url|file name)",
+ }
+)
+async def download_(client, message):
+    s = await edit_or_reply(message, "`Trying To Download..`")
+ dl_client = AnyDL()
+ url = get_text(message)
+ msg = message.reply_to_message or message
+ if 'drive.google.com' in url:
+ try:
+ link = re.findall(r'\bhttps?://drive\.google\.com\S+', url)[0]
+ except IndexError:
+ return await s.edit("`No Drive Url Links Found!`")
+ try:
+ file_url, file_name = await dl_client.gdrive(url)
+ except BaseException as e:
+ return await s.edit(f"**Failed To GET Direct Link ::** `{e}`")
+ if file_url is None:
+ return await s.edit("**Failed To GET Direct Link**")
+ file = await download_file(s, file_url, file_name)
+ caption = f"File Downloaded & Uploaded \nFile Name : {file_name}"
+ await upload_file(client, msg, s, file, caption)
+ return os.remove(file)
+ if "mediafire.com" in url:
+ try:
+ link = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)[0]
+ except IndexError:
+ return await s.edit("`No Media File Url Links Found!`")
+ try:
+ file_url, file_name, file_size, file_upload_date, caption_, scan_result = await dl_client.media_fire_dl(url)
+ except BaseException as e:
+ return await s.edit(f"**Failed To GET Direct Link ::** `{e}`")
+ if file_url is None:
+ return await s.edit("**Failed To GET Direct Link**")
+ file = await download_file(s, file_url, file_name)
+ caption = f"File Downloaded & Uploaded \nFile Name : {file_name} \nFile Size : {file_size} \nFile Upload Date : {file_upload_date} \nFile Scan Result : {scan_result} \n{caption_}"
+ await upload_file(client, msg, s, file, caption)
+ return os.remove(file)
+ if "mega.nz" in url:
+ try:
+ link = re.findall(r'\bhttps?://.*mega\.nz\S+', url)[0]
+ except IndexError:
+ return await s.edit("`No Mega Url Links Found!`")
+ if "folder" in link:
+            return await s.edit("`What? Download A Folder? Are You Nuts?`")
+ try:
+ file_url, file_name, file_size = await dl_client.mega_dl(link)
+ except BaseException as e:
+ return await s.edit(f"**Failed To GET Direct Link ::** `{e}`")
+ if file_url is None:
+ return await s.edit("**Failed To GET Direct Link**")
+ file = await download_file(s, file_url, file_name)
+ file_size = humanbytes(file_size)
+ caption = f"File Downloaded & Uploaded \nFile Name : {file_name} \nFile Size : {file_size}"
+ await upload_file(client, msg, s, file, caption)
+ return os.remove(file)
+ if "anonfiles" in url:
+ try:
+ link = re.findall(r"\bhttps?://.*anonfiles\.com\S+", url)[0]
+ except IndexError:
+ return await s.edit("`No Anon Files Link Found.`")
+ try:
+ file_url, file_size, file_name = await dl_client.anon_files_dl(link)
+ except BaseException as e:
+ return await s.edit(f"**Failed To GET Direct Link ::** `{e}`")
+ if file_url is None:
+ return await s.edit("**Failed To GET Direct Link**")
+ file = await download_file(s, file_url, file_name)
+ else:
+ url_ = url.split('|')
+ if len(url_) != 2:
+ return await s.edit("`You Have To Give Me File Name & Url. Please Check Help Menu.`")
+ url = url_[0]
+ file_name = url_[1]
+ try:
+            file = await download_file(s, url, file_name)
+ except BaseException as e:
+ return await s.edit(f"**Failed To Download ::** `{e}`")
+ file_size = humanbytes(os.stat(file).st_size)
+
+ caption = f"File Downloaded & Uploaded \nFile Name : {file_name} \nFile Size : {file_size}"
+ await upload_file(client, msg, s, file, caption)
+ return os.remove(file)
+
diff --git a/carbon.py b/carbon.py
new file mode 100644
index 0000000..be7e93e
--- /dev/null
+++ b/carbon.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+import asyncio
+import logging
+from selenium.common.exceptions import NoSuchElementException
+from main_startup import Config
+import random
+import os
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.core.startup_helpers import run_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text, run_in_exc
+
+
+GOOGLE_CHROME_BIN = Config.CHROME_BIN_PATH
+CHROME_DRIVER = Config.CHROME_DRIVER_PATH
+
+@run_in_exc
+def make_carbon(code, driver, lang="auto"):
+ url = f'https://carbon.now.sh/?l={lang}&code={code}'
+ driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
+ params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': './'}}
+ command_result = driver.execute("send_command", params)
+ driver.get(url)
+ type_ = '//*[@id="__next"]/main/div[2]/div[2]/div[1]/div[1]/div/span[2]'
+ em = "export-menu"
+ png_xpath = '//*[@id="export-png"]'
+ four_x_path = '//*[@id="__next"]/main/div[2]/div[2]/div[1]/div[3]/div[4]/div[3]/div[2]/div[3]/div/button[3]'
+ color_used_xpath = '/html/body/div[1]/main/div[2]/div[2]/div[1]/div[1]/div/span[2]/input'
+ random_int = random.randint(1, 29)
+ value_ = f"downshift-0-item-{str(random_int)}"
+ wait = WebDriverWait(driver, 20)
+ wait.until(EC.visibility_of_element_located((By.XPATH, type_))).click()
+ wait.until(EC.visibility_of_element_located((By.ID, value_))).click()
+ wait.until(EC.visibility_of_element_located((By.ID, em))).click()
+ wait.until(EC.visibility_of_element_located((By.XPATH, four_x_path))).click()
+ wait.until(EC.visibility_of_element_located((By.XPATH, png_xpath))).click()
+ file_ = "./carbon.png"
+ color_used = wait.until(EC.visibility_of_element_located((By.XPATH, color_used_xpath))).get_attribute("value")
+ return file_, color_used
+
+
+@friday_on_cmd(
+ ["carbon", "karb"],
+ cmd_help={
+ "help": "`Carbonize Codes In A Cool Way.`",
+ "example": "{ch}carbon (input or reply_message will be taken)",
+ },
+)
+async def karb(client, message):
+    e_ = await edit_or_reply(message, "`Carbonizing Code...`")
+ code = get_text(message)
+ if not code:
+        if not message.reply_to_message:
+            return await e_.edit("`Nothing To Carbonize..`")
+        if not message.reply_to_message.text:
+            return await e_.edit("`Nothing To Carbonize...`")
+ code = code or message.reply_to_message.text
+ reply_ = message.reply_to_message or message
+ chrome_options = Options()
+ chrome_options.add_argument("--headless")
+ chrome_options.binary_location = GOOGLE_CHROME_BIN
+ chrome_options.add_argument("--window-size=1920x1080")
+ chrome_options.add_argument("--disable-dev-shm-usage")
+ chrome_options.add_argument("--no-sandbox")
+ chrome_options.add_argument("--disable-gpu")
+ prefs = {'download.default_directory' : './'}
+ chrome_options.add_experimental_option('prefs', prefs)
+ driver = webdriver.Chrome(executable_path=CHROME_DRIVER, options=chrome_options)
+ try:
+ carbon_file, value_ = await make_carbon(code, driver)
+ await asyncio.sleep(5)
+ except BaseException as e:
+ await e_.edit(f"[Selenium] - [Chrome - Driver] - [Carbon] >> {e}")
+ return driver.quit()
+ driver.quit()
+ await reply_.reply_photo(carbon_file, caption=f"Code Carbonized Using Friday \nStyle Used : {value_}")
+ await e_.delete()
diff --git a/cc_tools.py b/cc_tools.py
index 9e4bf13..c6c9745 100644
--- a/cc_tools.py
+++ b/cc_tools.py
@@ -23,18 +23,20 @@
get_text,
get_user,
iter_chats,
+ run_in_exc
)
from main_startup.helper_func.logger_s import LogIt
from plugins import devs_id
from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.support import expected_conditions
+from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
GOOGLE_CHROME_BIN = Config.CHROME_BIN_PATH
CHROME_DRIVER = Config.CHROME_DRIVER_PATH
-async def namso_gen(bin, no_of_result=15):
+@run_in_exc
+def namso_gen(bin, no_of_result=15):
url = "https://namso-gen.com/"
chrome_options = Options()
chrome_options.add_argument("--headless")
@@ -45,27 +47,19 @@ async def namso_gen(bin, no_of_result=15):
chrome_options.add_argument("--disable-gpu")
driver = webdriver.Chrome(executable_path=CHROME_DRIVER, options=chrome_options)
driver.get(url)
- # Sleep Until Page is Fully Loaded
- await asyncio.sleep(5)
w = WebDriverWait(driver, 20)
bin_xpath = '//*[@id="main"]/div/div/div[3]/div[1]/form/div[1]/label/input'
no_of_r_xpath = '//*[@id="main"]/div/div/div[3]/div[1]/form/div[3]/div[3]/label/input'
button_xpath = '/html/body/div/div/div/main/div/div/div[3]/div[1]/form/div[5]/button'
- w.until(expected_conditions.presence_of_element_located((By.XPATH, bin_xpath)))
- elem = driver.find_element_by_xpath(bin_xpath)
- elem.send_keys(bin)
- await asyncio.sleep(2)
- elem3 = driver.find_element_by_xpath(no_of_r_xpath)
- for i in range(2):
+ w.until(EC.visibility_of_element_located((By.XPATH, bin_xpath))).send_keys(bin)
+ elem3 = w.until(EC.visibility_of_element_located((By.XPATH, no_of_r_xpath)))
+ for _ in range(2):
elem3.send_keys(Keys.BACKSPACE)
- await asyncio.sleep(1)
- elem3 = driver.find_element_by_xpath(no_of_r_xpath)
+ elem3 = w.until(EC.visibility_of_element_located((By.XPATH, no_of_r_xpath)))
elem3.send_keys(no_of_result)
- await asyncio.sleep(2)
- driver.find_element_by_xpath(button_xpath).click()
- await asyncio.sleep(2)
- s = driver.find_elements_by_xpath('//*[@id="result"]')[0].get_attribute("value")
- driver.close()
+ w.until(EC.visibility_of_element_located((By.XPATH, button_xpath))).click()
+ s = w.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="result"]'))).get_attribute("value")
+ driver.quit()
return s
@friday_on_cmd(
@@ -89,7 +83,7 @@ async def ns_gen(client, message):
bin = input
s = await namso_gen(bin, no_of_results)
if not s:
- return msg.edit("`Invalid Bin Or Input Given More Than 25`")
+ return await msg.edit("`Invalid Bin Given Or Results Limit Reached.`")
t = f"""
**Bin :** `{bin}`
@@ -101,9 +95,16 @@ async def ns_gen(client, message):
**Powered By FridayUb**
"""
await msg.edit(t, parse_mode="md")
-
-
-
+
+def stark_finder(to_find, from_find):
+ return bool(
+ re.search(
+ f"( |^|[^\\w]){re.escape(to_find)}( |$|[^\\w])",
+ from_find,
+ flags=re.IGNORECASE,
+ )
+ )
+
my_code = {
400: "『! Invalid Key !』",
200: "『 Valid Key 』",
@@ -136,57 +137,6 @@ async def check_stripe_key(key_: str):
else:
return 200
-def stark_finder(to_find, from_find):
- if re.search(r"( |^|[^\w])" + re.escape(to_find) + r"( |$|[^\w])", from_find, flags=re.IGNORECASE):
- return True
- return False
-
-
-async def cc_(cc):
- url = "https://starkapis.herokuapp.com/ccn/"
- data_ = {
- "cc": cc
- }
- async with aiohttp.ClientSession() as session:
- async with session.get(url, json=data_) as resp:
- response_ = await resp.json()
- check_response = f"『 ✮ {response_['msg']} ✮ 』"
- time_taken = response_['time_taken']
- cc = response_['cc']
- approved = response_['approved']
- mes = response_['exp_month']
- yes = response_['exp_year']
- cvc = response_['cvc']
- final_t = f"""
-Result
-
-CC Number : {cc}
-Approved : {approved}
-CVC : {cvc}
-Expiry Month : {mes}
-Expiry Year : {yes}
-Response : {check_response}
-Time Taken: {time_taken}
-
-Checked Using FridayUB
-"""
- return final_t
-
-@friday_on_cmd(
- ["ccn"],
- cmd_help={
- "help": "Check CC - CCN Based.",
- "example": "{ch}ccn 5224252466461650|11|2022|858",
- },
-)
-async def cc_check(client, message):
- msg = await edit_or_reply(message, "`Please Wait`")
- cc = get_text(message)
- if not cc:
- return await msg.edit("`Give Me A CC Check.`")
- r = await cc_(cc)
- await msg.edit(r)
-
@friday_on_cmd(
["sk"],
cmd_help={
diff --git a/collage.py b/collage.py
index d2cc34d..9ef620b 100644
--- a/collage.py
+++ b/collage.py
@@ -21,7 +21,7 @@
async def create_s_collage(file_path, filename, width, stark_h):
"""Create Image Collage"""
- img_stark = [filepath for filepath in pathlib.Path(file_path).glob("**/*")]
+ img_stark = list(pathlib.Path(file_path).glob("**/*"))
margin_size = 2
while True:
img_stark_list = list(img_stark)
@@ -45,10 +45,12 @@ async def create_s_collage(file_path, filename, width, stark_h):
stark_h -= 10
else:
break
- out_lol_h = 0
- for meisnub, sedlife in ujwal_liness:
- if sedlife:
- out_lol_h += int(stark_h / meisnub) + margin_size
+ out_lol_h = sum(
+ int(stark_h / meisnub) + margin_size
+ for meisnub, sedlife in ujwal_liness
+ if sedlife
+ )
+
if not out_lol_h:
return None
final_image = Image.new('RGB', (width, int(out_lol_h)), (35, 35, 35))
@@ -100,19 +102,21 @@ async def wow_collage(client, message):
limit = final_input[0]
width = int(final_input[1])
stark_h = int(final_input[2])
- if not limit.isdigit():
- return await owo.edit("`Limit Should Be Digits.`")
- limit_ = int(limit)
+ try:
+ limit_ = int(limit)
+ except ValueError:
+ return await owo.edit("`Limit Should Be In Digits.`")
file_path = "./to_collage/"
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.mkdir(file_path)
- async for msg in client.search_messages(chat, filter="photo", limit=limit_):
- img_ += 1
- try:
- await msg.download(file_path)
- except Exception as e:
- logging.info(e)
+ async for msg in client.iter_history(chat, limit=limit_):
+ if msg.photo:
+ img_ += 1
+ try:
+ await msg.download(file_path)
+ except Exception as e:
+ logging.error(e)
if img_ == 0:
await owo.edit("`No Images Found.`")
shutil.rmtree(file_path)
diff --git a/fban.py b/fban.py
index bd9cb95..5ffbc48 100644
--- a/fban.py
+++ b/fban.py
@@ -123,6 +123,47 @@ async def fban_s(client, message):
good_f_msg = f"**FBANNED** \n**Affected Feds :** `{len(fed_s) - failed_n}` \n**Failed :** `{failed_n}` \n**Total Fed :** `{len(fed_s)}`"
await uj.edit(good_f_msg)
+@friday_on_cmd(
+ ["unfban", "unfedban"],
+ is_official=False,
+ cmd_help={
+ "help": "Un-Fban a user!",
+ "example": "{ch}unfban (enter username or id)",
+ },
+)
+async def un_fban_s(client, message):
+    uj = await edit_or_reply(message, "`Un-Fbanning!`")
+ failed_n = 0
+ ur = get_text(message)
+ if not ur:
+ await uj.edit("`Who Should I Un-Fban? You?`")
+ return
+ if not Config.FBAN_GROUP:
+ await uj.edit("`Please Setup Fban Group!`")
+ return
+ fed_s = await get_all_feds()
+ if len(fed_s) == 0:
+ await uj.edit("`You Need Atleast One Fed In Db To Use This Plugin!`")
+ return
+ await uj.edit(f"`Un-Fbanning In {len(fed_s)} Feds!`")
+ try:
+ await client.send_message(Config.FBAN_GROUP, "/start")
+    except BaseException as e:
+ await uj.edit(f"`Unable To Send Message To Fban Group! \nTraceBack : {e}`")
+ return
+ for i in fed_s:
+ await asyncio.sleep(2)
+ try:
+ await client.send_message(Config.FBAN_GROUP, f"/joinfed {i['fed_s']}")
+ await client.send_message(Config.FBAN_GROUP, f"/unfban {ur}")
+ except FloodWait as e:
+ await asyncio.sleep(e.x)
+ except BaseException as eb:
+ logging.error(eb)
+ failed_n += 1
+ good_f_msg = f"**UN-FBANNED** \n**Affected Feds :** `{len(fed_s) - failed_n}` \n**Failed :** `{failed_n}` \n**Total Fed :** `{len(fed_s)}`"
+ await uj.edit(good_f_msg)
+
async def fetch_all_fed(client, message):
fed_list = []
@@ -138,32 +179,32 @@ async def fetch_all_fed(client, message):
pass
await asyncio.sleep(7)
sed = (await client.get_history("@MissRose_bot", 1))[0]
- if sed.media:
- fed_file = await sed.download()
- file = open(fed_file, "r")
- lines = file.readlines()
- for line in lines:
- try:
- fed_list.append(line[:36])
- except BaseException:
- pass
- os.remove(fed_file)
- else:
+ if not sed.media:
return None
+ fed_file = await sed.download()
+ file = open(fed_file, "r")
+ lines = file.readlines()
+ for line in lines:
+ try:
+ fed_list.append(line[:36])
+ except BaseException:
+ pass
+ os.remove(fed_file)
else:
X = ok.text
lol = X.splitlines()
if "you are the owner" in X.lower():
for lo in lol:
- if "you are the owner" not in lo.lower():
- if "you are admin" not in lo.lower():
- if lo[:36] != "":
- if not lo.startswith("-"):
- fed_list.append(lo[:36])
- else:
- fed_list.append(lo[2:38])
+ if (
+ "you are the owner" not in lo.lower()
+ and "you are admin" not in lo.lower()
+ and lo[:36] != ""
+ ):
+ if not lo.startswith("-"):
+ fed_list.append(lo[:36])
+ else:
+ fed_list.append(lo[2:38])
else:
Y = X[44:].splitlines()
- for lol in Y:
- fed_list.append(lol[2:38])
+ fed_list.extend(lol[2:38] for lol in Y)
return fed_list
diff --git a/github_search.py b/github_search.py
new file mode 100644
index 0000000..2cc4953
--- /dev/null
+++ b/github_search.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+
+import logging
+import os
+import requests
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
+
+
+@friday_on_cmd(
+ ["git"],
+ cmd_help={
+ "help": "Search In GitHub",
+ "example": "{ch}git ",
+ },
+)
+async def git(client, message):
+ engine = message.Engine
+ pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
+ args = get_text(message)
+ if not args:
+ await pablo.edit(engine.get_string("INPUT_REQ").format("Search Text"))
+ return
+ r = requests.get("https://api.github.com/search/repositories", params={"q": args})
+ lool = r.json()
+ if lool.get("total_count") == 0:
+ await pablo.edit(engine.get_string("F_404"))
+ return
+ else:
+ lol = lool.get("items")
+ qw = lol[0]
+ txt = f"""
+Name : {qw.get("name")}
+Full Name : {qw.get("full_name")}
+Link : {qw.get("html_url")}
+Fork Count : {qw.get("forks_count")}
+Open Issues : {qw.get("open_issues")}
+"""
+ if qw.get("description"):
+ txt += f'Description : {qw.get("description")}'
+ if qw.get("language"):
+ txt += f'Language : {qw.get("language")}'
+ if qw.get("size"):
+ txt += f'Size : {qw.get("size")}'
+ if qw.get("score"):
+ txt += f'Score : {qw.get("score")}'
+ if qw.get("created_at"):
+ txt += f'Created At : {qw.get("created_at")}'
+    if qw.get("archived"):
+ txt += "This Project is Archived"
+ await pablo.edit(txt, disable_web_page_preview=True)
diff --git a/harem.py b/harem.py
index 5ae8683..c7c574d 100644
--- a/harem.py
+++ b/harem.py
@@ -17,6 +17,7 @@
get_text,
get_user,
iter_chats,
+ run_in_exc
)
from main_startup.helper_func.logger_s import LogIt
from plugins import devs_id
@@ -86,10 +87,7 @@ async def remove_nsfw(client, message):
async def is_harem_enabled(f, client, message):
if Config.ENABLE_WAIFU_FOR_ALL_CHATS:
return bool(True)
- if await is_chat_in_db(int(message.chat.id)):
- return bool(True)
- else:
- return bool(False)
+ return bool(True) if await is_chat_in_db(int(message.chat.id)) else bool(False)
async def harem_event(f, client, message):
if not message:
@@ -102,13 +100,8 @@ async def harem_event(f, client, message):
return bool(True)
return bool(False)
-
-harem_event = filters.create(func=harem_event, name="harem_event")
-is_harem_enabled = filters.create(func=is_harem_enabled, name="is_harem_enabled")
-
-@listen(filters.user([int(792028928)]) & ~filters.edited & is_harem_enabled & harem_event & filters.group)
-async def harem_catcher(client, message):
- img = await message.download()
+@run_in_exc
+def get_data(img):
searchUrl = "https://www.google.com/searchbyimage/upload"
file_img = {"encoded_image": (img, open(img, "rb")), "image_content": ""}
response = requests.post(searchUrl, files=file_img, allow_redirects=False)
@@ -116,8 +109,16 @@ async def harem_catcher(client, message):
os.remove(img)
if response.status_code == 400:
return logging.info("(Waifu Catch Failed) - [Invalid Response]")
- fetchUrl = response.headers["Location"]
- match = await ParseSauce(fetchUrl + "&preferences?hl=en&fg=1#languages")
+ return response.headers["Location"]
+
+harem_event = filters.create(func=harem_event, name="harem_event")
+is_harem_enabled = filters.create(func=is_harem_enabled, name="is_harem_enabled")
+
+@listen(filters.user([792028928]) & ~filters.edited & is_harem_enabled & harem_event & filters.group)
+async def harem_catcher(client, message):
+ img = await message.download()
+ fetchUrl = await get_data(img)
+ match = await ParseSauce(f'{fetchUrl}&preferences?hl=en&fg=1#languages')
guessp = match["best_guess"]
if not guessp:
return logging.info("(Waifu Catch Failed.) \nERROR : 404: Waifu Not Found.")
diff --git a/helper_files/dl_.py b/helper_files/dl_.py
new file mode 100644
index 0000000..75c3276
--- /dev/null
+++ b/helper_files/dl_.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+import re
+import aiohttp
+import base64
+from fake_useragent import UserAgent
+import requests
+from bs4 import BeautifulSoup
+from lxml import etree
+from main_startup.helper_func.basic_helpers import run_in_exc
+from xtraplugins.helper_files.dl_helpers import api_request, find_between, base64_url_decode, decrypt_attr, base64_to_a32, parse_url
+
+class AnyDL:
+ def __init__(self):
+ self.dl_path = "./main_startup/downloads"
+
+ @run_in_exc
+ def gdrive(self, url):
+ drive = 'https://drive.google.com'
+ file_id = ''
+ if url.find('view') != -1:
+ file_id = url.split('/')[-2]
+ elif url.find('open?id=') != -1:
+ file_id = url.split('open?id=')[1].strip()
+ elif url.find('uc?id=') != -1:
+ file_id = url.split('uc?id=')[1].strip()
+ url = f'{drive}/uc?export=download&id={file_id}'
+ download = requests.get(url, stream=True, allow_redirects=False)
+ cookies = download.cookies
+ dl_url = download.headers.get("location") if download.headers else None
+ if not dl_url:
+ page = BeautifulSoup(download.content, 'lxml')
+ export = drive + page.find('a', {'id': 'uc-download-url'}).get('href')
+ name = page.find('span', {'class': 'uc-name-size'}).text
+ response = requests.get(export, stream=True, allow_redirects=False, cookies=cookies)
+ dl_url = response.headers['location']
+ if 'accounts.google.com' in dl_url:
+ return None
+ return dl_url, name
+
+ async def mega_dl(self, url):
+ path = parse_url(url).split('!')
+ if path is None:
+ return None, None, None
+ file_handle = path[0]
+ file_key = path[1]
+ file_key = base64_to_a32(file_key)
+ file_data = await api_request({
+ 'a': 'g',
+ 'g': 1,
+ 'p': file_handle
+ })
+ k = (file_key[0] ^ file_key[4], file_key[1] ^ file_key[5],
+ file_key[2] ^ file_key[6], file_key[3] ^ file_key[7])
+ if 'g' not in file_data:
+ return None, None, None
+ file_url = file_data['g']
+ file_size = file_data['s']
+ attribs = base64_url_decode(file_data['at'])
+ attribs = decrypt_attr(attribs, k)
+ file_name = attribs['n']
+ return file_url, file_name, file_size
+
+ async def media_fire_dl(self, media_fire_url):
+ ua = UserAgent()
+ user_agent = ua.random
+ headers = {"User-Agent": user_agent}
+ async with aiohttp.ClientSession(headers=headers) as session:
+ async with session.get(media_fire_url) as resp:
+ if resp.status != 200:
+ return None
+ b_ = BeautifulSoup(await resp.read(), 'html.parser')
+ dom = etree.HTML(str(b_))
+ file_url = dom.xpath('//*[@id="downloadButton"]')[0].get("href")
+ file_name = dom.xpath('/html/body/div[1]/div[1]/div[2]/div/div[2]/div[1]/div[1]')[0].text
+ file_size = dom.xpath('/html/body/div[1]/div[1]/div[2]/div/div[2]/ul/li[1]/span')[0].text
+ file_uploaded_date = dom.xpath('/html/body/div[1]/div[1]/div[2]/div/div[2]/ul/li[2]/span')[0].text
+ caption_ = dom.xpath('/html/body/div[1]/div[1]/div[6]/div[1]/div[1]/div[3]/p')[0].text
+ scan_result = dom.xpath("/html/body/div[1]/div[1]/div[6]/div[1]/div[2]/div/div[2]/p/span")[0].text
+ return file_url, file_name, file_size, file_uploaded_date, caption_, scan_result
+
+ async def anon_files_dl(self, anon_url):
+ ua = UserAgent()
+ user_agent = ua.random
+ headers = {"User-Agent": user_agent}
+ async with aiohttp.ClientSession(headers=headers) as session:
+ async with session.get(anon_url) as resp:
+ if resp.status != 200:
+ return None
+ b_ = BeautifulSoup(await resp.read(), 'lxml')
+ file_url = b_.find("a", {"id": "download-url"}).get("href")
+ file_name = b_.find("h1", {"class": "text-center text-wordwrap"}).text
+ file_size = b_.find("a", {"id": "download-url"}).text
+ file_size = find_between(r"\(", r"\)", file_size)
+ return file_url, file_size, file_name
+
+
diff --git a/helper_files/dl_helpers.py b/helper_files/dl_helpers.py
new file mode 100644
index 0000000..d7ffc99
--- /dev/null
+++ b/helper_files/dl_helpers.py
@@ -0,0 +1,99 @@
+"""
+Generates Direct Link From Mega Public Url
+Copied From mega.py And Modified To Fit My Purpose.
+"""
+
+import json
+import re
+import json
+import base64
+import struct
+from Crypto.Cipher import AES
+import aiohttp
+import asyncio
+import random
+import codecs
+
+def aes_cbc_decrypt(data, key):
+ aes_cipher = AES.new(key, AES.MODE_CBC, codecs.latin_1_encode('\0' * 16)[0])
+ return aes_cipher.decrypt(data)
+
+def decrypt_attr(attr, key):
+ attr = aes_cbc_decrypt(attr, a32_to_str(key))
+ attr = codecs.latin_1_decode(attr)[0]
+ attr = attr.rstrip('\0')
+ return json.loads(attr[4:]) if attr[:6] == 'MEGA{"' else False
+
+def a32_to_str(a):
+ return struct.pack('>%dI' % len(a), *a)
+
+def str_to_a32(b):
+ if isinstance(b, str):
+ b = codecs.latin_1_encode(b)[0]
+ if len(b) % 4:
+ b += b'\0' * (4 - len(b) % 4)
+ return struct.unpack('>%dI' % (len(b) / 4), b)
+
+def base64_url_decode(data):
+ data += '=='[(2 - len(data) * 3) % 4:]
+ for search, replace in (('-', '+'), ('_', '/'), (',', '')):
+ data = data.replace(search, replace)
+ return base64.b64decode(data)
+
+def base64_to_a32(s):
+ return str_to_a32(base64_url_decode(s))
+
+def parse_url(url):
+ if '/file/' in url:
+ url = url.replace(' ', '')
+ file_id = re.findall(r'\W\w\w\w\w\w\w\w\w\W', url)[0][1:-1]
+ id_index = re.search(file_id, url).end()
+ key = url[id_index + 1:]
+ return f'{file_id}!{key}'
+ elif '!' in url:
+ match = re.findall(r'/#!(.*)', url)
+ return match[0]
+ else:
+ return None
+
+async def download_file(url):
+ path = parse_url(url).split('!')
+ if path is None:
+ return None, None, None
+ file_handle = path[0]
+ file_key = path[1]
+ file_key = base64_to_a32(file_key)
+ file_data = await api_request({
+ 'a': 'g',
+ 'g': 1,
+ 'p': file_handle
+ })
+ k = (file_key[0] ^ file_key[4], file_key[1] ^ file_key[5],
+ file_key[2] ^ file_key[6], file_key[3] ^ file_key[7])
+ if 'g' not in file_data:
+ return None, None, None
+ file_url = file_data['g']
+ file_size = file_data['s']
+ attribs = base64_url_decode(file_data['at'])
+ attribs = decrypt_attr(attribs, k)
+ file_name = attribs['n']
+ return file_name,file_size, file_url
+
+async def api_request(data):
+ sequence_num = random.randint(0, 0xFFFFFFFF)
+ if not isinstance(data, list):
+ data = [data]
+ url = 'https://g.api.mega.co.nz/cs'
+ params = {'id': sequence_num}
+ async with aiohttp.ClientSession() as session:
+ response = await session.post(url, data=json.dumps(data), params=params)
+ json_resp = await response.json()
+ return json_resp[0]
+
+def find_between(start_string, end_string, to_find):
+ _to_ = f"{start_string}(.*?){end_string}"
+ result = re.search(_to_, to_find)
+ if not result:
+ return None
+ return result.group(1)
+
diff --git a/imdb.py b/imdb.py
index 425e3c4..49af634 100644
--- a/imdb.py
+++ b/imdb.py
@@ -8,10 +8,16 @@
from main_startup.core.decorators import friday_on_cmd
from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
-import requests
-import bs4
+import aiohttp
+from bs4 import BeautifulSoup
+import json
import re
+async def get_content(url):
+ async with aiohttp.ClientSession() as session:
+ r = await session.get(url)
+ return await r.read()
+
@friday_on_cmd(
["imdb"],
cmd_help={
@@ -19,103 +25,59 @@
"example": "{ch}imdb joker",
}
)
-
async def _(client,message):
- msgg = get_text(message)
- sedlife = await edit_or_reply(message, "```Searching For Movie..```")
- if not msgg:
- await sedlife.edit("`Dumb Give Me Inpit`")
+ query = get_text(message)
+ msg = await edit_or_reply(message, "`Searching For Movie..`")
+ reply = message.reply_to_message or message
+ if not query:
+ await msg.edit("`Please Give Me An Input.`")
return
- try:
- movie_name = msgg
- final_name = "+".join(movie_name)
- page = requests.get(
- "https://www.imdb.com/find?ref_=nv_sr_fn&q=" + final_name + "&s=all"
- )
- str(page.status_code)
- soup = bs4.BeautifulSoup(page.content, "lxml")
- odds = soup.findAll("tr", "odd")
- mov_title = odds[0].findNext("td").findNext("td").text
- mov_link = (
- "http://www.imdb.com/" + odds[0].findNext("td").findNext("td").a["href"]
- )
- page1 = requests.get(mov_link)
- soup = bs4.BeautifulSoup(page1.content, "lxml")
- if soup.find("div", "poster"):
- poster = soup.find("div", "poster").img["src"]
- else:
- poster = ""
- if soup.find("div", "title_wrapper"):
- pg = soup.find("div", "title_wrapper").findNext("div").text
- mov_details = re.sub(r"\s+", " ", pg)
- else:
- mov_details = ""
- credits = soup.findAll("div", "credit_summary_item")
- if len(credits) == 1:
- director = credits[0].a.text
- writer = "Not available"
- stars = "Not available"
- elif len(credits) > 2:
- director = credits[0].a.text
- writer = credits[1].a.text
- actors = []
- for x in credits[2].findAll("a"):
- actors.append(x.text)
- actors.pop()
- stars = actors[0] + "," + actors[1] + "," + actors[2]
- else:
- director = credits[0].a.text
- writer = "Not available"
- actors = []
- for x in credits[1].findAll("a"):
- actors.append(x.text)
- actors.pop()
- stars = actors[0] + "," + actors[1] + "," + actors[2]
- if soup.find("div", "inline canwrap"):
- story_line = soup.find("div", "inline canwrap").findAll("p")[0].text
- else:
- story_line = "Not available"
- info = soup.findAll("div", "txt-block")
- if info:
- mov_country = []
- mov_language = []
- for node in info:
- a = node.findAll("a")
- for i in a:
- if "country_of_origin" in i["href"]:
- mov_country.append(i.text)
- elif "primary_language" in i["href"]:
- mov_language.append(i.text)
- if soup.findAll("div", "ratingValue"):
- for r in soup.findAll("div", "ratingValue"):
- mov_rating = r.strong["title"]
- else:
- mov_rating = "Not available"
- await sedlife.edit(
- ""
- "Title : "
- + mov_title
- + "\n"
- + mov_details
- + "\nRating : "
- + mov_rating
- + "\nCountry : "
- + mov_country[0]
- + "\nLanguage : "
- + mov_language[0]
- + "\nDirector : "
- + director
- + "\nWriter : "
- + writer
- + "\nStars : "
- + stars
- + "\nIMDB Url : "
- + mov_link
- + "\nStory Line : "
- + story_line,
- parse_mode="HTML"
- )
- except IndexError:
- await sedlife.edit("Ploxxx enter **Valid movie name** kthx")
-
-
+ url = f"https://www.imdb.com/find?ref_=nv_sr_fn&q={query}&s=all"
+ r = await get_content(url)
+ soup = BeautifulSoup(r, "lxml")
+ o_ = soup.find("td", {"class": "result_text"})
+ if not o_:
+ return await msg.edit("`No Results Found, Matching Your Query.`")
+ url = "https://www.imdb.com" + o_.find('a').get('href')
+ resp = await get_content(url)
+ b = BeautifulSoup(resp, "lxml")
+ r_json = json.loads(b.find("script", attrs={"type": "application/ld+json"}).contents[0])
+ res_str = "IMDB SEARCH RESULT"
+ if r_json.get("@type"):
+ res_str += f"\nType : {r_json['@type']} \n"
+ if r_json.get("name"):
+ res_str += f"Name : {r_json['name']} \n"
+ if r_json.get("contentRating"):
+ res_str += f"Content Rating : {r_json['contentRating']} \n"
+ if r_json.get("genre"):
+ all_genre = r_json['genre']
+ genre = "".join(f"{i}, " for i in all_genre)
+ genre = genre[:-2]
+ res_str += f"Genre : {genre} \n"
+ if r_json.get("actor"):
+ all_actors = r_json['actor']
+ actors = "".join(f"{i['name']}, " for i in all_actors)
+ actors = actors[:-2]
+ res_str += f"Actors : {actors} \n"
+ if r_json.get("trailer"):
+ trailer_url = "https://imdb.com" + r_json['trailer']['embedUrl']
+ res_str += f"Trailer : {trailer_url} \n"
+ if r_json.get("description"):
+ res_str += f"Description : {r_json['description']} \n"
+ if r_json.get("keywords"):
+ keywords = r_json['keywords'].split(",")
+ key_ = ""
+ for i in keywords:
+ i = i.replace(" ", "_")
+ key_ += f"#{i}, "
+ key_ = key_[:-2]
+ res_str += f"Keywords / Tags : {key_} \n"
+ if r_json.get("datePublished"):
+ res_str += f"Date Published : {r_json['datePublished']} \n"
+ if r_json.get("aggregateRating"):
+ res_str += f"Rating Count : {r_json['aggregateRating']['ratingCount']} \nRating Value : {r_json['aggregateRating']['ratingValue']} \n"
+ res_str += f"URL : {url}"
+ if thumb := r_json.get('image'):
+ await msg.delete()
+ return await reply.reply_photo(thumb, caption=res_str)
+ await msg.edit(res_str)
diff --git a/music_player.py b/music_player.py
index 5cbd8c7..46d24f0 100644
--- a/music_player.py
+++ b/music_player.py
@@ -7,13 +7,27 @@
# All rights reserved.
import os
+import math
+import os
+import shlex
+import time
+from math import ceil
import logging
import ffmpeg
from main_startup import Friday
+import functools
+import threading
+from concurrent.futures import ThreadPoolExecutor
+from pyrogram.errors import FloodWait, MessageNotModified
+import multiprocessing
import time
+import calendar
from main_startup.core.decorators import friday_on_cmd
-from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
-from pytgcalls import GroupCall
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text, humanbytes, time_formatter, run_in_exc
+from pytgcalls import GroupCallFactory, GroupCallFileAction
+import signal
+import random
+import string
import asyncio
import os
import time
@@ -22,10 +36,8 @@
from youtube_dl import YoutubeDL
from youtubesearchpython import SearchVideos
-s = []
s_dict = {}
-group_call = GroupCall(None, play_on_repeat=False)
-
+GPC = {}
@friday_on_cmd(
["playlist"],
@@ -33,43 +45,59 @@
cmd_help={"help": "Get Current Chat Playlist!", "example": "{ch}playlist"},
)
async def pl(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
play = await edit_or_reply(message, "`Please Wait!`")
song = f"**PlayList in {message.chat.title}** \n"
- sno = 0
+ s = s_dict.get((message.chat.id, client.me.id))
+ if not group_call:
+ return await play.edit("`Voice Chat Not Connected. So How Am i Supposed To Give You Playlist?`")
if not s:
if group_call.is_connected:
- await play.edit(f"**Currently Playing :** `{str(group_call.input_filename).replace('.raw', '')}`")
+ return await play.edit(f"**Currently Playing :** `{group_call.song_name}`")
else:
- await play.edit("`Playlist is Empty Sar And Nothing is Playing Also :(!`")
- return
+ return await play.edit("`Voice Chat Not Connected. So How Am i Supposed To Give You Playlist?`")
if group_call.is_connected:
- song += f"**Currently Playing :** `{str(group_call.input_filename).replace('.raw', '')}` \n\n"
- for i in s:
- sno += 1
- song += f"**{sno} ▶** `{i.replace('.raw', '')} | {s_dict[i]['singer']} | {s_dict[i]['dur']}` \n\n"
+ song += f"**Currently Playing :** `{group_call.song_name}` \n\n"
+ for sno, i in enumerate(s, start=1):
+ song += f"**{sno} ▶** [{i['song_name']}]({i['url']}) `| {i['singer']} | {i['dur']}` \n\n"
await play.edit(song)
-
-@group_call.on_playout_ended
+
+async def get_chat_(client, chat_):
+ chat_ = str(chat_)
+ if chat_.startswith("-100"):
+ try:
+ return (await client.get_chat(int(chat_))).id
+ except ValueError:
+ chat_ = chat_.split("-100")[1]
+ chat_ = f'-{str(chat_)}'
+ return int(chat_)
+
async def playout_ended_handler(group_call, filename):
- global s
client_ = group_call.client
+ chat_ = await get_chat_(client_, f"-100{group_call.full_chat.id}")
+ chat_ = int(chat_)
+ s = s_dict.get((chat_, client_.me.id))
if os.path.exists(group_call.input_filename):
os.remove(group_call.input_filename)
if not s:
- await client_.send_message(
- int(f"-100{group_call.full_chat.id}"),
- f"`Finished Playing. Nothing Left Play! Left VC.`",
- )
await group_call.stop()
+ del GPC[(chat_, client_.me.id)]
return
+ name_ = s[0]['song_name']
+ singer_ = s[0]['singer']
+ dur_ = s[0]['dur']
+ raw_file = s[0]['raw']
+ link = s[0]['url']
+ file_size = humanbytes(os.stat(raw_file).st_size)
+ song_info = f'🎼 Now Playing 🎼 \n🎵 Song : {name_} \n🎸 Singer : {singer_} \n⏲️ Duration : {dur_} \n📂 Size : {file_size}'
await client_.send_message(
- int(f"-100{group_call.full_chat.id}"), f"**Now Playing :** `{str(s[0]).replace('.raw', '')} | {s_dict[s[0]]['singer']} | {s_dict[s[0]]['dur']}` \n\n"
+ chat_,
+ song_info,
+ disable_web_page_preview=True,
)
- holi = s[0]
s.pop(0)
- logging.info("Now Playing " + str(holi).replace(".raw", ""))
- group_call.input_filename = holi
+ logging.debug(song_info)
+ group_call.input_filename = raw_file
@friday_on_cmd(
["skip_vc"],
@@ -77,133 +105,235 @@ async def playout_ended_handler(group_call, filename):
cmd_help={"help": "Skip Song in Playlist.", "example": "{ch}skip_vc (key_len)"}
)
async def ski_p(client, message):
- group_call.client = client
- if not group_call.is_connected:
- await m_.edit("`Is Group Call Even Connected?`")
- return
m_ = await edit_or_reply(message, "`Please Wait!`")
no_t_s = get_text(message)
+ group_call = GPC.get((message.chat.id, client.me.id))
+ s = s_dict.get((message.chat.id, client.me.id))
+ if not group_call:
+ await m_.edit("`Is Group Call Even Connected?`")
+ return
+ if not group_call.is_connected:
+ await m_.edit("`Is Group Call Even Connected?`")
+ return
if not no_t_s:
return await m_.edit("`Give Me Valid List Key Len.`")
if no_t_s == "current":
if not s:
return await m_.edit("`No Song in List. So Stopping Song is A Smarter Way.`")
- next_s = s[0]
+ next_s = s[0]['raw']
+ name = str(s[0]['song_name'])
 s.pop(0)
- name = str(next_s).replace(".raw", "")
- prev = group_call.input_filename
+ prev = group_call.song_name
group_call.input_filename = next_s
- return await m_.edit(f"`Skipped {prev}. Now Playing {name}!`")
+ return await m_.edit(f"`Skipped {prev}. Now Playing {name}!`")
else:
if not s:
- return await m_.edit("`There is No Playlist.`")
+ return await m_.edit("`There is No Playlist!`")
if not no_t_s.isdigit():
return await m_.edit("`Input Should Be In Digits.`")
no_t_s = int(no_t_s)
- if int(no_t_s) == 0:
+ if no_t_s == 0:
return await m_.edit("`0? What?`")
no_t_s = int(no_t_s - 1)
try:
- s_ = s[no_t_s]
+ s_ = s[no_t_s]['song_name']
s.pop(no_t_s)
except:
return await m_.edit("`Invalid Key.`")
return await m_.edit(f"`Skipped : {s_} At Position #{no_t_s}`")
-
-
+
+
@friday_on_cmd(
["play_vc"],
is_official=False,
cmd_help={"help": "Play The Song In VC Directly From Youtube Or Telegram!", "example": "{ch}play_vc (song query)"},
)
async def play_m(client, message):
- global s
- global s_dict
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
u_s = await edit_or_reply(message, "`Processing..`")
- if message.reply_to_message:
- if message.reply_to_message.audio:
- await u_s.edit_text("`Please Wait, Let Me Download This File!`")
- audio = message.reply_to_message.audio
- audio_original = await message.reply_to_message.download()
- vid_title = audio.title or audio.file_name
- uploade_r = message.reply_to_message.audio.performer or "Unknown Artist."
- dura_ = message.reply_to_message.audio.duration
- dur = datetime.timedelta(seconds=dura_)
- raw_file_name = f"{audio.file_name}.raw" if audio.file_name else f"{audio.title}.raw"
- else:
- return await u_s.edit("`Reply To A File To PLay It.`")
+ if input_str := get_text(message):
+ search = SearchVideos(str(input_str), offset=1, mode="dict", max_results=1)
+ rt = search.result()
+ result_s = rt.get("search_result")
+ if not result_s:
+ return await u_s.edit(f"`No Song Found Matching With Query - {input_str}, Please Try Giving Some Other Name.`")
+ url = result_s[0]["link"]
+ dur = result_s[0]["duration"]
+ vid_title = result_s[0]["title"]
+ yt_id = result_s[0]["id"]
+ uploade_r = result_s[0]["channel"]
+ start = time.time()
+ try:
+ audio_original = await yt_dl(url, client, message, start)
+ except BaseException as e:
+ return await u_s.edit(f"**Failed To Download** \n**Error :** `{str(e)}`")
+ raw_file_name = (
+ ''.join([random.choice(string.ascii_lowercase) for _ in range(5)])
+ + ".raw"
+ )
+
else:
- input_str = get_text(message)
- if not input_str:
- return await u_s.edit("`Give Me A Song Name. Like Why we lose or Alone.`")
- search = SearchVideos(str(input_str), offset=1, mode="dict", max_results=1)
- rt = search.result()
- try:
- result_s = rt["search_result"]
- except:
- return await u_s.edit(f"`Song Not Found With Name {input_str}, Please Try Giving Some Other Name.`")
- url = result_s[0]["link"]
- dur = result_s[0]["duration"]
- vid_title = result_s[0]["title"]
- yt_id = result_s[0]["id"]
- uploade_r = result_s[0]["channel"]
- opts = {
+ if not message.reply_to_message:
+ return await u_s.edit_text("`Reply To A File To PLay It.`")
+ if not message.reply_to_message.audio:
+ return await u_s.edit("`Reply To A File To PLay It.`")
+ await u_s.edit_text("`Please Wait, Let Me Download This File!`")
+ audio = message.reply_to_message.audio
+ audio_original = await message.reply_to_message.download()
+ vid_title = audio.title or audio.file_name
+ uploade_r = message.reply_to_message.audio.performer or "Unknown Artist."
+ dura_ = message.reply_to_message.audio.duration
+ dur = datetime.timedelta(seconds=dura_)
+ raw_file_name = (
+ ''.join([random.choice(string.ascii_lowercase) for _ in range(5)])
+ + ".raw"
+ )
+
+ url = message.reply_to_message.link
+ try:
+ raw_file_name = await convert_to_raw(audio_original, raw_file_name)
+ except BaseException as e:
+ return await u_s.edit(f"`FFmpeg Failed To Convert Song To raw Format.` \n**Error :** `{e}`")
+ if os.path.exists(audio_original):
+ os.remove(audio_original)
+ if not group_call:
+ group_call = GroupCallFactory(client).get_file_group_call()
+ GPC[(message.chat.id, client.me.id)] = group_call
+ try:
+ await group_call.start(message.chat.id)
+ except BaseException as e:
+ return await u_s.edit(f"**Error While Joining VC:** `{e}`")
+ group_call.add_handler(playout_ended_handler, GroupCallFileAction.PLAYOUT_ENDED)
+ group_call.input_filename = raw_file_name
+ return await u_s.edit(f"Playing `{vid_title}` in `{message.chat.title}`!")
+ elif not group_call.is_connected:
+ try:
+ await group_call.start(message.chat.id)
+ except BaseException as e:
+ return await u_s.edit(f"**Error While Joining VC:** `{e}`")
+ group_call.add_handler(playout_ended_handler, GroupCallFileAction.PLAYOUT_ENDED)
+ group_call.input_filename = raw_file_name
+ return await u_s.edit(f"Playing `{vid_title}` in `{message.chat.title}`!")
+ else:
+ s_d = s_dict.get((message.chat.id, client.me.id))
+ f_info = {"song_name": vid_title,
+ "raw": raw_file_name,
+ "singer": uploade_r,
+ "dur": dur,
+ "url": url
+ }
+ if s_d:
+ s_d.append(f_info)
+ else:
+ s_dict[(message.chat.id, client.me.id)] = [f_info]
+ s_d = s_dict.get((message.chat.id, client.me.id))
+ return await u_s.edit(f"Added `{vid_title}` To Position `#{len(s_d)}`!")
+
+@run_in_exc
+def convert_to_raw(audio_original, raw_file_name):
+ ffmpeg.input(audio_original).output(raw_file_name, format="s16le", acodec="pcm_s16le", ac=2, ar="48k", loglevel="error").overwrite_output().run()
+ return raw_file_name
+
+def edit_msg(client, message, to_edit):
+ try:
+ client.loop.create_task(message.edit(to_edit))
+ except MessageNotModified:
+ pass
+ except FloodWait as e:
+ client.loop.create_task(asyncio.sleep(e.x))
+ except TypeError:
+ pass
+
+def download_progress_hook(d, message, client, start):
+ if d['status'] == 'downloading':
+ current = d.get("_downloaded_bytes_str") or humanbytes(d.get("downloaded_bytes", 1))
+ total = d.get("_total_bytes_str") or d.get("_total_bytes_estimate_str")
+ file_name = d.get("filename")
+ eta = d.get('_eta_str', "N/A")
+ percent = d.get("_percent_str", "N/A")
+ speed = d.get("_speed_str", "N/A")
+ to_edit = f"Downloading File \nFile Name : {file_name} \nFile Size : {total} \nSpeed : {speed} \nETA : {eta} \nDownload {current} out of {total} (__{percent}__)"
+ threading.Thread(target=edit_msg, args=(client, message, to_edit)).start()
+
+@run_in_exc
+def yt_dl(url, client, message, start):
+ opts = {
"format": "bestaudio",
"addmetadata": True,
"key": "FFmpegMetadata",
- "writethumbnail": True,
"prefer_ffmpeg": True,
"geo_bypass": True,
+ "progress_hooks": [lambda d: download_progress_hook(d, message, client, start)],
"nocheckcertificate": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
- "preferredcodec": "mp3",
- "preferredquality": "720",
+ "preferredcodec": "mp3"
}
],
"outtmpl": "%(id)s.mp3",
"quiet": True,
"logtostderr": False,
}
- try:
- with YoutubeDL(opts) as ytdl:
- ytdl_data = ytdl.extract_info(url, download=True)
- except Exception as e:
- await u_s.edit(f"**Failed To Download** \n**Error :** `{str(e)}`")
- return
- audio_original = f"{ytdl_data['id']}.mp3"
- raw_file_name = f"{vid_title}.raw"
- raw_file_name = await convert_to_raw(audio_original, raw_file_name)
- if not raw_file_name:
- return await u_s.edit("`FFmpeg Failed To Convert Song To raw Format. Please Give Valid File.`")
- os.remove(audio_original)
- if not group_call.is_connected:
- try:
- await group_call.start(message.chat.id)
- except BaseException as e:
- return await u_s.edit(f"**Error While Joining VC:** `{e}`")
- group_call.input_filename = raw_file_name
- return await u_s.edit(f"Playing `{vid_title}` in `{message.chat.title}`!")
- else:
- s.append(raw_file_name)
- f_info = {"song name": vid_title,
- "singer": uploade_r,
- "dur": dur
- }
- s_dict[raw_file_name] = f_info
- return await u_s.edit(f"Added `{vid_title}` To Position `#{len(s)+1}`!")
-
+ with YoutubeDL(opts) as ytdl:
+ ytdl_data = ytdl.extract_info(url, download=True)
+ return str(ytdl_data['id']) + ".mp3"
+
+RD_ = {}
+FFMPEG_PROCESSES = {}
-
-async def convert_to_raw(audio_original, raw_file_name):
- try:
- ffmpeg.input(audio_original).output(
- raw_file_name, format="s16le", acodec="pcm_s16le", ac=2, ar="48k").overwrite_output().run()
- except:
- return None
- return raw_file_name
+
+@friday_on_cmd(
+ ["pradio"],
+ is_official=False,
+ cmd_help={"help": "Play Radio.", "example": "{ch}pradio (radio url)"},
+)
+async def radio_s(client, message):
+ if g_s_ := GPC.get((message.chat.id, client.me.id)):
+ if g_s_.is_connected:
+ await g_s_.stop()
+ del GPC[(message.chat.id, client.me.id)]
+ s = await edit_or_reply(message, "`Please Wait.`")
+ input_filename = f"radio_{message.chat.id}.raw"
+ radio_url = get_text(message)
+ if not radio_url:
+ return await s.edit("`Invalid Radio URL...`")
+ group_call = RD_.get((message.chat.id, client.me.id))
+ if not group_call:
+ group_call = GroupCallFactory(client).get_file_group_call(input_filename)
+ RD_[(message.chat.id, client.me.id)] = group_call
+ process = FFMPEG_PROCESSES.get((message.chat.id, client.me.id))
+ if process:
+ process.send_signal(signal.SIGTERM)
+ await group_call.start(message.chat.id)
+ process = ffmpeg.input(radio_url).output(
+ input_filename,
+ format='s16le',
+ acodec='pcm_s16le',
+ ac=2,
+ ar='48k',
+ loglevel='error'
+ ).overwrite_output().run_async()
+ FFMPEG_PROCESSES[(message.chat.id, client.me.id)] = process
+ await s.edit(f"**📻 Playing :** `{radio_url}`")
+
+@friday_on_cmd(
+ ["sradio"],
+ is_official=False,
+ cmd_help={"help": "Stop Radio.", "example": "{ch}stop_radio"},
+)
+async def stop_radio(client, message):
+ msg = await edit_or_reply(message, "`Please Wait.`")
+ if not (group_call := RD_.get((message.chat.id, client.me.id))):
+ return await msg.edit("`Is Vc is Connected?`")
+ if group_call.is_connected:
+ await group_call.stop()
+ else:
+ return await msg.edit("`Is Vc is Connected?`")
+ process = FFMPEG_PROCESSES.get((message.chat.id, client.me.id))
+ await msg.edit("`Radio Stopped : 📻`")
+ if process:
+ process.send_signal(signal.SIGTERM)
@friday_on_cmd(
@@ -212,7 +342,10 @@ async def convert_to_raw(audio_original, raw_file_name):
cmd_help={"help": "Pause Currently Playing Song.", "example": "{ch}pause"},
)
async def no_song_play(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
+ if not group_call:
+ await edit_or_reply(message, "`Is Group Call Even Connected?`")
+ return
if not group_call.is_connected:
await edit_or_reply(message, "`Is Group Call Even Connected?`")
return
@@ -226,12 +359,15 @@ async def no_song_play(client, message):
cmd_help={"help": "Resume Paused Song.", "example": "{ch}resume"},
)
async def wow_dont_stop_songs(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
+ if not group_call:
+ await edit_or_reply(message, "`Is Group Call Even Connected?`")
+ return
if not group_call.is_connected:
await edit_or_reply(message, "`Is Group Call Even Connected?`")
- return
+ return
group_call.resume_playout()
- await edit_or_reply(message, f"`▶️ Resumed.`")
+ await edit_or_reply(message, "`▶️ Resumed.`")
@friday_on_cmd(
@@ -240,7 +376,10 @@ async def wow_dont_stop_songs(client, message):
cmd_help={"help": "Stop VoiceChat!", "example": "{ch}stopvc"},
)
async def kill_vc_(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
+ if not group_call:
+ await edit_or_reply(message, "`Is Group Call Even Connected?`")
+ return
if not group_call.is_connected:
await edit_or_reply(message, "`Is Group Call Even Connected?`")
return
@@ -248,6 +387,7 @@ async def kill_vc_(client, message):
os.remove(group_call.input_filename)
group_call.stop_playout()
await edit_or_reply(message, "`Stopped Playing Songs!`")
+ del GPC[(message.chat.id, client.me.id)]
@friday_on_cmd(
@@ -256,7 +396,10 @@ async def kill_vc_(client, message):
cmd_help={"help": "Replay Song In VC!", "example": "{ch}rvc"},
)
async def replay(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
+ if not group_call:
+ await edit_or_reply(message, "`Is Group Call Even Connected?`")
+ return
if not group_call.is_connected:
await edit_or_reply(message, "`Is Group Call Even Connected?`")
return
@@ -270,12 +413,15 @@ async def replay(client, message):
cmd_help={"help": "Rejoin Voice Chat!", "example": "{ch}rjvc"},
)
async def rejoinvcpls(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
+ if not group_call:
+ await edit_or_reply(message, "`Is Group Call Even Connected?`")
+ return
if not group_call.is_connected:
await edit_or_reply(message, "`Is Group Call Even Connected?`")
return
await group_call.reconnect()
- await edit_or_reply(message, f"`Rejoined! - Vc`")
+ await edit_or_reply(message, "`Rejoined! - Vc`")
@friday_on_cmd(
@@ -284,7 +430,10 @@ async def rejoinvcpls(client, message):
cmd_help={"help": "Leave Voice Call!", "example": "{ch}leavevc"},
)
async def leave_vc_test(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
+ if not group_call:
+ await edit_or_reply(message, "`Is Group Call Even Connected?`")
+ return
if not group_call.is_connected:
await edit_or_reply(message, "`Is Group Call Even Connected?`")
return
@@ -292,6 +441,7 @@ async def leave_vc_test(client, message):
os.remove(group_call.input_filename)
await group_call.stop()
await edit_or_reply(message, f"`Left : {message.chat.title} - Vc`")
+ del GPC[(message.chat.id, client.me.id)]
@friday_on_cmd(
@@ -303,7 +453,10 @@ async def leave_vc_test(client, message):
},
)
async def set_vol(client, message):
- group_call.client = client
+ group_call = GPC.get((message.chat.id, client.me.id))
+ if not group_call:
+ await edit_or_reply(message, "`Is Group Call Even Connected?`")
+ return
if not group_call.is_connected:
await edit_or_reply(message, "`Is Group Call Even Connected?`")
return
diff --git a/req.txt b/req.txt
index a7bbba1..5d9c258 100644
--- a/req.txt
+++ b/req.txt
@@ -1,7 +1,9 @@
+lxml
wikipedia
-pytgcalls
+pytgcalls[pyrogram]
mal-api
-git+https://github.com/chsaiujwal/maigret
+git+https://github.com/StarkGang/maigret
+git+https://github.com/starkgang/shazamio
git+https://github.com/gleitz/howdoi
speedtest-cli
tswift
@@ -10,4 +12,5 @@ emoji-country-flag
quotefancy
feedparser
asyncurban
-validators>=0.18.2
+validators
+pycryptodome
diff --git a/rom_search.py b/rom_search.py
new file mode 100644
index 0000000..7a6adfc
--- /dev/null
+++ b/rom_search.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.common.exceptions import TimeoutException
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+from selenium.webdriver.common.keys import Keys
+from selenium.webdriver.support.ui import Select
+from selenium.webdriver.chrome.options import Options
+from selenium.common.exceptions import NoSuchElementException
+import asyncio
+from main_startup import Config
+import aiohttp
+from bs4 import BeautifulSoup
+from lxml import etree
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.core.startup_helpers import run_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text, run_in_exc
+
+GOOGLE_CHROME_BIN = Config.CHROME_BIN_PATH
+CHROME_DRIVER = Config.CHROME_DRIVER_PATH
+ch_ = Config.COMMAND_HANDLER
+
+@run_in_exc
+def get_url(query: str):
+ url = "https://xiaomifirmwareupdater.com/miui/"
+ chrome_options = Options()
+ chrome_options.add_argument("--headless")
+ chrome_options.binary_location = GOOGLE_CHROME_BIN
+ chrome_options.add_argument("--disable-dev-shm-usage")
+ chrome_options.add_argument("--no-sandbox")
+ chrome_options.add_argument("--disable-gpu")
+ driver = webdriver.Chrome(executable_path=CHROME_DRIVER, options=chrome_options)
+ driver.get(url)
+ wait = WebDriverWait(driver, 20)
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#miui_filter > label > input"))).send_keys(query)
+ try:
+ bruh = driver.find_element_by_css_selector("#miui > tbody > tr:nth-child(1) > td:nth-child(8) > a")
+ except NoSuchElementException:
+ driver.quit()
+        return None
+ if not bruh:
+ driver.quit()
+        return None
+ href = bruh.get_attribute('href')
+ driver.quit()
+ return href
+
+async def fetch_data(url: str):
+ async with aiohttp.ClientSession() as session:
+ resp = await session.get(url)
+ b_ = BeautifulSoup(await resp.read(), 'lxml')
+ device_name = b_.select("#downloads > div > ul > li:nth-child(1) > h5")[0].text.split("Device: ")[1]
+ version = b_.select("#downloads > div > ul > li:nth-child(3) > h5")[0].text.split("Version: ")[1]
+ size = b_.select("#downloads > div > ul > li:nth-child(6) > h5")[0].text.split("Size: ")[1]
+ rs_date = b_.select("#downloads > div > ul > li:nth-child(7) > h5")[0].text.split("Release Date: ")[1]
+ type_ = b_.select("#downloads > div > ul > li:nth-child(5) > h5")[0].text.split("Type: ")[1]
+ package_name = b_.find("span", {"id": "filename"}).text
+ url = f"https://bigota.d.miui.com/{version}/{package_name}"
+ return url, device_name, version, size, rs_date, type_, package_name
+
+@run_in_exc
+def realme_rom_search(query: str):
+ url = "https://realmeupdater.com/"
+ chrome_options = Options()
+ chrome_options.add_argument("--headless")
+ chrome_options.binary_location = GOOGLE_CHROME_BIN
+ chrome_options.add_argument("--disable-dev-shm-usage")
+ chrome_options.add_argument("--no-sandbox")
+ chrome_options.add_argument("--disable-gpu")
+ driver = webdriver.Chrome(executable_path=CHROME_DRIVER, options=chrome_options)
+ driver.get(url)
+ driver.maximize_window()
+ wait = WebDriverWait(driver, 30)
+ driver.get("https://realmeupdater.com/")
+ driver.execute_script("var scrollingElement = (document.scrollingElement || document.body);scrollingElement.scrollTop = scrollingElement.scrollHeight;")
+ wait.until(EC.visibility_of_element_located((By.ID, "select2-device-container"))).click()
+ wait.until(EC.visibility_of_element_located((By.XPATH, "/html/body/span/span/span[1]/input"))).send_keys(query)
+ try:
+ all_options = driver.find_elements(By.CSS_SELECTOR, "#select2-device-results li")
+ except NoSuchElementException:
+ driver.quit()
+ return None, None, None, None, None
+ if not all_options:
+ driver.quit()
+ return None, None, None, None, None
+ all_options[0].click()
+ wait.until(EC.visibility_of_element_located((By.XPATH, "/html/body/div[5]/div[1]/div[2]/div/div/div[2]/div/div[1]/form/div/div[3]/button"))).click()
+ device = wait.until(EC.visibility_of_element_located((By.XPATH, "//h5[./b[text()='Device: ']]"))).text.split(maxsplit=1)[1]
+ system = wait.until(EC.visibility_of_element_located((By.XPATH, "//h5[./b[text()='System: ']]"))).text.split(maxsplit=1)[1]
+ size = wait.until(EC.visibility_of_element_located((By.XPATH, "//h5[./b[text()='Size: ']]"))).text.split(maxsplit=1)[1]
+ rdate = wait.until(EC.visibility_of_element_located((By.XPATH, "//h5[./b[text()='Release Date: ']]"))).text.split(": ", maxsplit=1)[1]
+ file_name = wait.until(EC.visibility_of_element_located((By.ID, "filename"))).text
+ file_url = f"https://download.c.realme.com/osupdate/{file_name}"
+ driver.quit()
+ return file_url, rdate, size, system, device
+
+@friday_on_cmd(
+ ["mrs"],
+ cmd_help={
+ "help": "`Search MiUi Roms :)`",
+ "example": "{ch}mrs mi 10 pro",
+ },
+)
+async def m_(client, message):
+ e_ = await edit_or_reply(message, "`Please Wait..`")
+ query = get_text(message)
+ if not query:
+        return await e_.edit("`Please Give Me A Query.`")
+ href = await get_url(query)
+ if href is None:
+        return await e_.edit("`No Results Matching Your Query.`")
+ url, device_name, version, size, rs_date, type_, package_name = await fetch_data(href)
+ final_ = f"MIUI Search \nModel : {device_name} \nVersion : {version} \nSize : {size} \nRelease Date : {rs_date} \nType : {type_} \nPackage Name : {package_name} \nDownload : {ch_}udl {url}"
+    await e_.edit(final_)
+
+@friday_on_cmd(
+ ["rms"],
+ cmd_help={
+ "help": "`Search Realme Roms :)`",
+ "example": "{ch}rms pro",
+ },
+)
+async def rm_s(client, message):
+ e_ = await edit_or_reply(message, "`Please Wait..`")
+ query = get_text(message)
+ if not query:
+        return await e_.edit("`Please Give Me A Query.`")
+ file_url, r_date, size, system, device = await realme_rom_search(query)
+ if file_url is None:
+        return await e_.edit("`No Results Matching Your Query.`")
+ final_ = f"RealMeRom Search \nDevice : {device} \nSystem : {system} \nSize : {size} \nRelease Date : {r_date} \nDownload : {ch_}udl {file_url}"
+    await e_.edit(final_)
diff --git a/shazam.py b/shazam.py
new file mode 100644
index 0000000..1f5a1f3
--- /dev/null
+++ b/shazam.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+
+import os
+import asyncio
+from shazamio import Shazam
+from main_startup.core.startup_helpers import run_cmd
+import datetime
+import requests
+import time
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text, humanbytes
+
+async def shazam(file):
+ shazam = Shazam()
+ try:
+ r = await shazam.recognize_song(file)
+ except:
+ return None, None, None
+    if not r or not r.get("track"):
+ return None, None, None
+ track = r.get("track")
+ nt = track.get("images")
+ image = nt.get("coverarthq")
+ by = track.get("subtitle")
+ title = track.get("title")
+ return image, by, title
+
+async def convert_to_audio(vid_path):
+ stark_cmd = f"ffmpeg -i {vid_path} -map 0:a friday.mp3"
+    await run_cmd(stark_cmd)
+ final_warner = "friday.mp3"
+ if not os.path.exists(final_warner):
+ return None
+ return final_warner
+
+@friday_on_cmd(
+ ["shazam"],
+ cmd_help={
+ "help": "Recognize / Discover A Song",
+ "example": "{ch}shazam (reply to music file)",
+ },
+)
+async def shazam_(client, message):
+ stime = time.time()
+    msg = await edit_or_reply(message, "`Shazaming This Song.`")
+ if not message.reply_to_message:
+ return await msg.edit("`Reply To Song File`")
+ if not (message.reply_to_message.audio or message.reply_to_message.voice or message.reply_to_message.video):
+ return await msg.edit("`Reply To Audio File.`")
+ if message.reply_to_message.video:
+ video_file = await message.reply_to_message.download()
+ music_file = await convert_to_audio(video_file)
+ dur = message.reply_to_message.video.duration
+ if not music_file:
+ return await msg.edit("`Unable To Convert To Song File. Is This A Valid File?`")
+ elif (message.reply_to_message.voice or message.reply_to_message.audio):
+ dur = message.reply_to_message.voice.duration if message.reply_to_message.voice else message.reply_to_message.audio.duration
+ music_file = await message.reply_to_message.download()
+ size_ = humanbytes(os.stat(music_file).st_size)
+ dur = datetime.timedelta(seconds=dur)
+ thumb, by, title = await shazam(music_file)
+ if title is None:
+ return await msg.edit("`No Results Found.`")
+ etime = time.time()
+ t_k = round(etime - stime)
+ caption = f"""Shazamed Song
+
+Song Name : {title}
+Singer : {by}
+Duration : {dur}
+Size : {size_}
+Time Taken : {t_k} Seconds
+
+Shazamed By FridayUB
+ """
+ if thumb:
+ await msg.delete()
+ await message.reply_to_message.reply_photo(thumb, caption=caption, quote=True)
+ else:
+ await msg.edit(caption)
\ No newline at end of file
diff --git a/unzip.py b/unzip.py
new file mode 100644
index 0000000..16799f7
--- /dev/null
+++ b/unzip.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+import logging
+import os
+import pathlib
+import time
+import time as t
+import zipfile
+from datetime import datetime
+
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, humanbytes
+
+extracted = "./downloads/extracted/"
+
+
+@friday_on_cmd(
+ ["unzip"],
+ cmd_help={
+ "help": "Unzip the Zip File!",
+ "example": "{ch}unzip (reply to zip file)",
+ },
+)
+async def test(client, message):
+ Pablo = await edit_or_reply(message, "`Processing...`")
+ if not message.reply_to_message:
+ await Pablo.edit("`Reply To Zip File To Unzip!`")
+ return
+ if not message.reply_to_message.document:
+ await Pablo.edit("`Reply To Zip File To Unzip!`")
+ return
+ if message.reply_to_message.document.mime_type != "application/zip":
+ await Pablo.edit("`Is That Even A Zip?`")
+ return
+ if not os.path.isdir(extracted):
+ os.makedirs(extracted)
+ start = datetime.now()
+ downloaded_file_name = await message.reply_to_message.download()
+ end = datetime.now()
+ ms = (end - start).seconds
+ await Pablo.edit(
+ "Stored the zip to `{}` in {} seconds.".format(downloaded_file_name, ms)
+ )
+ try:
+ with zipfile.ZipFile(downloaded_file_name, "r") as zip_ref:
+ zip_ref.extractall(extracted)
+ except Exception as e:
+        await Pablo.edit(f"`Error! Couldn't Extract Zip. \nTraceBack : {e}`")
+ return
+ filename = []
+ list(file_list(extracted, filename))
+ total_files = len(filename)
+ failed_s = 0
+ await Pablo.edit("`Unzipping, Please Wait!`")
+ for single_file in filename:
+ if os.path.exists(single_file):
+ caption_rts = os.path.basename(single_file)
+ size = os.stat(single_file).st_size
+ capt = f"<< **{caption_rts}** [`{humanbytes(size)}`] >>"
+ try:
+ await client.send_document(
+ message.chat.id, single_file, caption=capt, force_document=False
+ )
+ except Exception as e:
+ logging.info(e)
+ failed_s += 1
+ os.remove(single_file)
+ await Pablo.edit(
+ f"`Unzipped And Uploaded {total_files-failed_s} File Out Of {total_files}!`"
+ )
+ os.remove(downloaded_file_name)
+
+
+def file_list(path, lisT):
+ pathlib.Path(path)
+ for filepath in pathlib.Path(path).glob("**/*"):
+ if os.path.isdir(filepath):
+ file_list(filepath, lisT)
+ else:
+ lisT.append(filepath.absolute())
+ return lisT
\ No newline at end of file
diff --git a/user-agent-info.py b/user-agent-info.py
new file mode 100644
index 0000000..393ed0f
--- /dev/null
+++ b/user-agent-info.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2020-2021 by DevsExpo@Github, < https://github.com/DevsExpo >.
+#
+# This file is part of < https://github.com/DevsExpo/FridayUserBot > project,
+# and is released under the "GNU v3.0 License Agreement".
+# Please see < https://github.com/DevsExpo/blob/master/LICENSE >
+#
+# All rights reserved.
+
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
+import requests
+
+
+@friday_on_cmd(
+ ["ua", "user_agent"],
+ cmd_help={
+ "help": "Get Info From user agent",
+ "example": "{ch}ua (user agent)",
+ },
+)
+async def useragenti(client, message):
+ engine = message.Engine
+ pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
+ tex_t = get_text(message)
+ if not tex_t:
+ await pablo.edit(engine.get_string("INPUT_REQ").format("User Agent"))
+ return
+ ue = tex_t
+ data = {"ua" : ue}
+ r = requests.post("https://api.apicagent.com", data = data)
+ Lol = r.json()
+ await pablo.edit(f"""
+Browser: {Lol["client"]["name"]}
+Browser Version: {Lol["client"]["version"]}
+Device Brand: {Lol["device"]["brand"]}
+Device Model: {Lol["device"]["model"]}
+OS: {Lol["os"]["name"]}
+OS version: {Lol["os"]["version"]}
+""")
diff --git a/webtools.py b/webtools.py
new file mode 100644
index 0000000..51ede0f
--- /dev/null
+++ b/webtools.py
@@ -0,0 +1,129 @@
+import os
+import time
+
+import pyshorteners
+import requests
+from bs4 import BeautifulSoup
+from faker import Faker
+from faker.providers import internet
+
+from main_startup.core.decorators import friday_on_cmd
+from main_startup.helper_func.basic_helpers import (
+ delete_or_pass,
+ edit_or_reply,
+ get_text,
+ progress,
+)
+
+
+@friday_on_cmd(
+ ["fakegen", "fakedata"],
+ cmd_help={"help": "Generate Random Fake Details", "example": "{ch}fakegen"},
+)
+async def gen_fake_details(client, message):
+ lel = await edit_or_reply(message, "`Processing...`")
+ fake = Faker()
+ name = str(fake.name())
+ fake.add_provider(internet)
+ address = str(fake.address())
+ ip = fake.ipv4_private()
+ cc = fake.credit_card_full()
+ email = fake.ascii_free_email()
+ job = fake.job()
+ android = fake.android_platform_token()
+ pc = fake.chrome()
+ await lel.edit(
+ f" Fake Information Generated\nName :-{name}\n\nAddress:-{address}\n\nIP ADDRESS:-{ip}\n\ncredit card:-{cc}\n\nEmail Id:-{email}\n\nJob:-{job}\n\nandroid user agent:-{android}\n\nPc user agent:-{pc}",
+ parse_mode="HTML",
+ )
+
+
+@friday_on_cmd(
+ ["short"],
+ cmd_help={"help": "Shorten URL link!", "example": "{ch}short link"},
+)
+async def vom(client, message):
+ event = await edit_or_reply(message, "`Shortening the link.....`")
+ link = get_text(message)
+ if not link:
+ await event.edit(
+ "``Please Give Me A Valid Input. You Can Check Help Menu To Know More!``"
+ )
+ return
+ sed = pyshorteners.Shortener()
+ kek = sed.dagd.short(link)
+ bestisbest = (
+ f"Url Shortened \nGiven Link ➠ {link}\n"
+ f"Shortened Link ➠ {kek}"
+ )
+ await event.edit(bestisbest)
+
+
+@friday_on_cmd(
+ ["rmeme", "randomeme"],
+ cmd_help={"help": "Generate Random Memes!", "example": "{ch}rmeme"},
+)
+async def givemememe(client, message):
+ hmm_s = "https://some-random-api.ml/meme"
+ r = requests.get(url=hmm_s).json()
+ image_s = r["image"]
+ await message.reply_photo(image_s)
+ await delete_or_pass(message)
+
+
+@friday_on_cmd(
+ ["binlookup", "bin"],
+ cmd_help={"help": "Get Details About Bin!", "example": "{ch}bin (bin number)"},
+)
+async def nobin(client, message):
+ stark_m = await edit_or_reply(message, "`Please Wait!`")
+ bin = get_text(message)
+ if not bin:
+ await stark_m.edit(
+ "`Please Give Me A Valid Input. You Can Check Help Menu To Know More!`"
+ )
+ return
+ url = f"https://lookup.binlist.net/{bin}"
+ r = requests.get(url=url)
+ if r.status_code != 200:
+ await stark_m.edit("Invalid Bin, Please Give Me A Valid Bin To Check.")
+ return
+ jr = r.json()
+ data_is = (
+ f"Bin ➠ {bin} \n"
+ f"Type ➠ {jr.get('type', '?')} \n"
+ f"Scheme ➠ {jr.get('scheme', '?')} \n"
+ f"Brand ➠ {jr.get('brand', '?')} \n"
+ f"Country ➠ {jr['country']['name']} {jr['country']['emoji']} \n"
+ )
+ await stark_m.edit(data_is, parse_mode="html")
+
+
+@friday_on_cmd(
+ ["iban", "ibaninfo"],
+ cmd_help={"help": "Get Details About IBAN", "example": "{ch}iban (iban here)"},
+)
+async def ibanbanem(client, message):
+ stark_m = await edit_or_reply(message, "`Please Wait!`")
+ iban = get_text(message)
+ if not iban:
+ await stark_m.edit(
+ "`Please Give Me A Valid Input. You Can Check Help Menu To Know More!`"
+ )
+ return
+ api = f"https://openiban.com/validate/{iban}?getBIC=true&validateBankCode=true"
+ r = requests.get(url=api).json()
+ if r["valid"] is False:
+ await stark_m.edit("Invalid IBAN, Try Again With A Valid IBAN!")
+ return
+ banks = r["bankData"]
+ kek = (
+ f"VALID ➠ {r['valid']} \n"
+ f"IBAN ➠ {r['iban']} \n"
+ f"BANK-CODE ➠ {banks['bankCode']} \n"
+ f"BANK-NAME ➠ {banks['name']} \n"
+ f"ZIP ➠ {banks['zip']} \n"
+ f"CITY ➠ {banks['city']} \n"
+ f"BIC ➠ {banks['bic']} \n"
+ )
+ await stark_m.edit(kek, parse_mode="html")
\ No newline at end of file
diff --git a/xvideo.py b/xvideo.py
deleted file mode 100644
index c3ee2b4..0000000
--- a/xvideo.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import requests
-import bs4
-
-from main_startup.core.decorators import friday_on_cmd
-from main_startup.helper_func.basic_helpers import edit_or_reply, get_text
-
-
-@friday_on_cmd(
- ["xvideo"],
- cmd_help={
- "help": "Get direct Downloadable",
- "example": "{ch}xvideo xvideo_link",
- },
-)
-async def xvid(client, message):
- editer= await edit_or_reply(message, "`Please Wait.....`")
- msg = get_text(message)
- if not msg:
- await editer.edit("`Please Enter Valid Input`")
- return
- try:
- req = requests.get(msg)
- soup = bs4.BeautifulSoup(req.content, 'html.parser')
-
- soups = soup.find("div",{"id":"video-player-bg"})
- link =""
- for a in soups.find_all('a', href=True):
- link = a["href"]
- await editer.edit(f"HERE IS YOUR LINK:\n`{link}`")
- except:
- await editer.edit("Something went wrong")
-
-
-
-
-
-@friday_on_cmd(
- ["xsearch"],
- cmd_help={
- "help": "Xvideo Searcher",
- "example": "{ch}xsearch query",
- },
-)
-
-async def xvidsearch(client, message):
- editer= await edit_or_reply(message, "`Please Wait.....`")
- msg = get_text(message)
- if not msg:
- await editer.edit("`Please Enter Valid Input`")
- return
- try:
- qu = msg.replace(" ","+")
- page= requests.get(f"https://www.xvideos.com/?k={qu}").content
- soup = bs4.BeautifulSoup(page, 'html.parser')
- col= soup.findAll("div",{"class":"thumb"})
-
- links= ""
-
- for i in col:
- a = i.find("a")
- link = a.get('href')
-
- semd = link.split("/")[2]
-
- links += f"• {semd.upper()}\n"
- await editer.edit(links,parse_mode="HTML")
-
-
- except:
- await editer.edit("Something Went Wrong")
-