Commit
uk365 authored Jan 30, 2024
1 parent df30af7 commit 086e05d
Showing 1 changed file with 32 additions and 33 deletions.
65 changes: 32 additions & 33 deletions FZBypass/core/bypass_scrape.py
@@ -164,42 +164,41 @@ async def toonworld4all(url: str):
 def re_findall(regex: str, text: str) -> List[str]:
     return re.findall(regex, text)
 
-async def tamilmv(url):
-    req=requests.get(url)
-    soup=bs(req.content,'html.parser')
-    magnets=soup.findAll('a')
-    links=[]
-    for i in magnets:
-    #for no, i in enumerate(magnets, start=1):
-        try:
-            if i.get_text()=="MAGNET" or i.find('img').get('alt')=="magnet.png":
-                j=i.find_previous_sibling('strong')
-                links.append({"name":j.get_text(),"link":i.get('href')})
-                name = j.get_text()
-                linkx = i.get('href')
-        except:
-            pass
-    print(links)
+# async def tamilmv(url):
+#     req=requests.get(url)
+#     soup=bs(req.content,'html.parser')
+#     magnets=soup.findAll('a')
+#     links=[]
+#     for i in magnets:
+#     #for no, i in enumerate(magnets, start=1):
+#         try:
+#             if i.get_text()=="MAGNET" or i.find('img').get('alt')=="magnet.png":
+#                 j=i.find_previous_sibling('strong')
+#                 links.append({"name":j.get_text(),"link":i.get('href')})
+#                 name = j.get_text()
+#                 linkx = i.get('href')
+#         except:
+#             pass
+#     print(links)
 
-    parse_data = f'''
+#     parse_data = f'''
 
-<code>{name}</code>
-┖ <b>Links :</b> <a href="https://t.me/share/url?url={linkx}"><b>Magnet </b>🧲</a> | <a href="{linkx}"><b>Torrent 🌐</b></a>'''
+# <code>{name}</code>
+# ┖ <b>Links :</b> <a href="https://t.me/share/url?url={linkx}"><b>Magnet </b>🧲</a> | <a href="{linkx}"><b>Torrent 🌐</b></a>'''
 
-    return parse_data
+#     return parse_data
 
 
-# async def tamilmv(url):
-#     cget = create_scraper().get
-#     resp = cget(url, allow_redirects=False)
-#     soup = BeautifulSoup(resp.text, 'html.parser')
-#     mag = soup.select('a[href^="magnet:?xt=urn:btih:"]')
-#     tor = soup.select('a[data-fileext="torrent"]')
-#     parse_data = f"<b><u>{soup.title.string}</u></b>"
-#     for no, (t, m) in enumerate(zip(tor, mag), start=1):
-#         filename = sub(r"www\S+|\- |\.torrent", '', t.string) if t is not None and t.string is not None else ""
-#         parse_data += f'''
+async def tamilmv(url):
+    req=requests.get(url)
+    soup=bs(req.content,'html.parser')
+    mag = soup.select('a[href^="magnet:?xt=urn:btih:"]')
+    tor = soup.select('a[data-fileext="torrent"]')
+    parse_data = f"<b><u>{soup.title.string}</u></b>"
+    for no, (t, m) in enumerate(zip(tor, mag), start=1):
+        filename = sub(r"www\S+|\- |\.torrent", '', t.string) if t is not None and t.string is not None else ""
+        parse_data += f'''
-# {no}. <code>{filename}</code>
-# ┖ <b>Links :</b> <a href="https://t.me/share/url?url={m['href'].split('&')[0]}"><b>Magnet </b>🧲</a> | <a href="{t['href']}"><b>Torrent 🌐</b></a>'''
-#     return parse_data
+{no}. <code>{filename}</code>
+┖ <b>Links :</b> <a href="https://t.me/share/url?url={m['href'].split('&')[0]}"><b>Magnet </b>🧲</a> | <a href="{t['href']}"><b>Torrent 🌐</b></a>'''
+    return parse_data
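
What the change accomplishes: the old tamilmv walked every <a> tag, matched magnet buttons by their link text or icon, and only printed the collected links, while its parse_data template referenced name and linkx left over from the final loop iteration (a NameError on pages with no matches). The new version activates the previously commented-out approach: select magnet anchors by href prefix and torrent attachments by their data-fileext attribute, then zip the two lists into a numbered reply. Below is a minimal standalone sketch of that selector technique, assuming requests and beautifulsoup4 are installed; the function name, timeout, and tuple return shape are illustrative assumptions, not code from this repo.

import re
from typing import List, Tuple

import requests
from bs4 import BeautifulSoup


def fetch_magnet_links(url: str) -> List[Tuple[str, str, str]]:
    """Return (filename, magnet_href, torrent_href) triples from a thread page."""
    resp = requests.get(url, timeout=30)  # hypothetical timeout; the repo uses a bare get()
    resp.raise_for_status()
    soup = BeautifulSoup(resp.content, "html.parser")

    # CSS attribute selectors: ^= is a prefix match, so this grabs anchors
    # whose href starts with the magnet URI scheme; data-fileext="torrent"
    # marks forum attachments with a .torrent extension.
    mag = soup.select('a[href^="magnet:?xt=urn:btih:"]')
    tor = soup.select('a[data-fileext="torrent"]')

    results = []
    for t, m in zip(tor, mag):
        # Same cleanup the commit applies with sub(): drop site tags,
        # stray "- " separators, and the .torrent suffix.
        filename = re.sub(r"www\S+|\- |\.torrent", "", t.string or "")
        # Keep only the btih part of the magnet URI, dropping tracker params.
        results.append((filename, m["href"].split("&")[0], t["href"]))
    return results

Note that pairing with zip(tor, mag) assumes each .torrent attachment appears alongside its magnet button in page order; if the forum markup changes, the pairs drift silently, which is the usual fragility of positional scraping.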
