From 30dc88caaaaef8b4df31565aaec77016e69834e7 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:11:48 +0200 Subject: [PATCH] ARREGLOS Y MEJORAS - Cambios de dominio: HDFull, Dontorrent - Cambios de estructura: Pelitorrent, Eztv, Dontorrent - Ajustes en Downloads - Ajustes para Filtertools --- plugin.video.alfa/channels/dontorrent.py | 8 +- plugin.video.alfa/channels/eztv.py | 4 +- plugin.video.alfa/channels/hdfull.py | 6 +- plugin.video.alfa/channels/pelitorrent.py | 2 +- plugin.video.alfa/core/httptools.py | 2 +- plugin.video.alfa/lib/AlfaChannelHelper.py | 28 +- plugin.video.alfa/lib/generictools.py | 394 ++++++++++++------ plugin.video.alfa/modules/downloads.py | 64 ++- plugin.video.alfa/modules/videolibrary.py | 3 +- plugin.video.alfa/platformcode/custom_code.py | 2 +- plugin.video.alfa/platformcode/envtal.py | 7 +- plugin.video.alfa/platformcode/launcher.py | 82 +++- .../platformcode/platformtools.py | 4 +- plugin.video.alfa/servers/torrent.py | 16 +- 14 files changed, 425 insertions(+), 197 deletions(-) diff --git a/plugin.video.alfa/channels/dontorrent.py b/plugin.video.alfa/channels/dontorrent.py index 34cb237f7..27e6e19b1 100644 --- a/plugin.video.alfa/channels/dontorrent.py +++ b/plugin.video.alfa/channels/dontorrent.py @@ -25,10 +25,10 @@ canonical = { 'channel': 'dontorrent', 'host': config.get_setting("current_host", 'dontorrent', default=''), - 'host_alt': ["https://dontorrent.email/", "https://elitedivx.net/", "https://lilatorrent.com/", + 'host_alt': ["https://dontorrent.exposed/", "https://elitedivx.net/", "https://lilatorrent.com/", "https://mastorrents.net/", "https://reinventorrent.org/", "https://todotorrents.org/", - "https://www13.dontorrent.link/", "https://tomadivx.net/"], - 'host_black_list': ["https://dontorrent.date/", + "https://www14.dontorrent.link/", "https://tomadivx.net/"], + 'host_black_list': ["https://dontorrent.education/", "https://dontorrent.email/", "https://dontorrent.date/", "https://dontorrent.earth/", "https://dontorrent.cricket/", "https://dontorrent.dance/", "https://dontorrent.cologne/", "https://dontorrent.city/", "https://dontorrent.esq/", "https://dontorrent.cc/", "https://dontorrent.sbs/", "https://dontorrent.fyi/", @@ -745,7 +745,7 @@ def search(item, texto, **AHkwargs): try: if texto: if item.btdigg: item.btdigg = texto - item.url = item.referer = host + 'buscar/' + texto + '/page/1' + item.url = item.referer = host + 'buscar/' + texto item.c_type = "search" item.texto = texto return list_all(item) diff --git a/plugin.video.alfa/channels/eztv.py b/plugin.video.alfa/channels/eztv.py index dc961c43a..eea50a8c2 100644 --- a/plugin.video.alfa/channels/eztv.py +++ b/plugin.video.alfa/channels/eztv.py @@ -82,7 +82,7 @@ 'controls': {'min_temp': min_temp, 'url_base64': True, 'add_video_to_videolibrary': True, 'cnt_tot': 20, 'get_lang': False, 'reverse': False, 'videolab_status': True, 'tmdb_extended_info': True, 'seasons_search': False, 'host_torrent': host_torrent, 'btdigg': False, 'duplicates': [], 'dup_list': 'title', 'dup_movies': True, - 'join_dup_episodes': False, 'manage_torrents': True}, + 'join_dup_episodes': False, 'manage_torrents': True, 'sort_findvideos': True}, 'timeout': timeout} AlfaChannel = DictionaryAllChannel(host, movie_path=movie_path, tv_path=tv_path, canonical=canonical, finds=finds, idiomas=IDIOMAS, language=language, list_language=list_language, list_servers=list_servers, @@ -401,7 +401,7 @@ def findvideos_links(item, elem_in, elem_json): elem_json['url'] = 
url.get('href', '') if elem_json['url'].endswith('.torrent'): break if elem_json.get('url'): elem_json['url'] = AlfaChannel.urljoin(host, elem_json['url']) - elif not elem_json.get('torrent_info'): + elif not elem_json.get('torrent_info') and not 'POST' in str(elem): elem_json['torrent_info'] = elem_json['size'] = elem.get_text(strip=True) if x == 3 and not elem_json.get('torrent_info'): diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py index 72b29b1b3..279af714a 100644 --- a/plugin.video.alfa/channels/hdfull.py +++ b/plugin.video.alfa/channels/hdfull.py @@ -30,9 +30,9 @@ canonical = { 'channel': 'hdfull', 'host': config.get_setting("current_host", 'hdfull', default=''), - "host_alt": ["https://hd-full.info/", "https://hdfull.today/", "https://hdfull.quest/"], + "host_alt": ["https://hdfull.blog/", "https://hdfull.today/", "https://hdfull.quest/"], 'host_verification': '%slogin', - "host_black_list": ["https://hd-full.sbs/", "https://hd-full.life/", + "host_black_list": ["https://hd-full.info/", "https://hd-full.sbs/", "https://hd-full.life/", "https://hd-full.fit/", "https://hd-full.me/", "https://hd-full.vip/", "https://hd-full.lol/", "https://hd-full.co/", "https://hd-full.biz/", "https://hd-full.in/", "https://hd-full.im/", "https://hd-full.one/", @@ -954,7 +954,7 @@ def play(item): item.info = {item.url.split("###")[1].split(";")[0]: item.url.split("###")[1].split(";")[0]} item.url = item.url.split("###")[0] mediatype = '1' if item.contentType == 'tvshow' else '2' if item.contentType == 'movie' else '3' if item.contentType == 'episode' else '4' - if item.info: + if item.info and config.get_setting("videolibrary_mark_as_watched"): post = "target_id=%s&target_type=%s&target_status=1" % (list(item.info.keys())[0], mediatype) data = agrupa_datos(AlfaChannel.urljoin(host, "a/status"), post=post, hide_infobox=True) diff --git a/plugin.video.alfa/channels/pelitorrent.py b/plugin.video.alfa/channels/pelitorrent.py index 0b41257f7..3e3298695 100644 --- a/plugin.video.alfa/channels/pelitorrent.py +++ b/plugin.video.alfa/channels/pelitorrent.py @@ -360,7 +360,7 @@ def episodesxseason_matches(item, matches_int, **AHkwargs): for elem in matches_int: elem_json = {} - logger.error(elem) + #logger.error(elem) try: sxe = elem.find('span', class_='num-epi').get_text(strip=True).split('x') diff --git a/plugin.video.alfa/core/httptools.py b/plugin.video.alfa/core/httptools.py index 2a78afe9c..a35a4b94c 100644 --- a/plugin.video.alfa/core/httptools.py +++ b/plugin.video.alfa/core/httptools.py @@ -795,7 +795,7 @@ def retry_alt(url, req, response_call, proxy_data, **opt): if not canonical['host_alt']: return url, response_call host_a = scrapertools.find_single_match(url, patron_host) - url = re.sub('\?__cpo\=.*?$', '', url) + url = re.sub(r'\?__cpo\=.*?$', '', url) if not host_a: return url, response_call diff --git a/plugin.video.alfa/lib/AlfaChannelHelper.py b/plugin.video.alfa/lib/AlfaChannelHelper.py index 3ad929824..433255b4b 100644 --- a/plugin.video.alfa/lib/AlfaChannelHelper.py +++ b/plugin.video.alfa/lib/AlfaChannelHelper.py @@ -123,6 +123,7 @@ def __init__(self, host, timeout=15, channel='', movie_path="/movies", tv_path=" self.color_setting = unify.colors_file[UNIFY_PRESET] self.window = window self.Comment = None + self.SEARCH_CLEAN = '\¿|\?|\/|\$|\@|\<|\>|\.' 
self.httptools = httptools self.response = self.httptools.build_response(HTTPResponse=True) # crea estructura vacía de response @@ -168,7 +169,9 @@ def __init__(self, host, timeout=15, channel='', movie_path="/movies", tv_path=" if self.host != self.domains_updated[self.channel].get('host_alt', [''])[0] or self.host != self.canonical['host_alt'][0] \ or self.domains_updated[self.channel].get('host_alt', []) != self.canonical['host_alt'] \ or self.domains_updated[self.channel].get('UPDATE_CANONICAL'): - self.host = self.canonical['host'] = self.domains_updated[self.channel].get('host_alt', [self.canonical['host']])[0] + self.host = self.canonical['host'] = self.domains_updated[self.channel].get('host_alt', ([self.canonical['host']] \ + if self.canonical['host'] else []) \ + or self.canonical['host_alt'])[0] if config.get_setting("current_host", self.channel) != self.host: config.set_setting("current_host", self.host, self.channel) channel = __import__('channels.%s' % self.channel, None, None, ["channels.%s" % self.channel]) @@ -865,7 +868,7 @@ def find_language(self, elem_in, item): if self.DEBUG: logger.debug('find_LANGUAGE: %s' % language) return language - def convert_size(self, size): + def convert_size(self, size, silent=False): if isinstance(size, (str, unicode)): size = size.replace('[COLOR magenta][B]RAR-[/B][/COLOR]', '') s = 0 @@ -883,7 +886,7 @@ def convert_size(self, size): except Exception: if isinstance(size, float): s = size - if self.DEBUG: logger.debug('SIZE: %s / %s' % (size, s)) + if self.DEBUG and not silent: logger.debug('SIZE: %s / %s' % (size, s)) return s def convert_time(self, seconds): @@ -1499,14 +1502,14 @@ def list_all(self, item, data='', matches_post=None, postprocess=None, genericto self.btdigg_search = self.btdigg and finds.get('controls', {}).get('btdigg_search', False) \ and config.get_setting('find_alt_search', item.channel, default=False) #if self.btdigg: self.cnt_tot = finds_controls.get('cnt_tot', 20) - if item.texto: item.texto = item.texto.replace('%20', ' ').replace('+', ' ').strip() + if item.texto: item.texto = re.sub(self.SEARCH_CLEAN, '', item.texto).strip() if item.btdigg and item.c_type == 'search': if 'matches' in AHkwargs: del AHkwargs['matches'] - item.btdigg = item.season_search = item.texto + item.btdigg = item.season_search = item.texto.replace('%20', ' ').replace('+', ' ') item.texto = '%s%s' % (BTDIGG_URL_SEARCH, item.texto) item.matches = self.find_btdigg_list_all(item, matches, finds_controls.get('channel_alt', DOMAIN_ALT), **AHkwargs) elif item.c_type == 'search' and self.btdigg_search and ('|' in item.texto or '[' in item.texto): - item.season_search = item.texto + item.season_search = item.texto.replace('%20', ' ').replace('+', ' ') item.texto = item.texto.split('|')[0].strip() if '|' in item.texto else item.texto.split('[')[0].strip() item.url = item.url.replace(scrapertools.find_single_match(item.url, r'((?:\s|\+|%20)?[\[|\|].*?)(?:\/|\.|$)'), '') AHkwargs['url'] = item.url @@ -1886,6 +1889,7 @@ def list_all(self, item, data='', matches_post=None, postprocess=None, genericto elif ('|' in item.season_search or '[' in item.season_search) and not '|' in new_item.season_search \ and not '[' in new_item.season_search: new_item.season_search += scrapertools.find_single_match(item.season_search, r'(\s*[\[|\|][^$]+$)') + new_item.season_search = re.sub(self.SEARCH_CLEAN, '', new_item.season_search) if not isinstance(new_item.infoLabels['year'], int): new_item.infoLabels['year'] = str(new_item.infoLabels['year']).replace('-', '') if 
new_item.broadcast: @@ -2144,6 +2148,7 @@ def seasons(self, item, data='', action="episodesxseason", matches_post=None, po self.btdigg_search = self.btdigg and finds.get('controls', {}).get('btdigg_search', False) \ and config.get_setting('find_alt_search', item.channel, default=False) btdigg_contentSeason = 1 + if item.season_search: item.season_search = re.sub(self.SEARCH_CLEAN, '', item.season_search) AHkwargs = {'url': item.url, 'soup': soup, 'finds': finds, 'kwargs': kwargs, 'function': 'seasons'} AHkwargs['matches_post_list_all'] = kwargs.pop('matches_post_list_all', None) @@ -2378,8 +2383,15 @@ def seasons(self, item, data='', action="episodesxseason", matches_post=None, po for elem in matches: elem['season'] = int(scrapertools.find_single_match(str(elem.get('season', '1')), r'\d+') or '1') if item.infoLabels['number_of_seasons'] and elem['season'] > item.infoLabels['number_of_seasons']: - logger.error('TEMPORADA ERRONEA: WEB: %s; TMDB: %s' % (elem['season'], item.infoLabels['number_of_seasons'])) - if finds_controls.get('season_TMDB_limit', True) and not BTDIGG_URL_SEARCH in item.url: continue + config.set_setting('tmdb_cache_read', False) + item_temp = item.clone(contentSeason=elem['season'], contentType='season' if item.contentType != 'movie' else 'movie') + tmdb.set_infoLabels_item(item_temp, modo_grafico, idioma_busqueda=idioma_busqueda) + config.set_setting('tmdb_cache_read', True) + if item_temp.infoLabels['number_of_seasons'] and item_temp.infoLabels['number_of_seasons'] > item.infoLabels['number_of_seasons']: + item.infoLabels['number_of_seasons'] = item_temp.infoLabels['number_of_seasons'] + if item.infoLabels['number_of_seasons'] and elem['season'] > item.infoLabels['number_of_seasons']: + logger.error('TEMPORADA ERRONEA: WEB: %s; TMDB: %s' % (elem['season'], item.infoLabels['number_of_seasons'])) + if finds_controls.get('season_TMDB_limit', True) and not BTDIGG_URL_SEARCH in item.url: continue elem['url'] = elem.get('url', item.url) if url_base64: elem['url'] = self.convert_url_base64(elem['url'], self.host, item=item) diff --git a/plugin.video.alfa/lib/generictools.py b/plugin.video.alfa/lib/generictools.py index f5adb5680..07d2a9e86 100644 --- a/plugin.video.alfa/lib/generictools.py +++ b/plugin.video.alfa/lib/generictools.py @@ -95,6 +95,11 @@ def convert_url_base64(url, host='', referer=None, rep_blanks=True, force_host=F url_base64 = url url_sufix = '' + + if '\\' in url or url.startswith("magnet:") \ + or (item.infoLabels.get('imdb_id', '') and item.infoLabels['imdb_id'].lower() in url.lower()) \ + or (item.video_path and item.video_path.lower() in url.lower()): + return url_base64 + url_sufix if scrapertools.find_single_match(url, patron_local_torrent): return url_base64 + url_sufix @@ -298,6 +303,85 @@ def check_blocked_IP(data, itemlist, url, canonical={}, verbose=True): return (False, itemlist) # No hay bloqueo +def clean_title(title, decode=True, htmlclean=True, torrent_info=False, convert='', strict=False): + + if title: + #if DEBUG: logger.info('Title_IN: %s' % title) + + if convert: + try: + for change in convert: + change_from = change.split('=')[0] + change_to = change.split('=')[1] + title = title.replace(change_from, change_to) + except Exception: + pass + + if decode: + if isinstance(title, str): + title = scrapertools.htmlparser(title) + title = scrapertools.decode_utf8_error(title) + title = filetools.decode(title) + + if torrent_info and isinstance(title, str): + title = title.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\ + 
.replace('Mb', 'M·b').replace('.', ',').replace('\xa0', ' ') + + if isinstance(title, str): + if htmlclean: + title = scrapertools.htmlclean(title) + + title = title.replace('Á', 'A').replace('É', 'E').replace('Í', 'I')\ + .replace('Ó', 'O').replace('Ú', 'U').replace('Ü', 'U')\ + .replace('¡', '').replace('¿', '')\ + .replace('Ñ', 'N').replace('ñ', 'n')\ + .replace('á', 'a').replace('é', 'e').replace('í', 'i')\ + .replace('ó', 'o').replace('ú', 'u').replace('ü', 'u')\ + .replace("&", "&").replace("&", "&")\ + .replace("�", "n").replace("ñ", "n") + + title = title.replace('Á', 'A').replace('É', 'E').replace('Í', 'I').replace('Ó', 'O')\ + .replace('Ú', 'U').replace('Ü', 'U').replace('ü', 'u')\ + .replace('¡', '').replace('¿', '').replace('Ñ', 'N').replace('ñ', 'n')\ + .replace('á', 'a').replace('é', 'e').replace('í', 'i').replace('ó', 'o')\ + .replace('ú', 'u') + + if strict: + title = title.replace('ª', 'a').replace('º', 'o')\ + .replace('ª', 'a').replace('º', 'o') + title = re.compile("\s+", re.DOTALL).sub(" ", title) + title = re.compile("\s", re.DOTALL).sub("-", title.strip()) + title = re.compile("\-+", re.DOTALL).sub("-", title) + + #if DEBUG: logger.info('Title_OUT: %s' % title) + + return title.strip() + + +def set_tmdb_to_json(elem, title_search={}, title='', contentType=''): + + title = title or elem.get('title', '').replace(btdigg_label_B, '') + title_ = title.replace('_', ' ') + contentType = contentType or elem.get('mediatype', 'movie') + itemO = Item() + itemO.c_type = 'peliculas' if contentType == 'movie' else 'series' + itemO.contentType = contentType + if itemO.contentType in ['movie']: itemO.contentTitle = itemO.title = title_.capitalize() + if itemO.contentType in ['tvshow']: itemO.contentSerieName = itemO.title = title_.capitalize() + itemO.season_search = elem.get('season_search', '') or title_ + if title_search: + aliases = title_search.get('aliases', {}) + if title in aliases and '[' in aliases[title]: + itemO.season_search = elem['season_search'] = aliases[title].replace('_', ' ') + itemO.url = elem.get('url', '') + itemO.infoLabels['year'] = elem.get('year', '-') + if elem.get('tmdb_id'): itemO.infoLabels['tmdb_id'] = elem['tmdb_id'] + tmdb.set_infoLabels_item(itemO, True, idioma_busqueda='es') + if itemO.infoLabels['tmdb_id']: elem['tmdb_id'] = itemO.infoLabels['tmdb_id'] + + return itemO, elem + + def get_color_from_settings(label, default='white'): color = config.get_setting(label) @@ -516,7 +600,7 @@ def refresh_screen(item): xlistitem.setArt({"thumb": item.contentThumbnail}) # Cargamos el thumb else: xlistitem.setThumbnailImage(item.contentThumbnail) - xlistitem.setInfo("video", item.infoLabels) # Copiamos infoLabel + xlistitem.setInfo("video", item.infoLabels) # Copiamos infoLabels xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) # Preparamos el entorno para evitar error Kod1 18 time.sleep(1) # Dejamos tiempo para que se ejecute @@ -979,7 +1063,7 @@ def AH_post_tmdb_listado(self, item, itemlist, **AHkwargs): Pasada para maquillaje de los títulos obtenidos desde TMDB en Listado y Listado_Búsqueda. - Toma de infoLabel todos los datos de interés y los va situando en diferentes variables, principalmente título + Toma de infoLabels todos los datos de interés y los va situando en diferentes variables, principalmente título para que sea compatible con Unify, y si no se tienen Títulos Inteligentes, para que el formato sea lo más parecido al de Unify. 
@@ -1214,6 +1298,7 @@ def AH_find_seasons(self, item, matches, **AHkwargs): patron_seasons = findS.get('seasons_search_num_rgx', [[r'(?i)-(\d+)-(?:Temporada|Miniserie)', None], [r'(?i)(?:Temporada|Miniserie)-(\d+)(?:\W|$)', None]]) patron_qualities = findS.get('seasons_search_qty_rgx', [[r'(?i)(?:Temporada|Miniserie)(?:-(.*?)(?:\.|\/|-$|$))', None]]) + SEARCH_CLEAN = self.SEARCH_CLEAN if self else '\¿|\?|\/|\$|\@|\<|\>|\.' list_temps = [] list_temp_int = [] list_temp = [] @@ -1268,10 +1353,10 @@ def AH_find_seasons(self, item, matches, **AHkwargs): title = title_search or alias_out \ or scrapertools.find_single_match(item_search.season_search or item_search.contentSerieName \ or item_search.contentTitle, r'(^.*?)\s*(?:$|\(|\[|\|)').lower() # Limpiamos - title = scrapertools.quote(title, plus=True) + title = scrapertools.quote(re.sub(SEARCH_CLEAN, '', title)) title_list += [title] title_org = scrapertools.find_single_match(item_search.infoLabels['originaltitle'], r'(^.*?)\s*(?:$|\(|\[|\|)').lower() # Limpiamos - title_org = scrapertools.quote(title_org, plus=True) + title_org = scrapertools.quote(re.sub(SEARCH_CLEAN, '', title_org)) if title_org != title: title_list += [title_org] channel = __import__('channels.%s' % item_search.channel, None, None, ["channels.%s" % item_search.channel]) @@ -1472,7 +1557,7 @@ def AH_post_tmdb_episodios(self, item, itemlist, **AHkwargs): Pasada para maquillaje de los títulos obtenidos desde TMDB en Episodios. - Toma de infoLabel todos los datos de interés y los va situando en diferentes variables, principalmente título + Toma de infoLabels todos los datos de interés y los va situando en diferentes variables, principalmente título para que sea compatible con Unify, y si no se tienen Títulos Inteligentes, para que el formato sea lo más parecido al de Unify. @@ -1516,7 +1601,7 @@ def AH_post_tmdb_findvideos(self, item, itemlist, **AHkwargs): Llamada para crear un pseudo título con todos los datos relevantes del vídeo. - Toma de infoLabel todos los datos de interés y los va situando en diferentes variables, principalmente título. Lleva un control del num. de episodios por temporada + Toma de infoLabels todos los datos de interés y los va situando en diferentes variables, principalmente título. Lleva un control del num. de episodios por temporada En Itemlist devuelve un Item con el pseudotítulo. Ahí el canal irá agregando el resto. 
@@ -1953,6 +2038,7 @@ def AH_find_btdigg_ENTRY_from_BTDIGG(self, title='', contentType='episode', lang if not PY3: from lib.alfaresolver import get_cached_files else: from lib.alfaresolver_py3 import get_cached_files + if title: title = clean_title(title) found = {} if not title else [] timer = 15 cached_btdigg_AGE = 0.0 @@ -1984,14 +2070,16 @@ def AH_find_btdigg_ENTRY_from_BTDIGG(self, title='', contentType='episode', lang for c_type in contentType: if len(window.getProperty("alfa_cached_btdigg_%s_list" % c_type)) < 3: item.AH_find_btdigg_ENTRY_from_BTDIGG = True - if c_type == 'movie': - item.c_type = 'peliculas' + if c_type in ['movie', 'tvshow']: + item.c_type = 'series' if c_type == 'tvshow' else 'peliculas' + time_now = time.time() matches_btdigg, cached_btdigg[c_type] = AH_find_btdigg_list_all_from_BTDIGG(self, item, **AHkwargs) - window.setProperty("alfa_cached_btdigg_%s_list" % c_type, jsontools.dump(cached_btdigg[c_type], **kwargs_json)) else: + if DEBUG: logger.debug('cached_AGE: %s / %s' % (c_type, round((cached_btdigg_AGE-time.time())/60, 2))) time_now = time.time() cached_btdigg[c_type] = get_cached_files(c_type) - window.setProperty("alfa_cached_btdigg_%s_list" % c_type, jsontools.dump(cached_btdigg[c_type], **kwargs_json)) + cached_btdigg['password'] = get_cached_files('password') + window.setProperty("alfa_cached_passwords", jsontools.dump(cached_btdigg['password'])) if isinstance(cached_btdigg[c_type], dict): for key, value in cached_btdigg[c_type].items(): episode_list_len += len(value.get('episode_list', {})) @@ -2009,10 +2097,13 @@ def AH_find_btdigg_ENTRY_from_BTDIGG(self, title='', contentType='episode', lang and elem_pass.get('password'): epi['password'] = elem_pass['password'] break - logger.info('CACHED %s[%s]: %s; e: %s q: %s' \ - % (item.c_type or 'Seasons', round(time.time()-time_now, 2), len(cached_btdigg[c_type]), - episode_list_len, matches_cached_len), force=True) - episode_list_len = matches_cached_len = 0 + window.setProperty("alfa_cached_btdigg_%s_list" % c_type, jsontools.dump(cached_btdigg[c_type], **kwargs_json)) + + logger.info('CACHED %s[%s]: %s; e: %s q: %s' \ + % (c_type or 'Season', round(time.time()-time_now, 2), len(cached_btdigg[c_type]), + episode_list_len, matches_cached_len), force=True) + episode_list_len = matches_cached_len = 0 + if cached_btdigg_AGE < time.time(): cached_btdigg_AGE = time.time() + timer*60 window.setProperty("alfa_cached_btdigg_list_AGE", str(cached_btdigg_AGE)) @@ -2022,7 +2113,10 @@ def AH_find_btdigg_ENTRY_from_BTDIGG(self, title='', contentType='episode', lang if cached_btdigg[c_type]: exists += 1 continue - cached_btdigg[c_type] = jsontools.load(window.getProperty("alfa_cached_btdigg_%s_list" % c_type)) + if c_type in ['movie', 'tvshow']: + matches_btdigg, cached_btdigg[c_type] = AH_find_btdigg_list_all_from_BTDIGG(self, item.clone(c_type=c_type), **AHkwargs) + else: + cached_btdigg[c_type] = jsontools.load(window.getProperty("alfa_cached_btdigg_%s_list" % c_type)) if cached_btdigg[c_type]: exists += 1 if exists == 0: window.setProperty("alfa_cached_btdigg_list_AGE", "") @@ -2057,10 +2151,11 @@ def AH_find_btdigg_ENTRY_from_BTDIGG(self, title='', contentType='episode', lang for c_type in contentType: title_search = title if "en espa" in title_search: title_search = title_search[:-11] - title_search = alias_in or scrapertools.slugify(title_search, strict=False, convert=convert)\ + title_search = alias_in or scrapertools.slugify(title_search.replace('%20', ' '), strict=False, convert=convert)\ 
.strip().lower().replace(' ', '_').replace('(V)-', '') - title_alt = alias_out or scrapertools.slugify(item.infoLabels['title_alt'] or item.title, strict=False, convert=convert)\ - .strip().lower().replace(' ', '_').replace('(V)-', '').replace('class_act', '').replace('buscar', '') + title_alt = alias_out or scrapertools.slugify((item.infoLabels['title_alt'] or item.title).replace('%20', ' '), + strict=False, convert=convert).strip().lower()\ + .replace(' ', '_').replace('(V)-', '').replace('class_act', '').replace('buscar', '') if title_search == title_alt: title_alt = '' if c_type == 'movie' and not search: title_search = '%s_%s_%s' % (title_search, str(language), c_type) @@ -2213,6 +2308,15 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} if item.btdigg: quality_control = False canonical = AHkwargs.get('canonical', self.canonical if self else {}) + contentType = 'tvshow' if item.c_type == 'series' else 'movie' if item.c_type == 'peliculas' else item.c_type \ + if item.c_type != 'search' else "" + timer = 15 + cached_btdigg_AGE = float(window.getProperty("alfa_cached_btdigg_list_AGE") or 0.0) + if DEBUG: logger.debug('cached_AGE: %s / %s' % (contentType or item.c_type, round((cached_btdigg_AGE-time.time())/60, 2))) + if cached_btdigg_AGE < time.time(): + for c_type in ['movie', 'tvshow', 'episode']: + window.setProperty("alfa_cached_btdigg_%s_list" % c_type, "") + title_clean = AHkwargs.get('finds', {}).get('title_clean', []) title_clean.append([r'(?i)\s*UNCUT', '']) patron_title = r'(?i)(.*?)\s*(?:-*\s*temp|\(|\[)' @@ -2261,22 +2365,29 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} return matches_btdigg, matches_index format_tmdb_id(item) - contentType = 'tvshow' if item.c_type == 'series' else 'movie' if item.c_type == 'peliculas' else '' if item.c_type == 'search' and not item.btdigg: found = (AH_find_btdigg_ENTRY_from_BTDIGG(self, title=item.texto or item.contentTitle, contentType=item.c_type, matches=matches_btdigg, item=item.clone(), reset=False, **AHkwargs)) for found_item in found: + if item.infoLabels['tmdb_id'] and found_item.get('tmdb_id', item.infoLabels['tmdb_id']) != item.infoLabels['tmdb_id']: continue if found_item and found_item.get('matches_cached'): title = scrapertools.slugify(re.sub(r'\s*\[.*?\]', '', found_item.get('title', '')), strict=False, convert=convert).strip().lower().replace(' ', '_') key = '%s_%s_%s' % (title, found_item.get('language', ['CAST']), found_item.get('mediatype', '')) if matches_index.get(key, {}).get('quality', []): + if found_item.get('tmdb_id', item.infoLabels['tmdb_id']) and not matches_index.get(key, {}).get('tmdb_id'): + matches_index[key]['tmdb_id'] = found_item.get('tmdb_id', item.infoLabels['tmdb_id']) if found_item.get('quality', '') not in matches_index[key]['quality']: matches_index[key]['quality'] += found_item['quality'] if not matches_index[key]['quality'] \ else ', %s' % found_item['quality'] + for matches_cached in found_item['matches_cached']: + if matches_cached.get('url') and matches_cached['url'] in str(matches_index[key]): continue + matches_index[key]['matches_cached'].append(matches_cached.copy()) continue + matches_index.update({key: {'title': found_item['title'], 'mediatype': found_item['mediatype'], + 'tmdb_id': found_item.get('tmdb_id', item.infoLabels['tmdb_id']), 'quality': found_item['quality'], 'matches_cached': [], 'episode_list': {}}}) matches_btdigg.append(found_item) @@ -2295,7 +2406,7 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, 
item, matches=[], matches_index={} if not item.btdigg: quality_alt = '720p 1080p 2160p 4kwebrip 4k' - if item.c_type in ['peliculas', 'search'] and 'HDTV' not in str(title_search['urls']): + if item.c_type in ['peliculas', 'movie', 'search'] and 'HDTV' not in str(title_search['urls']): quality_alt += ' bluray rip screener' language_alt = ['DUAL', 'CAST', 'LAT'] if item.c_type in ['search'] and channel_alt in str(title_search['urls']): @@ -2337,7 +2448,24 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} alfa_gateways = [] if (xbmc.Player().isPlaying() or ASSISTANT_REMOTE) and len(alfa_gateways) > 1: use_assistant = False - torrent_params = find_alternative_link(item, torrent_params=torrent_params, cache=disable_cache, use_assistant=use_assistant) + + if torrent_params['find_catched'] and contentType in ['movie', 'tvshow'] and window and cached_btdigg_AGE >= time.time() \ + and len(window.getProperty("alfa_cached_btdigg_%s_list" % contentType)) > 3: + try: + import ast + torrent_params = ast.literal_eval(window.getProperty("alfa_cached_btdigg_%s_list" % contentType) or []) + except Exception: + logger.error(traceback.format_exc()) + torrent_params = find_alternative_link(item, torrent_params=torrent_params, cache=disable_cache, + use_assistant=use_assistant) + window.setProperty("alfa_cached_btdigg_%s_list" % contentType, str(torrent_params)) + else: + torrent_params = find_alternative_link(item, torrent_params=torrent_params, cache=disable_cache, use_assistant=use_assistant) + if contentType in ['movie', 'tvshow'] and window and torrent_params: + window.setProperty("alfa_cached_btdigg_%s_list" % contentType, str(torrent_params)) + if cached_btdigg_AGE < time.time(): + cached_btdigg_AGE = time.time() + timer*60 + window.setProperty("alfa_cached_btdigg_list_AGE", str(cached_btdigg_AGE)) if not torrent_params.get('find_alt_link_result') and not torrent_params.get('find_alt_link_next'): x = 999999 if not torrent_params.get('find_alt_link_result') and torrent_params.get('find_alt_link_next', 0) >= limit_pages_min: x = 999999 @@ -2374,7 +2502,8 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} if elem.get('episode'): elem_json['episode'] = elem.get('episode') elem_json['title'] = elem.get('title', '').replace(btdigg_label_B, '') - elem_json['year'] = scrapertools.find_single_match(re.sub(r'(?i)cap\.\d+', '', elem_json['title']), '.+?'+patron_year) or '-' + elem_json['year'] = scrapertools.find_single_match(re.sub(r'(?i)cap\.\d+', '', elem_json['title']), + '.+?'+patron_year) or '-' if elem_json['year'] in ['720', '1080', '2160']: elem_json['year'] = '-' if scrapertools.find_single_match(elem_json['title'], patron_title).strip(): elem_json['title'] = scrapertools.find_single_match(elem_json['title'], patron_title).strip() @@ -2392,21 +2521,24 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} elem_json['title'] = re.sub(clean_org, clean_des, elem_json['title']).strip() elem_json['title'] = elem_json['title'].replace('- ', '') title = scrapertools.slugify(elem_json.get('title', ''), strict=False, convert=convert).strip().lower().replace(' ', '_') - elem_json['title'] = elem_json['title'].replace(':', '') + elem_json['title'] = clean_title(elem_json['title']).replace(':', '') if elem_json['mediatype'] != 'movie' and quality_control: title = '%s_%s' % (title, elem_json['quality']) + if elem_json['mediatype'] == 'movie': + if elem.get('tmdb_id'): elem_json['tmdb_id'] = elem['tmdb_id'] + else: itemO, 
elem_json = set_tmdb_to_json(elem_json, title_search=title_search) + elem_json['media_path'] = elem_json['mediatype'] if self: elem_json['media_path'] = self.movie_path.strip('/') if elem_json['mediatype'] == 'movie' else self.tv_path.strip('/') - elem_json['torrent_info'] = elem.get('torrent_info', '') - elem_json['torrent_info'] = elem.get('size', '').replace(btdigg_label_B, '').replace('GB', 'G·B').replace('Gb', 'G·b')\ - .replace('MB', 'M·B').replace('Mb', 'M·b').replace('.', ',')\ - .replace('\xa0', ' ') + elem_json['torrent_info'] = clean_title(elem.get('torrent_info', elem.get('size', '')), torrent_info=True)\ + .replace(btdigg_label_B, '') elem_json['torrent_info'] += ' (%s)' % (alias_in or elem_json['title']) elem_json['size'] = elem.get('size', '').replace(btdigg_label_B, '')\ .replace('\xa0', ' ')\ .replace('[COLOR magenta][B]RAR-[/B][/COLOR]', '') - if self and elem_json['mediatype'] in ['movie', 'episode']: elem_json['size'] = self.convert_size(elem_json['size']) + if self and elem_json['mediatype'] in ['movie', 'episode']: + elem_json['size'] = self.convert_size(elem_json['size'], silent=True) quality = elem_json['quality'].replace(btdigg_label, '') elem_json['quality'] = '%s%s' % (quality, btdigg_label) elem_json['server'] = 'torrent' @@ -2441,8 +2573,10 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} if matches_index.get(key, {}).get('quality'): if elem_json['url'] not in str(matches_index[key]): if quality not in matches_index[key]['quality'].split(', '): + if elem_json.get('tmdb_id', item.infoLabels['tmdb_id']) and not matches_index.get(key, {}).get('tmdb_id'): + matches_index[key]['tmdb_id'] = elem_json.get('tmdb_id', item.infoLabels['tmdb_id']) matches_index[key]['quality'] += ', %s' % quality - if DEBUG: logger.debug('QUALITY added: %s / %s' % (key, quality)) + if DEBUG: logger.info('QUALITY added: %s / %s' % (key, quality)) matches_index[key]['matches_cached'].append(elem_json.copy()) if elem_json['title'] not in str(matches_btdigg): matches_btdigg.append(elem_json_save.copy()) @@ -2451,6 +2585,7 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} item.btdig_in_use = True matches_btdigg.append(elem_json_save.copy()) matches_index.update({key: {'title': elem_json['title'], 'mediatype': elem_json['mediatype'], 'url': elem_json['url'], + 'tmdb_id': elem_json.get('tmdb_id', item.infoLabels['tmdb_id']), 'quality': quality, 'matches_cached': [elem_json.copy()]}}) except Exception: @@ -2462,8 +2597,6 @@ def AH_find_btdigg_list_all_from_BTDIGG(self, item, matches=[], matches_index={} except Exception: logger.error(traceback.format_exc()) - if item.c_type in ['peliculas']: - window.setProperty("alfa_cached_btdigg_%s_list" % 'movie', jsontools.dump(matches_index, **kwargs_json)) if DEBUG: logger.debug('matches_BTDIGG: %s / %s \r\n%s' % (len(matches_btdigg), str(matches_btdigg)[:SIZE_MATCHES], str(matches_index)[:SIZE_MATCHES])) return matches_btdigg, matches_index @@ -2507,6 +2640,7 @@ def AH_find_btdigg_list_all(self, item, matches=[], channel_alt=channel_py, titl for elem_json in matches: language = elem_json.get('language', '') + if elem_json.get('title', ''): elem_json['title'] = clean_title(elem_json['title']) if not language or '*' in str(language): language = elem_json['language'] = ['CAST'] mediatype = elem_json['mediatype'] = elem_json.get('mediatype', '') or ('movie' if self.movie_path in elem_json['url'] else 'tvshow') @@ -2554,11 +2688,8 @@ def AH_find_btdigg_list_all(self, item, matches=[], 
channel_alt=channel_py, titl season = elem_json.get('season', 0) if elem_json['mediatype'] not in ['episode'] and elem_json.get('season', 0): del elem_json['season'] # Slugify, pero más light - elem_json['title'] = scrapertools.htmlclean(elem_json['title']).strip() - elem_json['title'] = elem_json['title'].replace("á", "a").replace("é", "e").replace("í", "i")\ - .replace("ó", "o").replace("ú", "u").replace("ü", "u")\ - .replace("�", "ñ").replace("ñ", "ñ").replace(' - ', ' ') - elem_json['title'] = scrapertools.decode_utf8_error(elem_json['title']).strip() + elem_json['title'] = clean_title(elem_json['title']).replace(' - ', ' ') + if "en espa" in elem_json['title']: elem_json['title'] = elem_json['title'][:-11] language = 'latino' if 'latino/' in elem_json['url'] else '' @@ -2630,6 +2761,8 @@ def AH_find_btdigg_list_all(self, item, matches=[], channel_alt=channel_py, titl matches_index[key]['quality'] += ', %s' % q elem_json['quality'] = matches_index[key]['quality'] + if matches_index.get(key, {}).get('tmdb_id') and not elem_json.get('tmdb_id'): + elem_json['tmdb_id'] = matches_index[key]['tmdb_id'] if matches_index.get(key, {}).get('matches_cached', []) and mediatype != 'tvshow': elem_json['matches_cached'] = matches_index[key]['matches_cached'][:] if mediatype != 'movie': @@ -2653,7 +2786,7 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): from lib.AlfaChannelHelper import DictionaryAllChannel import ast - item = Item() + itemO = Item() try: titles_search = ast.literal_eval(window.getProperty("alfa_cached_btdigg_movie")) @@ -2687,19 +2820,21 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): convert = ['.=', '-= ', ':=', '&=and', ' = '] title = '_' self = {} + config.set_setting('tmdb_cache_read', False) try: if False: # Inhabilidado temporalemente channel = __import__('channels.%s' % channel_py, None, None, ["channels.%s" % channel_py]) self = DictionaryAllChannel(channel.host, channel=channel_py, finds=channel.finds, debug=DEBUG) - item.contentType = contentType = 'episode' - item.c_type = 'series' + itemO.contentType = contentType = 'episode' + itemO.c_type = 'series' - for item.page in range(1, 3): - matches, matches_index = (AH_find_btdigg_list_all_from_channel_py(self, item, matches=matches, matches_index=matches_index)) + for itemO.page in range(1, 3): + matches, matches_index = (AH_find_btdigg_list_all_from_channel_py(self, itemO, matches=matches, matches_index=matches_index)) if not matches: break if monitor.waitForAbort(10): + config.set_setting('tmdb_cache_read', True) return for elem in matches: @@ -2729,8 +2864,8 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): limit_pages = int((btdigg_entries * limit_search) / 10) limit_items_found = int(btdigg_entries * limit_search) - item.contentType = contentType - item.c_type = 'peliculas' if contentType == 'movie' else 'series' + itemO.contentType = contentType + itemO.c_type = 'peliculas' if contentType == 'movie' else 'series' cached_str = str(cached[contentType]) torrent_params = { @@ -2764,7 +2899,7 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): window.setProperty("alfa_cached_btdigg_episode", 'CANCEL') raise Exception("CANCEL") - torrent_params = find_alternative_link(item, torrent_params=torrent_params, cache=disable_cache, use_assistant=use_assistant) + torrent_params = find_alternative_link(itemO, torrent_params=torrent_params, cache=disable_cache, use_assistant=use_assistant) if torrent_params.get('find_alt_link_code', '') in ['200']: if not 
torrent_params.get('find_alt_link_result') and not torrent_params.get('find_alt_link_next'): x = 999999 @@ -2777,21 +2912,29 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): for elem in torrent_params.get('find_alt_link_result', []): #logger.error(elem) - if elem.get('url', '') in cached_str: continue + if elem.get('url', '') in cached_str: + logger.debug('Error en URL: %s' % elem.get('url', '')) + continue elem['size'] = elem.get('size', '').replace('\xa0', ' ') + elem['title'] = clean_title(elem.get('title', '')) + title = title_save = elem['title'].replace(btdigg_label_B, '') - if contentType == 'tvshow': - title = elem.get('title', '').replace(btdigg_label_B, '') - if scrapertools.find_single_match(title, patron_title).strip(): - title = scrapertools.find_single_match(title, patron_title).strip() - elif scrapertools.find_single_match(title, patron_title_b).strip(): - title = scrapertools.find_single_match(title, patron_title_b).strip() - else: - if DEBUG: logger.debug('Error en PATRON: %s / %s' % (elem.get('title', '').replace(btdigg_label_B, ''), patron_title)) - continue - title = title.replace('- ', '').replace('.', ' ') - if title in str(cached[contentType]): continue + if scrapertools.find_single_match(title_save, patron_title).strip(): + title_save = scrapertools.find_single_match(title_save, patron_title).strip() + elif scrapertools.find_single_match(title_save, patron_title_b).strip(): + title_save = scrapertools.find_single_match(title_save, patron_title_b).strip() + else: + logger.debug('Error en PATRON: %s / %s' % (elem.get('title_save', '').replace(btdigg_label_B, ''), patron_title)) + continue + title = title.replace('- ', '').replace('.', ' ') + if contentType == 'tvshow': title = title_save + + if title in str(cached[contentType]): + #logger.debug('Error en TÍTULO DUP: %s' % (elem['title'])) + continue + + item, elem = set_tmdb_to_json(elem, title_search=title_search, title=title_save) cached[contentType].append(elem.copy()) @@ -2800,10 +2943,13 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): break if monitor.waitForAbort(5): + config.set_setting('tmdb_cache_read', True) return window.setProperty("alfa_cached_btdigg_%s" % contentType, str(cached[contentType])) + logger.info('## %s: %s' % (contentType.capitalize(), len(cached[contentType]))) if monitor.waitForAbort(1 * 60): + config.set_setting('tmdb_cache_read', True) return contentType = 'episode' @@ -2826,16 +2972,13 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): else: if DEBUG: logger.debug('Error en PATRON: %s / %s' % (elem_show.get('title', '').replace(btdigg_label_B, ''), patron_title)) continue - elem_json['title'] = elem_json['title'].replace('- ', '').replace('.', ' ') - elem_json['title'] = scrapertools.htmlclean(elem_json['title']).strip() - elem_json['title'] = elem_json['title'].replace("á", "a").replace("é", "e").replace("í", "i")\ - .replace("ó", "o").replace("ú", "u").replace("ü", "u")\ - .replace("�", "ñ").replace("ñ", "ñ").replace(' - ', ' ') - elem_json['title'] = scrapertools.decode_utf8_error(elem_json['title']).strip() + elem_json['title'] = clean_title(elem_json['title']).replace('- ', '').replace('.', ' ').replace(' - ', ' ') if "en espa" in elem_json['title']: elem_json['title'] = elem_json['title'][:-11] - title = scrapertools.slugify(elem_json.get('title', ''), strict=False, convert=convert).strip().lower().replace(' ', '_') + title = scrapertools.slugify(elem_json['title'], strict=False, convert=convert).strip().lower().replace(' 
', '_') elem_json['title'] = elem_json['title'].replace(':', '') - if title in cached[contentType]: continue + if title in cached[contentType]: + if DEBUG: logger.debug('Error en TÍTULO DUP: %s' % title) + continue elem_json['season'] = season_low = int(scrapertools.find_single_match(elem_show['title'], patron_sea)) elem_json['quality'] = '' @@ -2844,33 +2987,17 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): elem_json['episode_list'] = {} elem_json['url'] = '%sserie_btdig/%s%s' % (btdigg_url, elem_json['language'], elem_json['title'].replace(' ', '-').lower().strip()) - if not elem_json.get('title', '') or not elem_json.get('url', ''): continue - - item = Item() - item.c_type = 'series' - item.contentType = 'tvshow' - item.contentSerieName = item.title = elem_json['title'].capitalize() - item.season_search = elem_json.get('season_search', '') or elem_json['title'] - aliases = titles_search[-1].get('aliases', {}) - if title in aliases and '[' in aliases[title]: - item.season_search = elem_json['season_search'] = aliases[title] - if '#' in item.season_search: - alias = search_btdigg_free_format_parse({}, item.clone(), titles_search=BTDIGG_SEARCH)[0].get('aliases', {}) - if alias: - alias_in = list(alias.keys())[0].replace('_', ' ').capitalize() - alias_out = list(alias.values())[0].replace('_', ' ').capitalize() - item.url = elem_json['url'] - item.infoLabels['year'] = elem_json.get('year', '-') - config.set_setting('tmdb_cache_read', False) + if not elem_json.get('title', '') or not elem_json.get('url', ''): + if DEBUG: logger.debug('Error NO TÍTULO o NO URL: %s' % elem_json) + continue + + if elem_show.get('tmdb_id'): elem_json['tmdb_id'] = elem_show['tmdb_id'] + item, elem_json = set_tmdb_to_json(elem_json, title_search=titles_search[-1], title=title, contentType='tvshow') tmdb.set_infoLabels_item(item, True, idioma_busqueda='es') - if item.infoLabels['tmdb_id']: tmdb.set_infoLabels_item(item, True, idioma_busqueda='es') + if item.contentTitle and item.contentTitle.lower() != item.contentSerieName.lower(): item.contentTitle = item.contentTitle.replace('- ', '').replace(':', '') - item.contentTitle = scrapertools.htmlclean(item.contentTitle).strip() - item.contentTitle = item.contentTitle.replace("á", "a").replace("é", "e").replace("í", "i")\ - .replace("ó", "o").replace("ú", "u").replace("ü", "u")\ - .replace("�", "ñ").replace("ñ", "ñ").replace(' - ', ' ') - item.contentTitle = scrapertools.decode_utf8_error(item.contentTitle).strip() + item.contentTitle = clean_title(item.contentTitle).replace(' - ', ' ') if "en espa" in item.contentTitle: item.contentTitle = item.contentTitle[:-11] if item.infoLabels['tmdb_id']: if item.contentTitle.lower() != item.contentSerieName.lower(): item.infoLabels['title_alt'] = item.contentTitle @@ -2878,11 +3005,19 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): item.infoLabels['number_of_seasons'] = int(scrapertools.find_single_match(item.infoLabels.\ get('last_series_episode_to_air', '%sx1' \ % item.infoLabels.get('number_of_seasons', elem_json['season'])), r'(\d+)x\d+')) - item.contentSeason = elem_json['season'] = item.infoLabels['number_of_seasons'] - tmdb.set_infoLabels_item(item, True, idioma_busqueda='es') - config.set_setting('tmdb_cache_read', True) + if elem_json['season'] > item.infoLabels['number_of_seasons']: + if item.infoLabels.get('temporada_num_episodios'): del item.infoLabels['temporada_num_episodios'] + item.contentSeason = elem_json['season'] + tmdb.set_infoLabels_item(item, True, 
idioma_busqueda='es') + if not item.infoLabels.get('temporada_num_episodios'): + item.contentSeason = elem_json['season'] = item.infoLabels['number_of_seasons'] + tmdb.set_infoLabels_item(item, True, idioma_busqueda='es') seasons = elem_json['season'] - if seasons != season_low: seasons = '%s-%s' % (season_low, seasons) + if seasons != season_low: + if seasons > season_low: + seasons = '%s-%s' % (season_low, seasons) + else: + seasons = '%s-%s' % (seasons, season_low) item.infoLabels['temporada_num_episodios'] = item.infoLabels['temporada_num_episodios'] or 10 if not isinstance(item.infoLabels['last_episode_to_air'], int): @@ -2902,6 +3037,7 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): titles_search = search_btdigg_free_format_parse(self, item.clone(), titles_search_save, contentType) for title_search in titles_search: + z = 0 limit_search = title_search.get('limit_search', 1) if contentType != title_search.get('contentType', ''): continue if limit_search <= 0: continue @@ -2971,13 +3107,20 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): x = 999999 x += 1 + if z == 0 and not torrent_params.get('find_alt_link_result', []): + logger.debug('Error en SEARCH: %s: %s' % (item.season_search, torrent_params)) + z += 1 for elem in torrent_params.get('find_alt_link_result', []): #logger.error(elem) cached_str = str(cached[contentType][title]) if elem.get('url', '') in cached_str: - if DEBUG: logger.debug('Error en URL: %s / %s' % (elem.get('url', ''), cached_str)) + if DEBUG: + logger.debug('Error en URL: %s / %s' % (elem.get('url', ''), cached_str)) + else: + logger.debug('Error en URL: %s' % elem.get('url', '')) continue elem_episode = {} + elem['title'] = clean_title(elem.get('title', '')) try: elem_episode['title'] = elem.get('title', '').replace(btdigg_label_B, '') @@ -2986,22 +3129,27 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): elem_episode['season'] = int(scrapertools.find_single_match(elem['title'], patron_sea)) if elem_episode['season'] != elem_json['season'] and elem_episode['season'] < season_low: other_season = True - if DEBUG: logger.debug('Error en SEASON: %s / %s' % (elem_episode['season'], elem_json['season'])) + logger.debug('Error en SEASON: %s / %s' % (elem_episode['season'], elem_json['season'])) continue elem_episode['episode'] = int(scrapertools.find_single_match(elem['title'], patron_cap)) - if elem_episode['episode'] > item.infoLabels['temporada_num_episodios']: continue + if elem_episode['episode'] > item.infoLabels['temporada_num_episodios']: + logger.debug('Error en EPISODE: %s / %s: %s' % (elem_episode['episode'], + item.infoLabels['temporada_num_episodios'], + elem)) + continue sxe = '%sx%s' % (elem_episode['season'], str(elem_episode['episode']).zfill(2)) if scrapertools.find_single_match(elem_episode['title'], patron_title).strip(): elem_episode['title'] = scrapertools.find_single_match(elem_episode['title'], patron_title).strip() elif scrapertools.find_single_match(elem_episode['title'], patron_title_b).strip(): elem_episode['title'] = scrapertools.find_single_match(elem_episode['title'], patron_title_b).strip() else: - if DEBUG: logger.debug('Error en PATRON: %s / %s' % (elem_episode['title'], patron_title)) + logger.debug('Error en PATRON: %s / %s' % (elem_episode['title'], patron_title)) continue - elem_episode['title'] = elem_episode['title'].replace('- ', '').replace('.', ' ') - elem_episode['title'] = scrapertools.htmlclean(elem_episode['title']).strip() - if elem_episode['title'].lower() != 
elem_json['title'].lower(): - if DEBUG: logger.debug('Error en TÍTULO: %s / %s' % (elem_episode['title'], elem_json['title'])) + elem_episode['title'] = clean_title(elem_episode['title']).replace('- ', '').replace('.', ' ') + if elem_episode['title'].lower() != elem_json['title'].lower() \ + and elem_episode['title'].lower() != item.infoLabels['originaltitle'].lower() \ + and elem_episode['title'].lower() != item.infoLabels['title_alt'].lower(): + logger.debug('Error en TÍTULO: %s / %s' % (elem_episode['title'], elem_json['title'])) continue elem_episode['url'] = elem_json['url'] elem_episode['mediatype'] = 'episode' @@ -3011,10 +3159,8 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): elem_episode['url'] = elem.get('url', '') elem_episode['quality'] = elem.get('quality', '').replace('HDTV 720p', 'HDTV-720p') elem_episode['server'] = 'torrent' - elem_episode['torrent_info'] = elem.get('size', '').replace(btdigg_label_B, '').replace('GB', 'G·B')\ - .replace('Gb', 'G·b').replace('MB', 'M·B')\ - .replace('Mb', 'M·b').replace('.', ',')\ - .replace('\xa0', ' ') + elem_episode['torrent_info'] = clean_title(elem.get('torrent_info', elem.get('size', '')), torrent_info=True)\ + .replace(btdigg_label_B, '') elem_episode['torrent_info'] += ' (%s)' % (alias_in or elem_episode['title']) elem_episode['size'] = elem.get('size', '').replace(btdigg_label_B, '')\ .replace('\xa0', ' ')\ @@ -3041,8 +3187,10 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): if other_season and limit_pages < limit_search + 2: limit_pages += 1 if monitor.waitForAbort(1): + config.set_setting('tmdb_cache_read', True) return if monitor.waitForAbort(5): + config.set_setting('tmdb_cache_read', True) return if len(cached[contentType][title]['episode_list']) >= episodes: @@ -3052,6 +3200,7 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): if window.getProperty("alfa_cached_btdigg_episode") == 'TIMEOUT_CANCEL': logger.error('##### %s' % window.getProperty("alfa_cached_btdigg_episode")) window.setProperty("alfa_cached_btdigg_episode", 'CANCEL') + config.set_setting('tmdb_cache_read', True) return logger.error(traceback.format_exc()) @@ -3070,6 +3219,7 @@ def CACHING_find_btdigg_list_all_NEWS_from_BTDIGG_(options=None): except Exception as e: logger.error(traceback.format_exc()) window.setProperty("alfa_cached_btdigg_episode", 'CANCEL') + config.set_setting('tmdb_cache_read', True) def AH_find_btdigg_seasons(self, item, matches=[], domain_alt=channel_py, **AHkwargs): @@ -3143,7 +3293,10 @@ def AH_find_btdigg_seasons(self, item, matches=[], domain_alt=channel_py, **AHkw seasons = season season_low = contentSeason or season_high[-1] + 1 or season if season != season_low: - seasons = '%s-%s' % (season_low, season) + if season > season_low: + seasons = '%s-%s' % (season_low, season) + else: + seasons = '%s-%s' % (season, season_low) elif btdigg_url in item.url and BTDIGG_URL_SEARCH not in item.url and BTDIGG_URL_SEARCH not in item.url_tvshow: seasons = '1-%s' % season season_low = 1 @@ -3412,7 +3565,7 @@ def AH_find_btdigg_episodes(self, item, matches=[], domain_alt=channel_py, **AHk if DEBUG: logger.debug('EPIs ANTIGUOs: %s' % item.infoLabels['last_air_date']) matches = AH_find_btdigg_matches(item, matches, **AHkwargs) - matches = sorted(matches, key=lambda it: (it.get('episode', 0), self.convert_size(it.get('size', 0)))) \ + matches = sorted(matches, key=lambda it: (it.get('episode', 0), self.convert_size(it.get('size', 0), silent=True))) \ if matches else [] return matches @@ 
-3559,11 +3712,10 @@ def AH_find_btdigg_episodes(self, item, matches=[], domain_alt=channel_py, **AHk if elem.get('season_search', ''): elem_json['season_search'] = elem['season_search'] if '#' in item.season_search: elem_json['season_search'] = item.season_search elem_json['quality'] = '%s%s' % (elem_json['quality'], btdigg_label) - elem_json['size'] = elem.get('size', '').replace(btdigg_label_B, '')\ - .replace('\xa0', ' ') - elem_json['torrent_info'] = elem_json['size'] - elem_json['torrent_info'] += ' (%s)' % (alias_in or scrapertools.find_single_match(elem.get('title', '')\ - .replace(btdigg_label_B, ''), patron_title)) + elem_json['size'] = elem.get('size', '').replace(btdigg_label_B, '').replace('\xa0', ' ') + elem_json['torrent_info'] = clean_title(elem.get('size', ''), torrent_info=True) + elem_json['torrent_info'] += ' (%s)' % (alias_in or clean_title(scrapertools.find_single_match(elem.get('title', ''), + patron_title))) elem_json['language'] = elem.get('language', []) or item.language elem_json['title'] = '' elem_json['server'] = 'torrent' @@ -3597,7 +3749,7 @@ def AH_find_btdigg_episodes(self, item, matches=[], domain_alt=channel_py, **AHk if len(epis_index) >= last_episode_to_air: break if matches_len == len(matches): matches = AH_find_btdigg_matches(item, matches, **AHkwargs) - matches = sorted(matches, key=lambda it: (it.get('episode', 0), self.convert_size(it.get('size', 0)))) if matches else [] + matches = sorted(matches, key=lambda it: (it.get('episode', 0), self.convert_size(it.get('size', 0), silent=True))) if matches else [] except Exception: logger.error(traceback.format_exc()) @@ -3643,6 +3795,7 @@ def AH_find_btdigg_findvideos(self, item, matches=[], domain_alt=channel_py, **A found_list = AH_find_btdigg_ENTRY_from_BTDIGG(self, title=item.contentSerieName or item.contentTitle, contentType=item.contentType, matches=matches, item=item.clone(), reset=False, **AHkwargs) for found_item in found_list: + if item.infoLabels['tmdb_id'] and found_item.get('tmdb_id', item.infoLabels['tmdb_id']) != item.infoLabels['tmdb_id']: continue if found_item and found_item.get('matches_cached'): for matches_cached in found_item['matches_cached']: if matches_cached.get('url') and matches_cached['url'] in str(matches): continue @@ -3653,7 +3806,7 @@ def AH_find_btdigg_findvideos(self, item, matches=[], domain_alt=channel_py, **A found = True else: found_item = list(found_item.values())[0] - if found_item and found_item.get('episode_list') and found_item.get('tmdb_id', '') == item.infoLabels['tmdb_id']: + if found_item and found_item.get('episode_list'): for epi, episodio in found_item.get('episode_list', {}).items(): if episodio.get('season', 0) == item.contentSeason and episodio.get('episode', 0) == item.contentEpisodeNumber: for matches_cached in episodio.get('matches_cached', []): @@ -3804,10 +3957,9 @@ def AH_find_btdigg_findvideos(self, item, matches=[], domain_alt=channel_py, **A q_match = True if q_match: continue elem_json['quality'] = '%s%s' % (elem_json['quality'], btdigg_label) - elem_json['torrent_info'] = elem.get('size', '').replace('GB', 'G·B').replace('Gb', 'G·b')\ - .replace('MB', 'M·B').replace('Mb', 'M·b').replace('.', ',')\ - .replace(btdigg_label_B, '') - title = elem.get('title', '').replace(btdigg_label_B, '').replace('- ', '').replace(elem.get('quality', ''), '') + elem_json['torrent_info'] = clean_title(elem.get('size', ''), torrent_info=True).replace(btdigg_label_B, '') + title = clean_title(elem.get('title', '')).replace(btdigg_label_B, '').replace('- ', '')\ + 
.replace(elem.get('quality', ''), '') title = re.sub(r'(?i)BTDigg\s*|-\s+|\[.*?\]|Esp\w*\s*|Cast\w*\s*|Lat\w*\s*|span\w*' , '', title) elem_json['torrent_info'] += ' (%s)' % title elem_json['size'] = elem.get('size', '').replace(btdigg_label_B, '')\ @@ -3835,6 +3987,12 @@ def AH_find_btdigg_findvideos(self, item, matches=[], domain_alt=channel_py, **A and matches_cached.get('password', {}): item.password = matches_cached.get('password', {}) + if elem.get('tmdb_id'): elem_json['tmdb_id'] = elem['tmdb_id'] + else: itemO, elem_json = set_tmdb_to_json(elem_json, title_search=title_search, title=title, contentType=item.contentType) + if item.infoLabels['tmdb_id'] and item.infoLabels['tmdb_id'] != elem_json.get('tmdb_id'): + if DEBUG: logger.debug('TMDB_ID diff: %s / %s' % (item.infoLabels['tmdb_id'], elem_json.get('tmdb_id'))) + continue + matches.append(elem_json.copy()) except Exception: @@ -3842,7 +4000,7 @@ def AH_find_btdigg_findvideos(self, item, matches=[], domain_alt=channel_py, **A continue if matches_len == len(matches): matches = AH_find_btdigg_matches(item, matches, **AHkwargs) - if self: matches = sorted(matches, key=lambda it: (self.convert_size(it.get('size', 0)))) if matches else [] + if self: matches = sorted(matches, key=lambda it: (self.convert_size(it.get('size', 0), silent=True))) if matches else [] except Exception: logger.error(traceback.format_exc()) diff --git a/plugin.video.alfa/modules/downloads.py b/plugin.video.alfa/modules/downloads.py index 34eb1ac8b..5ece88924 100755 --- a/plugin.video.alfa/modules/downloads.py +++ b/plugin.video.alfa/modules/downloads.py @@ -70,6 +70,13 @@ def mainlist(item): if item.remote_download: remote_download = item.remote_download change_to_remote(item, lookup=True) + remote_download_dict = config.get_setting("downloads_remote_download", default={}) + if remote_download_dict and not isinstance(remote_download_dict, dict): + try: + remote_download_dict = eval(remote_download_dict) + except Exception as e: + logger.error('"downloads_remote_download" ERROR: %s: %s' % (str(e), str(remote_download_dict))) + remote_download_dict = [] # Lista de archivos for file in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): @@ -224,8 +231,8 @@ def mainlist(item): contentType=item.contentType, contentChannel=item.contentChannel, thumbnail=get_thumb("folder.png"), contentSerieName=item.contentSerieName, remote_download=remote_download, text_color="red")) - if config.get_setting("remote_download", "downloads", default={}) and HOST == 'Local': - for remote_domain, params in list(config.get_setting("remote_download", "downloads").items()): + if remote_download_dict and HOST == 'Local': + for remote_domain, params in list(remote_download_dict.items()): itemlist.insert(0, item.clone(action="change_to_remote", title='[COLOR limegreen]Descargas Remotas en [/COLOR][COLOR gold][B]%s[/B][/COLOR]' % remote_domain.capitalize(), thumbnail=get_thumb("on_the_air.png"), @@ -341,13 +348,18 @@ def browser(item): torrent_dirs(item) torrent_paths = TORRENT_PATHS + torrent_paths_list = [] + torrent_paths_list_seen = [] if config.get_setting("downloads_show_torrent_paths", default=True): torrent_paths_list = config.get_setting("downloads_torrent_paths_list", default=[]) + if torrent_paths_list and not isinstance(torrent_paths_list, list): + try: + torrent_paths_list = eval(torrent_paths_list) + except Exception as e: + logger.error('"downloads_torrent_paths_list" ERROR: %s: %s' % (str(e), str(torrent_paths_list))) + torrent_paths_list = [] if HOST != 'Local': torrent_paths_list = 
diff --git a/plugin.video.alfa/modules/downloads.py b/plugin.video.alfa/modules/downloads.py
index 34eb1ac8b..5ece88924 100755
--- a/plugin.video.alfa/modules/downloads.py
+++ b/plugin.video.alfa/modules/downloads.py
@@ -70,6 +70,13 @@ def mainlist(item):
     if item.remote_download:
         remote_download = item.remote_download
         change_to_remote(item, lookup=True)
+    remote_download_dict = config.get_setting("downloads_remote_download", default={})
+    if remote_download_dict and not isinstance(remote_download_dict, dict):
+        try:
+            remote_download_dict = eval(remote_download_dict)
+        except Exception as e:
+            logger.error('"downloads_remote_download" ERROR: %s: %s' % (str(e), str(remote_download_dict)))
+            remote_download_dict = []
 
     # Lista de archivos
     for file in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)):
@@ -224,8 +231,8 @@ def mainlist(item):
                                      contentType=item.contentType, contentChannel=item.contentChannel,
                                      thumbnail=get_thumb("folder.png"), contentSerieName=item.contentSerieName,
                                      remote_download=remote_download, text_color="red"))
-    if config.get_setting("remote_download", "downloads", default={}) and HOST == 'Local':
-        for remote_domain, params in list(config.get_setting("remote_download", "downloads").items()):
+    if remote_download_dict and HOST == 'Local':
+        for remote_domain, params in list(remote_download_dict.items()):
             itemlist.insert(0, item.clone(action="change_to_remote",
                             title='[COLOR limegreen]Descargas Remotas en [/COLOR][COLOR gold][B]%s[/B][/COLOR]' % remote_domain.capitalize(),
                             thumbnail=get_thumb("on_the_air.png"),
@@ -341,13 +348,18 @@ def browser(item):
         torrent_dirs(item)
     torrent_paths = TORRENT_PATHS
 
+    torrent_paths_list = []
+    torrent_paths_list_seen = []
     if config.get_setting("downloads_show_torrent_paths", default=True):
         torrent_paths_list = config.get_setting("downloads_torrent_paths_list", default=[])
+        if torrent_paths_list and not isinstance(torrent_paths_list, list):
+            try:
+                torrent_paths_list = eval(torrent_paths_list)
+            except Exception as e:
+                logger.error('"downloads_torrent_paths_list" ERROR: %s: %s' % (str(e), str(torrent_paths_list)))
+                torrent_paths_list = []
         if HOST != 'Local':
            torrent_paths_list = [['%s' % torrent_paths['TORR_client'].lower(), '%s' % torrent_paths[torrent_paths['TORR_client'].upper()]]]
-        else:
-            torrent_paths_list = []
-    torrent_paths_list_seen = []
 
     contentPlot = '[COLOR limegreen]Idiomas: [/COLOR]%s\n[COLOR limegreen]Calidad: [/COLOR]%s\n\n'
     plot = '[COLOR gold][B]Tamaño:[/COLOR][/B] %s\n\n[COLOR gold][B]Ruta de descarga:[/COLOR][/B]\n\n    %s'
     TITLE_VIDEO = "%s %s %s %s %s"
@@ -815,6 +827,12 @@ def download_auto(item, start_up=False):
     second_pass = False
 
     move_to_remote = config.get_setting("downloads_move_to_remote", default=[])
+    if move_to_remote and not isinstance(move_to_remote, list):
+        try:
+            move_to_remote = eval(move_to_remote)
+        except Exception as e:
+            logger.error('"downloads_move_to_remote" ERROR: %s: %s' % (str(e), str(move_to_remote)))
+            move_to_remote = []
     filelist = sorted(filetools.listdir(DOWNLOAD_LIST_PATH))
 
     for fichero in filelist:
@@ -1136,7 +1154,7 @@ def delete_torrent_session(item, delete_RAR=True, action='delete'):
         update_control(item.path, {"downloadStatus": item.downloadStatus, "downloadProgress": downloadProgress,
                                    "downloadQueued": 0, "downloadServer": {}, "url": item.url},
                        function='delete_torrent_session_aft')
-        config.set_setting("RESTART_DOWNLOADS", True, "downloads")  # Forzamos restart downloads
+        config.set_setting("downloads_RESTART_DOWNLOADS", True)  # Forzamos restart downloads
 
     return torr_data, deamon_url, index
 
@@ -1426,12 +1444,12 @@ def sort_torrents(play_items, emergency_urls=False, channel='', torrent_info=[])
                 logger.error('Size ERROR: %s: %s' % (play_item, size))
                 continue
             play_items_torrent.append([play_item, size, quality])
-
+
     if play_items_torrent:
         size_order = config.get_setting("torrent_quality", default=0)
        if size_order:
            play_items_torrent = sorted(play_items_torrent, reverse=True, key=lambda it: (float(it[1])))     # clasificamos
-            if size_order == 1 and len(play_items_torrent) > 2:          # Tomamos la segunda calidad
+            if size_order == 1 and len(play_items_torrent) > 2 and '4k' in play_items_torrent[0][2].lower():   # Tomamos calidad 2
                play_items_torrent[0][1] = 0.0          # Ponemos el de más calidad al final de la lista
                play_items_torrent = sorted(play_items_torrent, reverse=True, key=lambda it: (float(it[1])))  # RE-clasificamos
            else:
@@ -1509,7 +1527,7 @@ def sort_torrents(play_items, emergency_urls=False, channel='', torrent_info=[])
         if play_item.contentChannel not in blocked_channels:
             if size_order:
                 play_items_torrent = sorted(play_items_torrent, reverse=True, key=lambda it: (float(it.size_torr)))     # clasificamos
-                if size_order == 1 and len(play_items_torrent) > 2:        # Tomamos la segunda calidad
+                if size_order == 1 and len(play_items_torrent) > 2 and '4k' in play_items_torrent[0].quality.lower():   # Tomamos calidad 2
                     play_items_torrent[0].size_torr = 0.0          # Ponemos el de más calidad al final de la lista
                     play_items_torrent = sorted(play_items_torrent, reverse=True, key=lambda it: (float(it.size_torr)))  # RE-clasificamos
             else:
@@ -1517,7 +1535,7 @@ def sort_torrents(play_items, emergency_urls=False, channel='', torrent_info=[])
         else:
             if size_order:
                 play_items_torrent = sorted(play_items_torrent, reverse=True, key=lambda it: it.quality)     # clasificamos
-                if size_order == 1 and len(play_items_torrent) > 2:        # Tomamos la segunda calidad
+                if size_order == 1 and len(play_items_torrent) > 2 and '4k' in play_items_torrent[0].quality.lower():   # Tomamos calidad 2
                     play_items_torrent[0].size_torr = 0.0          # Ponemos el de más calidad al final de la lista
                     play_items_torrent = sorted(play_items_torrent, reverse=True, key=lambda it: it.quality)  # RE-clasificamos
             else:
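The three sort_torrents() hunks above narrow the "second quality" option: the top-ranked torrent is now only demoted when it is a 4K rip, so lists without any 4K entry keep their best quality first. Condensed into a sketch, assuming the `[play_item, size, quality]` list layout of the first hunk:

    def order_by_quality(torrents, size_order):
        """torrents: list of [play_item, size, quality]; size_order 1 = 'second quality'."""
        torrents = sorted(torrents, reverse=True, key=lambda it: float(it[1]))
        if size_order == 1 and len(torrents) > 2 and '4k' in torrents[0][2].lower():
            torrents[0][1] = 0.0  # push the 4K leader to the tail...
            torrents = sorted(torrents, reverse=True, key=lambda it: float(it[1]))  # ...and re-rank
        return torrents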
@@ -1545,7 +1563,7 @@ def download_from_url(url, item):
         return {"downloadStatus": STATUS_CODES.error}
 
     item.downloadQueued = 0
-    config.set_setting("DOWNLOADER_in_use", True, "downloads")  # Marcamos Downloader en uso
+    config.set_setting("downloads_DOWNLOADER_in_use", True)  # Marcamos Downloader en uso
     # Obtenemos la ruta de descarga y el nombre del archivo
     item.downloadFilename = item.downloadFilename.replace('/','-')
     item.downloadFilename = scrapertools.slugify(item.downloadFilename, strict=False)
@@ -1591,7 +1609,7 @@ def download_from_url(url, item):
     if status == STATUS_CODES.completed:
         move_to_library(item.clone(downloadFilename=file))
 
-    config.set_setting("DOWNLOADER_in_use", False, "downloads")  # Marcamos Downloader como disponible
+    config.set_setting("downloads_DOWNLOADER_in_use", False)  # Marcamos Downloader como disponible
     return {"downloadUrl": d.download_url, "downloadStatus": status, "downloadSize": d.size[0],
             "downloadQueued": 0, "downloadProgress": d.progress, "downloadCompleted": d.downloaded[0],
             "downloadFilename": file}
@@ -1965,7 +1983,9 @@ def get_episodes(item):
     season = item.infoLabels['season']
     sesxepi = []
     event = False
-    channel_json = {}
+    channel_settings = filetools.join(config.get_data_path(), "settings_channels", item.contentChannel + "_data.json")
+    channel_json = jsontools.load(filetools.read(channel_settings)) if filetools.exists(channel_settings) else {}
+    Window_IsMedia = xbmc.getCondVisibility('Window.IsMedia')
 
     if item.infoLabels['tmdb_id'] and item.infoLabels['tmdb_id'] == null:
         event = True                                                # Si viene de un canal de deportes o similar
@@ -1994,6 +2014,12 @@ def get_episodes(item):
 
     # Miramos si los episodio se van a mover a un site remoto, con lo que no pueden usar archivos locales
     move_to_remote = config.get_setting("downloads_move_to_remote", default=[])
+    if move_to_remote and not isinstance(move_to_remote, list):
+        try:
+            move_to_remote = eval(move_to_remote)
+        except Exception as e:
+            logger.error('"downloads_move_to_remote" ERROR: %s: %s' % (str(e), str(move_to_remote)))
+            move_to_remote = []
     for serie, address in move_to_remote:
         if serie.lower() in item.contentSerieName.lower():          # Si está en la lista es que es remoto
             remote = True
@@ -2002,10 +2028,11 @@ def get_episodes(item):
     # El item que pretendemos descargar YA es un episodio
     if item.contentType == "episode" and (not item.sub_action or item.sub_action not in sub_action):
         episodes = [item.clone()]
-        if item.strm_path and not remote:
+        if item.strm_path and (not remote or Window_IsMedia == 0):
             episode_local = True
-            if xbmc.getCondVisibility('Window.IsMedia') == 1:
-                episode_sort = False
+            if Window_IsMedia == 0: remote = False
+            elif Window_IsMedia == 1: episode_sort = False
+
 
     # El item es uma serie o temporada
     elif item.contentType in ["tvshow", "season"] or item.sub_action in sub_action:
@@ -2100,9 +2127,6 @@ def get_episodes(item):
             serie_listdir = sorted(filetools.listdir(serie_path))
             episodes = []
             episode_local = True
-            channel_settings = filetools.join(config.get_data_path(), "settings_channels", item.contentChannel + "_data.json")
-            if filetools.exists(channel_settings):
-                channel_json = jsontools.load(filetools.read(channel_settings))
 
             for file in serie_listdir:
                 if not file.endswith('.json'):
@@ -2147,7 +2171,7 @@ def get_episodes(item):
             if item.btdigg: del item.btdigg
             if item.quality != 'HDTV': item.quality = 'HDTV-720p'
 
-        episodes = getattr(channel, item.contentAction)(item)       # Si no viene de Videoteca, descargamos desde la web
+        episodes = getattr(channel, item.contentAction)(item)      # Si no viene de Videoteca, descargamos desde la web
 
     itemlist = []
 
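The same rehydration guard now precedes every consumer of the migrated settings in the hunks above (downloads_remote_download, downloads_torrent_paths_list, downloads_move_to_remote), because the values are persisted as strings (see the `str(download_paths)` write in custom_code.py below). The patch uses eval(); a sketch of the same idea with ast.literal_eval, which is the safer choice when the stored value is a plain Python literal (helper name hypothetical):

    import ast

    def rehydrate_setting(raw, default=None):
        """Turn a setting stored as a stringified list/dict back into a container, failing soft."""
        default = [] if default is None else default
        if not raw or isinstance(raw, (list, dict)):
            return raw or default
        try:
            return ast.literal_eval(raw)  # unlike eval(), cannot execute arbitrary code
        except (ValueError, SyntaxError):
            return default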
diff --git a/plugin.video.alfa/modules/videolibrary.py b/plugin.video.alfa/modules/videolibrary.py
index 07841a347..6252ba419 100644
--- a/plugin.video.alfa/modules/videolibrary.py
+++ b/plugin.video.alfa/modules/videolibrary.py
@@ -799,8 +799,9 @@ def play(item):
     logger.info()
     # logger.debug("item:\n" + item.tostring('\n'))
 
+    module_type = 'modules' if item.contentChannel in ['downloads'] else 'channels'
     if item.contentChannel != "local" and item.channel_recovery != 'url':
-        channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel])
+        channel = __import__('%s.%s' % (module_type, item.contentChannel), fromlist=["%s.%s" % (module_type, item.contentChannel)])
 
         if hasattr(channel, "play"):
             itemlist = getattr(channel, "play")(item)
diff --git a/plugin.video.alfa/platformcode/custom_code.py b/plugin.video.alfa/platformcode/custom_code.py
index 3c7adf697..d1ec374e5 100644
--- a/plugin.video.alfa/platformcode/custom_code.py
+++ b/plugin.video.alfa/platformcode/custom_code.py
@@ -906,7 +906,7 @@ def reactivate_unrar(init=False, mute=True):
             if rar_control and len(rar_control['rar_files']) == 1:
                 ret = filetools.remove(filetools.join(save_path_videos, '_rar_control.json'), silent=True)
 
-    config.set_setting("torrent_paths_list", download_paths, channel="downloads")
+    config.set_setting("downloads_torrent_paths_list", str(download_paths))
 
     search_for_unrar_in_error(download_paths, init=init)
 
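With downloads.py now living in modules/ instead of channels/, videolibrary.play() above (and launcher.py below) must pick the right package before importing the handler. A sketch of the routing rule using importlib (function name hypothetical; the patch itself builds the dotted path for __import__):

    import importlib

    def resolve_handler(content_channel):
        """Route module-style content (e.g. downloads) to modules.*, scrapers to channels.*."""
        package = 'modules' if content_channel in ('downloads',) else 'channels'
        return importlib.import_module('%s.%s' % (package, content_channel))

e.g. resolve_handler('downloads').play(item) follows the same path the patched code takes.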
diff --git a/plugin.video.alfa/platformcode/envtal.py b/plugin.video.alfa/platformcode/envtal.py
index bfb4a2175..c2c952bcd 100644
--- a/plugin.video.alfa/platformcode/envtal.py
+++ b/plugin.video.alfa/platformcode/envtal.py
@@ -329,9 +329,6 @@ class MEMORYSTATUS(ctypes.Structure):
     torrent_id = config.get_setting("torrent_client", server="torrent", default=0)
     environment['torrentcli_option'] = str(torrent_id)
     torrent_options = platformtools.torrent_client_installed()
-    if lib_path != 'Inactivo':
-        torrent_options = [': MCT'] + torrent_options
-    torrent_options = [': BT'] + torrent_options
     environment['torrent_list'].append({'Torrent_opt': str(torrent_id), 'Libtorrent': lib_path, \
                                         'RAR_Auto': str(environment['torrentcli_rar']), \
                                         'RAR_backgr': str(environment['torrentcli_backgr']), \
@@ -344,9 +341,7 @@ class MEMORYSTATUS(ctypes.Structure):
             cliente = dict()
             cliente['D_load_Path'] = ''
             cliente['Libre'] = '?'
-            cliente['Plug_in'] = scrapertools.find_single_match(torrent_option, ':\s*(\w+)')
-            if cliente['Plug_in'] not in ['BT', 'MCT']: cliente['Plug_in'] = cliente['Plug_in'].capitalize()
-
+            cliente['Plug_in'] = scrapertools.find_single_match(torrent_option, ':\s*(\w+)').capitalize()
             cliente['D_load_Path'] = torrent_paths[cliente['Plug_in'].upper()]
             cliente['D_load_Path_perm'] = filetools.file_info(cliente['D_load_Path'])
             cliente['Buffer'] = str(torrent_paths[cliente['Plug_in'].upper()+'_buffer'])
diff --git a/plugin.video.alfa/platformcode/launcher.py b/plugin.video.alfa/platformcode/launcher.py
index 86e91203f..3fb0b3284 100644
--- a/plugin.video.alfa/platformcode/launcher.py
+++ b/plugin.video.alfa/platformcode/launcher.py
@@ -128,6 +128,9 @@ def monkey_patch_modules(item):
     if item.module in content_modules:
         item.moduleContent = True
 
+    if item.contentChannel in modules:
+        item.contentModule = item.contentChannel
+
     return item
 
@@ -216,9 +219,9 @@ def run(item=None):
         if os.path.exists(function_file):
             try:
                 function = __import__('%s.%s' % (item.folder, item.function), None,
-                                      None, ["%s.%s" % (item.folder, item.function)])
+                                      None, ["%s.%s" % (item.folder, item.function)])
             except ImportError:
-                exec("import %s." + item.function + " as function")
+                exec(("import %s." % item.folder) + item.function + " as function")
 
         if function:
             logger.info("Running function %s(%s) | %s" % (function.__name__, item.options, function.__file__))
@@ -338,7 +341,7 @@ def run(item=None):
             # logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))
 
             # First checks if channel has a "play" function
-            if hasattr(module, 'play'):
+            if module and hasattr(module, 'play'):
                 logger.info("Executing channel 'play' method")
                 playlist = module.play(item)
                 b_favourite = item.isFavourite
@@ -367,7 +370,7 @@ def run(item=None):
         elif item.action == "findvideos":
             from core import servertools
             # First checks if channel has a "findvideos" function
-            if hasattr(module, 'findvideos'):
+            if module and hasattr(module, 'findvideos'):
                 itemlist = getattr(module, item.action)(item)
                 itemlist = servertools.filter_servers(itemlist)
 
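The @@ -216 hunk above also fixes a latent bug in the import fallback: the old concatenation never interpolated the '%s' placeholder, so exec() was handed syntactically invalid source. A short demonstration of the before/after strings:

    folder, function = 'channels', 'search'

    # Before: '%s' is never filled in, so exec() sees "import %s.search as function"
    broken = "import %s." + function + " as function"   # raises SyntaxError when exec'd

    # After: the folder is interpolated before concatenation
    fixed = ("import %s." % folder) + function + " as function"
    assert fixed == "import channels.search as function"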
@@ -437,27 +440,39 @@ def run(item=None):
             # \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/
             logger.info("Executing '%s' method" % item.action)
             # Get itemlist from module.action
-            if hasattr(module, item.action):
+            if module and hasattr(module, item.action):
                 itemlist = getattr(module, item.action)(item)
             # Run the method from the contentChannel
             else:
+                module_type = {
+                    "folder" : "modules" if item.contentModule else "channels",
+                    "type" : "module" if item.contentModule else "channel"
+                }
+                if item.contentModule: del item.contentModule
+
+                logger.info("item.module")
+                module_name = item.contentChannel
+                module_package = '%s.%s' % (module_type["folder"], module_name)
                 module_file = os.path.join(config.get_runtime_path(),
-                                           'channels', item.contentChannel + ".py")
+                                           module_type['folder'], module_name + ".py")
                 module = None
+
                 if os.path.exists(module_file):
                     try:
-                        module = __import__('channels.%s' % item.contentChannel, None,
-                                            None, ["channels.%s" % item.contentChannel])
+                        module = __import__(module_package, None,
+                                            None, [module_package])
                     except ImportError:
-                        exec("import channels." + item.contentChannel + " as channel")
+                        exec("import " + module_package + " as module")
 
                 if not module:
-                    logger.error('Channel "%s" missing (%s: %s) or not imported: %s' \
-                                 % (item.contentChannel, module_file, os.path.exists(module_file), module))
+                    logger.error('%s "%s" missing (%s: %s) or not imported: %s' \
+                                 % (module_type['folder'], module_name, module_file, os.path.exists(module_file), module))
 
-                logger.info("Running channel %s | %s" % (module.__name__, module.__file__))
-                itemlist = getattr(module, item.action)(item)
+                else:
+                    logger.info("Running %s %s | %s" % (module_type['type'], module.__name__ if module else module_name,
+                                                        module.__file__ if module else module_file))
+                    itemlist = getattr(module, item.action)(item)
 
         if config.get_setting('trakt_sync'):
             from core import trakt_tools
@@ -713,18 +728,41 @@ def play_from_library(item):
             return
     else:
         item = videolibrary.play(itemlist[seleccion])[0]
+        item = monkey_patch_modules(item)
 
         if item.action == 'play':
             platformtools.play_video(item)
+
         else:
-            channel_file = os.path.join(config.get_runtime_path(),
-                                        'channels', item.contentChannel + ".py")
-            channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
-            if not channel:
-                logger.error('Channel "%s" missing (%s: %s) or not imported: %s' \
-                             % (item.contentChannel, channel_file, os.path.exists(channel_file), channel))
-            if hasattr(channel, item.action):
-                play_items = getattr(channel, item.action)(item.clone(action=item.action,
-                                                           channel=item.contentChannel))
+            module_type = {
+                "folder" : "modules" if item.contentModule else "channels",
+                "type" : "module" if item.contentModule else "channel"
+            }
+            if item.contentModule: del item.contentModule
+
+            logger.info("item.%s" % module_type['type'])
+            module_name = item.contentChannel
+            module_package = '%s.%s' % (module_type["folder"], module_name)
+            module_file = os.path.join(config.get_runtime_path(),
+                                       module_type['folder'], module_name + ".py")
+            module = None
+
+            if os.path.exists(module_file):
+                try:
+                    module = __import__(module_package, None,
+                                        None, [module_package])
+                except ImportError:
+                    exec("import " + module_package + " as module")
+
+            if not module:
+                logger.error('%s "%s" missing (%s: %s) or not imported: %s' \
+                             % (module_type['folder'], module_name, module_file, os.path.exists(module_file), module))
+
+            else:
+                logger.info("Running %s %s | %s" % (module_type['type'], module.__name__ if module else module_name,
+                                                    module.__file__ if module else module_file))
+                if hasattr(module, item.action):
+                    play_items = getattr(module, item.action)(item.clone(action=item.action, channel=module_name,
+                                                              module=module_name if module_type['type'] == 'module' else ''))
 
     return
 
     if (platformtools.is_playing() and item.action) or item.server == 'torrent' or autoplay.is_active(item.contentChannel):
diff --git a/plugin.video.alfa/platformcode/platformtools.py b/plugin.video.alfa/platformcode/platformtools.py
index 3f6c47bbe..3c982391d 100644
--- a/plugin.video.alfa/platformcode/platformtools.py
+++ b/plugin.video.alfa/platformcode/platformtools.py
@@ -311,7 +311,7 @@ def render_items(itemlist, parent_item):
 
     # Recorremos el itemlist
     categories_channel = []
-    if itemlist and not itemlist[0].module and itemlist[0].channel:
+    if itemlist and not itemlist[0].module and itemlist[0].channel and itemlist[0].channel != 'downloads':
         categories_channel = channeltools.get_channel_parameters(itemlist[0].channel.lower()).get('categories', [])
 
     temp_list = list()
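Both launcher resolution paths above now log and bail out instead of calling getattr() on a module that failed to import, which previously died with an AttributeError on None. The guard pattern, reduced to a sketch (names hypothetical):

    import logging
    logger = logging.getLogger(__name__)

    def run_action(module, action, item):
        """Execute module.action(item), failing soft if the import failed."""
        if not module:
            logger.error('handler missing, skipping action "%s"', action)
            return []  # empty itemlist instead of an AttributeError crash
        if hasattr(module, action):
            return getattr(module, action)(item)
        return []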
@@ -2462,7 +2462,7 @@ def rar_control_mng(item, xlistitem, mediaurl, rar_files, torr_client, password,
         item.downloadProgress = 100  # ... si no, se da por terminada la monitorización
         item.downloadQueued = 0
         update_control(item, function='rar_control_mng')
-        config.set_setting("RESTART_DOWNLOADS", True, "downloads")  # Forzamos restart downloads
+        config.set_setting("downloads_RESTART_DOWNLOADS", True)  # Forzamos restart downloads
         if item.downloadStatus not in [3, 4, 5]: itemlist_refresh()
 
     # Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan
diff --git a/plugin.video.alfa/servers/torrent.py b/plugin.video.alfa/servers/torrent.py
index 533cb099c..6590507c3 100755
--- a/plugin.video.alfa/servers/torrent.py
+++ b/plugin.video.alfa/servers/torrent.py
@@ -1692,7 +1692,7 @@ def mark_torrent_as_watched():
 
     # Si en la actualización de la Videoteca no se ha completado, encolo las descargas AUTO pendientes
     try:
-        from channels import downloads
+        from modules import downloads
         item_dummy = Item()
         threading.Thread(target=downloads.download_auto, args=(item_dummy, True)).start()    # Encolamos las descargas automáticas
         if monitor and monitor.waitForAbort(5):
@@ -1748,8 +1748,8 @@ def restart_unfinished_downloads():
     global torrent_paths
 
     try:
-        config.set_setting("DOWNLOADER_in_use", False, "downloads")  # Marcamos Downloader como disponible
-        config.set_setting("RESTART_DOWNLOADS", False, "downloads")  # Marcamos restart downloads como disponible
+        config.set_setting("downloads_DOWNLOADER_in_use", False)  # Marcamos Downloader como disponible
+        config.set_setting("downloads_RESTART_DOWNLOADS", False)  # Marcamos restart downloads como disponible
         config.set_setting("UNRAR_in_use", False, server="torrent")  # Marcamos unRAR como disponible
         config.set_setting("CAPTURE_THRU_BROWSER_in_use", '', server="torrent")  # Marcamos Capture_thru_browser como disponible
         init = True
@@ -1825,7 +1825,7 @@ def restart_unfinished_downloads():
 
             if item.downloadStatus in [1, 3]:
                 continue
-            if item.server != 'torrent' and config.get_setting("DOWNLOADER_in_use", "downloads"):
+            if item.server != 'torrent' and config.get_setting("downloads_DOWNLOADER_in_use"):
                 continue
             if torr_client not in ['TORRENTER', 'QUASAR', 'ELEMENTUM', 'TORREST'] and item.downloadProgress != 0:
                 continue
@@ -1867,7 +1867,7 @@ def restart_unfinished_downloads():
                 logger.info('RECUPERANDO descarga de %s: %s' % (torr_client, title))
                 logger.info("RECUPERANDO: Status: %s | Progress: %s | Queued: %s | File: %s | Title: %s: %s" % \
                             (item.downloadStatus, item.downloadProgress, item.downloadQueued, fichero, torr_client, title))
-                from channels import downloads
+                from modules import downloads
                 threading.Thread(target=downloads.start_download, args=(item,)).start()    # Creamos un Thread independiente
                 if monitor and monitor.waitForAbort(5):
                     return
@@ -1898,9 +1898,9 @@ def restart_unfinished_downloads():
             if xbmc.abortRequested: return
             xbmc.sleep(5*1000)
 
-            if config.get_setting("RESTART_DOWNLOADS", "downloads", default=False):  # ... a menos que se active externamente
-                logger.info('RESTART_DOWNLOADS Activado externamente')
-                config.set_setting("RESTART_DOWNLOADS", False, "downloads")
+            if config.get_setting("downloads_RESTART_DOWNLOADS", default=False):  # ... a menos que se active externamente
+                logger.info('downloads_RESTART_DOWNLOADS Activado externamente')
+                config.set_setting("downloads_RESTART_DOWNLOADS", False)
             break
     except Exception:
         logger.error(traceback.format_exc())
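Because the per-channel flags (RESTART_DOWNLOADS, DOWNLOADER_in_use, torrent_paths_list) became prefixed global settings, every reader and writer across downloads.py, custom_code.py, platformtools.py and servers/torrent.py had to be renamed in lockstep; a call site left on the old name would silently read a stale default. A hypothetical compatibility shim that would tolerate both generations of a key during such a migration (the patch instead renames all call sites at once):

    def get_downloads_flag(name, default=False):
        """Read a downloads flag, preferring the new prefixed key."""
        value = config.get_setting('downloads_%s' % name, default=None)
        if value is None:  # not migrated yet: fall back to the legacy per-channel key
            value = config.get_setting(name, 'downloads', default=default)
        return value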