diff --git a/CHANGELOG.md b/CHANGELOG.md
index 23f0074..7aa3793 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+* [v2.90.0](https://github.com/a4k-openproject/a4kScrapers/releases/tag/a4kScrapers-2.90.0):
+  * add mediafusion
+  * remove torrentioelf (eol & too many issues)
+
 * [v2.89.0](https://github.com/a4k-openproject/a4kScrapers/releases/tag/a4kScrapers-2.89.0):
   * bring back magnetdl
 
diff --git a/meta.json b/meta.json
index 71ed054..685d0f4 100644
--- a/meta.json
+++ b/meta.json
@@ -1,6 +1,6 @@
 {
   "author": "Unknown",
-  "version":"2.89.0",
+  "version":"2.90.0",
   "name":"a4kScrapers",
   "update_directory": "https://github.com/a4k-openproject/a4kScrapers/archive/",
   "remote_meta": "https://raw.githubusercontent.com/newt-sc/a4kScrapers/master/meta.json",
diff --git a/providerModules/a4kScrapers/source_utils.py b/providerModules/a4kScrapers/source_utils.py
index f8482d4..20e1824 100644
--- a/providerModules/a4kScrapers/source_utils.py
+++ b/providerModules/a4kScrapers/source_utils.py
@@ -275,6 +275,7 @@ def clean_release_title_with_simple_info(title, simple_info):
     title = decode_text_py2(title)
     title = strip_non_ascii_and_unprintable(title)
+    title = re.sub(r'www.*?📂 - ', '', title)
 
     year = simple_info.get('year', '')
     title = clean_year_range(title, year) + ' '
 
@@ -284,6 +285,12 @@ def clean_release_title_with_simple_info(title, simple_info):
     title = remove_sep(title, simple_info['query_title'])
     title = clean_title(title) + ' '
 
+    # remove packs if season is currently airing due to incomplete season packs
+    packs = re.search(r'(?:s\d{1,2}\W|season|complete|series)', title, re.IGNORECASE)
+    if simple_info.get('is_airing') and packs:
+        if all(t not in simple_info['query_title'] for t in ['season', 'complete', 'series']):
+            return ''
+
     for group in release_groups_blacklist:
         target = ' %s ' % group
         if target not in (simple_info['query_title'] + ' ') and target in (title + ' '):
diff --git a/providerModules/a4kScrapers/urls.json b/providerModules/a4kScrapers/urls.json
index 68e915f..0dca7ee 100644
--- a/providerModules/a4kScrapers/urls.json
+++ b/providerModules/a4kScrapers/urls.json
@@ -116,12 +116,12 @@
       { "base": "https://torrentio.strem.fun" }
     ]
   },
-  "torrentioelf": {
+  "mediafusion": {
     "search": "/stream/{{category}}/%s.json",
     "cat_movie": "movie",
     "cat_episode": "series",
     "domains": [
-      { "base": "https://torrentio.elfhosted.com" }
+      { "base": "https://mediafusion.elfhosted.com" }
     ]
   },
   "torrentz2": {
diff --git a/providers/a4kScrapers/en/torrent/torrentioelf.py b/providers/a4kScrapers/en/torrent/mediafusion.py
similarity index 79%
rename from providers/a4kScrapers/en/torrent/torrentioelf.py
rename to providers/a4kScrapers/en/torrent/mediafusion.py
index 65ae45a..fce5e29 100644
--- a/providers/a4kScrapers/en/torrent/torrentioelf.py
+++ b/providers/a4kScrapers/en/torrent/mediafusion.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 
 from providerModules.a4kScrapers import core
+import re
 
 class sources(core.DefaultSources):
     def __init__(self, *args, **kwargs):
@@ -32,8 +33,8 @@ def _search_request(self, url, query):
         query = self._imdb
         if not self.is_movie_query():
             query += ':' + self.scraper.season_x + ':' + self.scraper.episode_x
-
-        params = 'language=english|debridoptions=nodownloadlinks,nocatalog'
+
+        params = ''
         if self._pm_apikey:
             params += '|premiumize=' + self._pm_apikey
         if self._rd_apikey:
@@ -41,7 +42,9 @@ def _search_request(self, url, query):
         if self._ad_apikey:
             params += '|alldebrid=' + self._ad_apikey
 
-        request_url = url.base + '/' + core.quote_plus(params) + (url.search % core.quote_plus(query))
+        config = 'eJwBYACf_92uqrL8vhmebCYzGgX6Q2BMyNhn5SMWS_XitAKLTUul8nAqQEcj0k2wPpBs1ceClvb4mT2darthTiMrk2XGFdUx3XR4MB5WJT3hZWla9v-cQY6bloboXr6BxVJfNgTC3xazL_8='
+        #request_url = url.base + '/' + core.quote_plus(params) + (url.search % core.quote_plus(query))
+        request_url = url.base + '/' + core.quote_plus(config) + (url.search % core.quote_plus(query))
 
         response = self._request.get(request_url)
         if response.status_code != 200:
@@ -51,8 +54,7 @@ def _search_request(self, url, query):
             results = core.json.loads(response.text)
         except Exception as e:
             self._request.exc_msg = 'Failed to parse json: %s' % response.text
-            return []
-
+            return []
         if not results or 'streams' not in results or len(results['streams']) == 0:
             return []
         else:
@@ -62,14 +64,17 @@ def _search_request(self, url, query):
     def _soup_filter(self, response):
         return response
 
     def _title_filter(self, el):
+        el['description'] = el['description'].replace('📂 - ', '').replace('📂 ', '').replace('/None', '').replace(' / ', '')
+        el['description'] = re.sub(r'www.*?📂 - ', '', el['description'])
         if not self.is_movie_query():
-            parts = el['title'].split('\n')
-            #if len(parts) > 1 and '👤' not in parts[1]:
+            el['description'] = re.sub(r'💾.*?💾', '💾', el['description'])
+            el['description'] = el['description'].replace('/', '\n')
+            parts = el['description'].split('\n')
             if len(parts) > 1 and '💾' not in parts[1]:
                 parts.pop(1)
             return '\n'.join(parts)
-        return el['title']
+        return el['description']
 
     def _info(self, el, url, torrent):
         if 'infoHash' in el:
@@ -84,14 +89,12 @@ def _info(self, el, url, torrent):
         if '[AD+]' in el['name']:
             torrent['debrid'] = 'AD'
-        torrent['size'] = core.source_utils.de_string_size(self.genericScraper.parse_size(el['title']))
-        torrent['seeds'] = self.genericScraper.parse_seeds(el['title'])
+        torrent['size'] = core.source_utils.de_string_size(self.genericScraper.parse_size(el['description']))
+        torrent['seeds'] = self.genericScraper.parse_seeds(el['description'])
 
         if '\n' in torrent['release_title']:
-            #torrent['release_title'] = torrent['release_title'].split('\n👤', 1)[0]
             torrent['release_title'] = torrent['release_title'].split('\n💾', 1)[0]
             if '\n' in torrent['release_title']:
                 torrent['release_title'] = torrent['release_title'].split('\n', 1)[1]
-
         return torrent
 
     def _set_apikeys(self, kwargs):
diff --git a/providers/a4kScrapers/en/torrent/torrentio.py b/providers/a4kScrapers/en/torrent/torrentio.py
index 7ae23d4..7d6064f 100644
--- a/providers/a4kScrapers/en/torrent/torrentio.py
+++ b/providers/a4kScrapers/en/torrent/torrentio.py
@@ -62,6 +62,7 @@ def _soup_filter(self, response):
         return response
 
     def _title_filter(self, el):
+        el['title'] = el['title'].replace(' / ', '')
         if not self.is_movie_query():
            parts = el['title'].split('\n')
            if len(parts) > 1 and '👤' not in parts[1]: