diff --git a/.env-sample b/.env-sample
index 3f7c294..a513a5f 100644
--- a/.env-sample
+++ b/.env-sample
@@ -1,31 +1,35 @@
-ADDON_ID=stremio.comet.fast # for Stremio
-ADDON_NAME=Comet # for Stremio
-FASTAPI_HOST=0.0.0.0
-FASTAPI_PORT=8000
-FASTAPI_WORKERS=1 # remove to destroy CPU -> max performances :)
-DASHBOARD_ADMIN_PASSWORD=CHANGE_ME # The password to access the dashboard with active connections and soon more...
-DATABASE_TYPE=sqlite # or postgresql if you know what you're doing
-DATABASE_URL=username:password@hostname:port # to connect to PostgreSQL
-DATABASE_PATH=data/comet.db # only change it if you know what it is - folders in path must exist - ignored if PostgreSQL used
-CACHE_TTL=86400 # cache duration in seconds
-DEBRID_PROXY_URL=http://127.0.0.1:1080 # https://github.com/cmj2002/warp-docker to bypass Debrid Services and Torrentio server IP blacklist
-INDEXER_MANAGER_TYPE=None # jackett or prowlarr or None if you want to disable it completely and use Zilean or Torrentio
-INDEXER_MANAGER_URL=http://127.0.0.1:9117
-INDEXER_MANAGER_API_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-INDEXER_MANAGER_TIMEOUT=60 # maximum time to obtain search results from indexer manager in seconds
-INDEXER_MANAGER_INDEXERS='["EXAMPLE1_CHANGETHIS", "EXAMPLE2_CHANGETHIS"]' # for jackett, get the names from https://github.com/Jackett/Jackett/tree/master/src/Jackett.Common/Definitions - for prowlarr you can write them like on the web dashboard
-GET_TORRENT_TIMEOUT=5 # maximum time to obtain the torrent info hash in seconds
-ZILEAN_URL=None # for DMM search - https://github.com/iPromKnight/zilean - ex: http://127.0.0.1:8181
-ZILEAN_TAKE_FIRST=500 # only change it if you know what it is
-SCRAPE_TORRENTIO=False # scrape Torrentio
-SCRAPE_MEDIAFUSION=False # scrape MediaFusion - has better results for Indian content
-MEDIAFUSION_URL=https://mediafusion.elfhosted.com # Allows you to scrape custom instances of MediaFusion
-PROXY_DEBRID_STREAM=False # Proxy Debrid Streams (very useful to use your debrid service on multiple IPs at same time)
-PROXY_DEBRID_STREAM_PASSWORD=CHANGE_ME # Secret password to enter on configuration page to prevent people from abusing your debrid stream proxy
-PROXY_DEBRID_STREAM_MAX_CONNECTIONS=-1 # IP-Based connection limit for the Debrid Stream Proxy (-1 = disabled)
-PROXY_DEBRID_STREAM_DEBRID_DEFAULT_SERVICE=realdebrid # if you want your users who use the Debrid Stream Proxy not to have to specify Debrid information, but to use the default one instead
-PROXY_DEBRID_STREAM_DEBRID_DEFAULT_APIKEY=CHANGE_ME # if you want your users who use the Debrid Stream Proxy not to have to specify Debrid information, but to use the default one instead
-TITLE_MATCH_CHECK=True # disable if you only use Torrentio / MediaFusion and are sure you're only scraping good titles, for example (keep it True if Zilean is enabled)
-REMOVE_ADULT_CONTENT=False # detect and remove adult content
-STREMTHRU_DEFAULT_URL=None # if you want your users to use StremThru without having to specify it
-CUSTOM_HEADER_HTML=None # only set it if you know what it is
+ADDON_ID=stremio.comet.fast # for Stremio
+ADDON_NAME=Comet # for Stremio
+FASTAPI_HOST=0.0.0.0
+FASTAPI_PORT=8000
+FASTAPI_WORKERS=1
+USE_GUNICORN=True # will use uvicorn if False or if on Windows
+DASHBOARD_ADMIN_PASSWORD=CHANGE_ME # The password to access the dashboard with active connections and soon more...
+DATABASE_TYPE=sqlite # or postgresql if you're making a Comet cluster
+DATABASE_URL=username:password@hostname:port # to connect to PostgreSQL
+DATABASE_PATH=data/comet.db # only change it if you know what it is - folders in path must exist - ignored if PostgreSQL used
+METADATA_CACHE_TTL=2592000 # metadata cache duration in seconds (30 days by default)
+TORRENT_CACHE_TTL=1296000 # torrent cache duration in seconds (15 days by default)
+DEBRID_CACHE_TTL=86400 # debrid availability cache duration in seconds (1 day by default)
+DEBRID_PROXY_URL=http://127.0.0.1:1080 # https://github.com/cmj2002/warp-docker to bypass Debrid Services and Torrentio server IP blacklist
+INDEXER_MANAGER_TYPE=none # jackett or prowlarr or none if you want to disable it completely and use Zilean or Torrentio
+INDEXER_MANAGER_URL=http://127.0.0.1:9117
+INDEXER_MANAGER_API_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+INDEXER_MANAGER_TIMEOUT=60 # maximum time to obtain search results from indexer manager in seconds
+INDEXER_MANAGER_INDEXERS='["EXAMPLE1_CHANGETHIS", "EXAMPLE2_CHANGETHIS"]' # for jackett, get the names from https://github.com/Jackett/Jackett/tree/master/src/Jackett.Common/Definitions - for prowlarr you can write them like on the web dashboard
+GET_TORRENT_TIMEOUT=5 # maximum time to obtain the torrent info hash in seconds
+DOWNLOAD_TORRENT_FILES=False # set to True to download torrent files instead of relying only on magnet link info (infohash and sources) - this ensures the file index is included in results for Jackett and Prowlarr torrents
+SCRAPE_ZILEAN=False # scrape Zilean/DMM
+ZILEAN_URL=https://zilean.elfhosted.com # for DMM search - https://github.com/iPromKnight/zilean - ex: http://127.0.0.1:8181
+SCRAPE_TORRENTIO=False # scrape Torrentio
+TORRENTIO_URL=https://torrentio.strem.fun # or https://knightcrawler.elfhosted.com if you prefer to scrape the ElfHosted KnightCrawler instance
+SCRAPE_MEDIAFUSION=False # scrape MediaFusion - has better results for Indian content
+MEDIAFUSION_URL=https://mediafusion.elfhosted.com # Allows you to scrape custom instances of MediaFusion
+PROXY_DEBRID_STREAM=False # Proxy Debrid Streams (very useful to use your debrid service on multiple IPs at same time)
+PROXY_DEBRID_STREAM_PASSWORD=CHANGE_ME # Secret password to enter on configuration page to prevent people from abusing your debrid stream proxy
+PROXY_DEBRID_STREAM_MAX_CONNECTIONS=-1 # IP-Based connection limit for the Debrid Stream Proxy (-1 = disabled)
+PROXY_DEBRID_STREAM_DEBRID_DEFAULT_SERVICE=realdebrid # if you want your users who use the Debrid Stream Proxy not to have to specify Debrid information, but to use the default one instead
+PROXY_DEBRID_STREAM_DEBRID_DEFAULT_APIKEY=CHANGE_ME # if you want your users who use the Debrid Stream Proxy not to have to specify Debrid information, but to use the default one instead
+REMOVE_ADULT_CONTENT=False # detect and remove adult content
+CUSTOM_HEADER_HTML=None # only set it if you know what it is
+STREMTHRU_URL=https://stremthru.13377001.xyz # StremThru acts as a proxy between Comet and the debrid services so that all of them can be supported - it is required
\ No newline at end of file
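
The single CACHE_TTL from the old sample is split into the three TTL variables above. A minimal sketch of how they could be consumed, using plain `os.environ` with the sample defaults (the project's real `settings` object comes from `comet.utils.models` and may load them differently):

```python
import os

def env_int(name: str, default: int) -> int:
    """Read an integer environment variable, falling back to the documented default."""
    raw = os.environ.get(name)
    return int(raw) if raw not in (None, "") else default

# Defaults mirror the comments in .env-sample above.
METADATA_CACHE_TTL = env_int("METADATA_CACHE_TTL", 2592000)  # 30 days
TORRENT_CACHE_TTL = env_int("TORRENT_CACHE_TTL", 1296000)    # 15 days
DEBRID_CACHE_TTL = env_int("DEBRID_CACHE_TTL", 86400)        # 1 day

if __name__ == "__main__":
    print(METADATA_CACHE_TTL, TORRENT_CACHE_TTL, DEBRID_CACHE_TTL)
```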
diff --git a/.github/workflows/docker-build-push.yml b/.github/workflows/docker-build-push.yml
index a57e173..734518f 100644
--- a/.github/workflows/docker-build-push.yml
+++ b/.github/workflows/docker-build-push.yml
@@ -4,8 +4,7 @@ on:
push:
branches:
- main
-# release:
-# types: [created]
+ - rewrite
workflow_dispatch:
jobs:
@@ -42,6 +41,19 @@ jobs:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
+ - name: Get branch name
+ id: branch-name
+ run: echo "branch=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT
+
+ - name: Set Docker tags
+ id: docker_tags
+ run: |
+ if [ "${{ steps.branch-name.outputs.branch }}" = "main" ]; then
+ echo "tags=ghcr.io/g0ldyy/comet:latest,docker.io/g0ldyy/comet:latest" >> $GITHUB_OUTPUT
+ else
+ echo "tags=ghcr.io/g0ldyy/comet:${{ steps.branch-name.outputs.branch }},docker.io/g0ldyy/comet:${{ steps.branch-name.outputs.branch }}" >> $GITHUB_OUTPUT
+ fi
+
- name: Build and push Docker image
uses: docker/build-push-action@v5.3.0
with:
@@ -51,6 +63,4 @@ jobs:
push: true
cache-from: type=gha
cache-to: type=gha,mode=max
- tags: |
- ghcr.io/g0ldyy/comet:latest
- docker.io/g0ldyy/comet:latest
\ No newline at end of file
+ tags: ${{ steps.docker_tags.outputs.tags }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index b5135a6..6520211 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,167 +1,177 @@
-# Project Based
-*.db
-
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-logs/
-.vscode/
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-poetry.lock
-
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-# in version control.
-# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
-.pdm.toml
-.pdm-python
-.pdm-build/
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
+# Project Based
+*.db
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fa7bb69..97e4322 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,29 +1,5 @@
# Changelog
-## [1.53.0](https://github.com/g0ldyy/comet/compare/v1.52.0...v1.53.0) (2025-02-07)
-
-
-### Features
-
-* **debrid/stremthru:** forward client ip ([982c398](https://github.com/g0ldyy/comet/commit/982c398774480badbb60d7214a91c3a62260fee6))
-* **debrid/stremthru:** match behavior with mediafusion ([24611a2](https://github.com/g0ldyy/comet/commit/24611a2940dec82cd3cd09aab757cef06a9beb8a))
-* **debrid/stremthru:** pass stremio video id for magnet cache check ([786a298](https://github.com/g0ldyy/comet/commit/786a2985ca3ef2463b65c684bc8cef6a86ce772f))
-* **debrid/stremthru:** use default url only for specific services ([f0b029d](https://github.com/g0ldyy/comet/commit/f0b029d7933bc85a7ca794c3edea1afccc8d4f28))
-* **debrid:** add support for stremthru ([64ba2b3](https://github.com/g0ldyy/comet/commit/64ba2b39fb0faab69e16e8755f024eb82c14f888))
-
-
-### Bug Fixes
-
-* **debrid/stremthru:** handle missing file index ([32ae49e](https://github.com/g0ldyy/comet/commit/32ae49ed1dd7ef0ab52ec8f6247250b3a8e888c7))
-* direct torrent results ([dea8a90](https://github.com/g0ldyy/comet/commit/dea8a9041cb54245e2fdadd9d914c0285efebc56))
-
-## [1.52.0](https://github.com/g0ldyy/comet/compare/v1.51.0...v1.52.0) (2025-01-03)
-
-
-### Features
-
-* revert random manifest id ([f792529](https://github.com/g0ldyy/comet/commit/f7925298993e904a635041248079cf578d602818))
-
## [1.51.0](https://github.com/g0ldyy/comet/compare/v1.50.1...v1.51.0) (2024-11-28)
diff --git a/Dockerfile b/Dockerfile
index f2b2603..85bddcf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,29 +1,16 @@
-FROM python:3.11-alpine
-LABEL name="Comet" \
- description="Stremio's fastest torrent/debrid search add-on." \
- url="https://github.com/g0ldyy/comet"
-
-WORKDIR /app
-
-ARG DATABASE_PATH
-
-ENV PYTHONUNBUFFERED=1 \
- PYTHONDONTWRITEBYTECODE=1 \
- POETRY_NO_INTERACTION=1 \
- POETRY_HOME="/usr/local" \
- FORCE_COLOR=1 \
- TERM=xterm-256color
-
-# Fix python-alpine gcc
-RUN apk add --no-cache \
- gcc \
- musl-dev \
- libffi-dev \
- make
-
-RUN pip install poetry
-COPY pyproject.toml .
-RUN poetry install --no-cache --no-root --without dev
-COPY . .
-
-ENTRYPOINT ["poetry", "run", "python", "-m", "comet.main"]
+FROM ghcr.io/astral-sh/uv:python3.11-alpine
+LABEL name="Comet" \
+ description="Stremio's fastest torrent/debrid search add-on." \
+ url="https://github.com/g0ldyy/comet"
+
+WORKDIR /app
+
+ARG DATABASE_PATH
+
+COPY pyproject.toml .
+
+RUN uv sync
+
+COPY . .
+
+ENTRYPOINT ["uv", "run", "python", "-m", "comet.main"]
diff --git a/LICENSE b/LICENSE
index 7d6b337..bc5e051 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,21 +1,21 @@
-MIT License
-
-Copyright (c) 2024 Goldy
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+MIT License
+
+Copyright (c) 2024 Goldy
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index 668a8c7..812eaee 100644
--- a/README.md
+++ b/README.md
@@ -1,112 +1,85 @@
-
-
-
-
-
-
-
-
-# Features
-- The only Stremio addon that can Proxy Debrid Streams to allow use of the Debrid Service on multiple IPs at the same time on the same account!
-- IP-Based Max Connection Limit and Dashboard for Debrid Stream Proxier
-- Jackett and Prowlarr support (change the `INDEXER_MANAGER_TYPE` environment variable to `jackett` or `prowlarr`)
-- [Zilean](https://github.com/iPromKnight/zilean) ([DMM](https://hashlists.debridmediamanager.com/) Scraper) support for even more results
-- [Torrentio](https://torrentio.strem.fun/) Scraper
-- Caching system ft. SQLite / PostgreSQL
-- Smart Torrent Ranking powered by [RTN](https://github.com/dreulavelle/rank-torrent-name)
-- Proxy support to bypass debrid restrictions
-- Real-Debrid, All-Debrid, Premiumize, TorBox and Debrid-Link supported
-- Direct Torrent supported (do not specify a Debrid API Key on the configuration page (webui) to activate it - it will use the cached results of other users using debrid service)
-- [Kitsu](https://kitsu.io/) support (anime)
-- Adult Content Filter
-- [StremThru](https://github.com/MunifTanjim/stremthru) support
-
-# Installation
-To customize your Comet experience to suit your needs, please first take a look at all the [environment variables](https://github.com/g0ldyy/comet/blob/main/.env-sample)!
-## ElfHosted
-A free, public Comet instance is available at https://comet.elfhosted.com
-
-[ElfHosted](https://elfhosted.com) is a geeky [open-source](https://elfhosted.com/open/) PaaS which provides all the "plumbing" (*hosting, security, updates, etc*) for your self-hosted apps.
-
-ElfHosted offer "one-click" [private Comet instances](https://elfhosted.com/app/comet/), allowing you to customize your indexers, and enabling "Proxy Stream" mode, to permit streaming from multiple source IPs with the same RD token!
-
-> [!IMPORTANT]
-> Comet is a top-tier app in the [ElfHosted app catalogue](https://elfhosted.com/apps/). 30% of your subscription goes to the app developer :heart:
-
-(*[ElfHosted Discord](https://discord.elfhosted.com)*)
-
-## Self Hosted
-### From source
-- Clone the repository and enter the folder
- ```sh
- git clone https://github.com/g0ldyy/comet
- cd comet
- ```
-- Install dependencies
- ```sh
- pip install poetry
- poetry install
- ````
-- Start Comet
- ```sh
- poetry run python -m comet.main
- ````
-
-### With Docker
-- Simply run the Docker image after modifying the environment variables
- ```sh
- docker run --name comet -p 8000:8000 -d \
- -e FASTAPI_HOST=0.0.0.0 \
- -e FASTAPI_PORT=8000 \
- -e FASTAPI_WORKERS=1 \
- -e CACHE_TTL=86400 \
- -e DEBRID_PROXY_URL=http://127.0.0.1:1080 \
- -e INDEXER_MANAGER_TYPE=jackett \
- -e INDEXER_MANAGER_URL=http://127.0.0.1:9117 \
- -e INDEXER_MANAGER_API_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX \
- -e INDEXER_MANAGER_INDEXERS='["EXAMPLE1_CHANGETHIS", "EXAMPLE2_CHANGETHIS"]' \
- -e INDEXER_MANAGER_TIMEOUT=30 \
- -e GET_TORRENT_TIMEOUT=5 \
- g0ldyy/comet
- ```
- - To update your container
-
- - Find your existing container name
- ```sh
- docker ps
- ```
-
- - Stop your existing container
- ```sh
- docker stop
- ```
-
- - Remove your existing container
- ```sh
- docker rm
- ```
-
- - Pull the latest version from docker hub
- ```sh
- docker pull g0ldyy/comet
- ```
-
- - Finally, re-run the docker run command
-
-### With Docker Compose
-- Copy *compose.yaml* in a directory
-- Copy *env-sample* to *.env* in the same directory
-- Pull the latest version from docker hub
- ```sh
- docker compose pull
- ```
-- Run
- ```sh
- docker compose up -d
- ```
-
-## Debrid IP Blacklist
-To bypass Real-Debrid's (or AllDebrid) IP blacklist, start a cloudflare-warp container: https://github.com/cmj2002/warp-docker
-
-## Web UI Showcase
-
+
+
+
+
+
+
+
+
+# Features
+- The only Stremio addon that can Proxy Debrid Streams to allow use of the Debrid Service on multiple IPs at the same time on the same account!
+- IP-Based Max Connection Limit and Dashboard for Debrid Stream Proxier
+- Jackett and Prowlarr support (change the `INDEXER_MANAGER_TYPE` environment variable to `jackett` or `prowlarr`)
+- [Zilean](https://github.com/iPromKnight/zilean) ([DMM](https://hashlists.debridmediamanager.com/) Scraper) support for even more results
+- [Torrentio](https://torrentio.strem.fun/) Scraper
+- Caching system ft. SQLite / PostgreSQL
+- Smart Torrent Ranking powered by [RTN](https://github.com/dreulavelle/rank-torrent-name)
+- Proxy support to bypass debrid restrictions
+- Real-Debrid, All-Debrid, Premiumize, TorBox and Debrid-Link supported
+- Direct Torrent supported
+- [Kitsu](https://kitsu.io/) support (anime)
+- Adult Content Filter
+- [StremThru](https://github.com/MunifTanjim/stremthru) support
+
+# Installation
+To customize your Comet experience to suit your needs, please first take a look at all the [environment variables](https://github.com/g0ldyy/comet/blob/main/.env-sample)!
+
+## ElfHosted
+A free, public Comet instance is available at https://comet.elfhosted.com
+
+[ElfHosted](https://elfhosted.com) is a geeky [open-source](https://elfhosted.com/open/) PaaS which provides all the "plumbing" (*hosting, security, updates, etc*) for your self-hosted apps.
+
+ElfHosted offer "one-click" [private Comet instances](https://elfhosted.com/app/comet/), allowing you to customize your indexers, and enabling "Proxy Stream" mode, to permit streaming from multiple source IPs with the same RD token!
+
+> [!IMPORTANT]
+> Comet is a top-tier app in the [ElfHosted app catalogue](https://elfhosted.com/apps/). 30% of your subscription goes to the app developer :heart:
+
+(*[ElfHosted Discord](https://discord.elfhosted.com)*)
+
+## Self Hosted
+### From source
+- Clone the repository and enter the folder
+ ```sh
+ git clone https://github.com/g0ldyy/comet
+ cd comet
+ ```
+- Install dependencies
+ ```sh
+ pip install uv
+ uv sync
+ ````
+- Start Comet
+ ```sh
+ uv run python -m comet.main
+ ````
+
+### With Docker Compose
+- Copy *deployment/docker-compose.yml* into a directory
+- Copy *.env-sample* to *.env* in the same directory, keep only the variables you wish to modify, and remove all comments
+- Pull the latest version from docker hub
+ ```sh
+ docker compose pull
+ ```
+- Run
+ ```sh
+ docker compose up -d
+ ```
+
+### Nginx Reverse Proxy
+If you want to serve Comet behind an Nginx reverse proxy, here is the configuration you should use.
+```
+server {
+ server_name example.com;
+
+ location / {
+ proxy_pass http://localhost:8000;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ }
+}
+```
+
+## Web UI Showcase
+
diff --git a/comet/api/core.py b/comet/api/core.py
index fea17a0..5383d6c 100644
--- a/comet/api/core.py
+++ b/comet/api/core.py
@@ -1,39 +1,32 @@
-import PTT
-import RTN
+import random
+import string
+import secrets
+import orjson
-from fastapi import APIRouter, Request
-from fastapi.responses import RedirectResponse
+from fastapi import APIRouter, Request, Depends, HTTPException
+from fastapi.responses import RedirectResponse, Response
from fastapi.templating import Jinja2Templates
+from fastapi.security import HTTPBasic, HTTPBasicCredentials
-from comet.utils.models import settings
-from comet.utils.general import config_check, get_debrid_extension
+from comet.utils.models import settings, web_config, database
+from comet.utils.general import config_check
+from comet.debrid.manager import get_debrid_extension
templates = Jinja2Templates("comet/templates")
main = APIRouter()
+security = HTTPBasic()
-@main.get("/", status_code=200)
+@main.get("/")
async def root():
return RedirectResponse("/configure")
-@main.get("/health", status_code=200)
+@main.get("/health")
async def health():
return {"status": "ok"}
-indexers = settings.INDEXER_MANAGER_INDEXERS
-languages = [language for language in PTT.parse.LANGUAGES_TRANSLATION_TABLE.values()]
-languages.insert(0, "Unknown")
-languages.insert(1, "Multi")
-web_config = {
- "indexers": [indexer.replace(" ", "_").lower() for indexer in indexers],
- "languages": languages,
- "resolutions": [resolution.value for resolution in RTN.models.Resolution],
- "resultFormat": ["Title", "Metadata", "Size", "Tracker", "Languages"],
-}
-
-
@main.get("/configure")
@main.get("/{b64config}/configure")
async def configure(request: Request):
@@ -45,25 +38,16 @@ async def configure(request: Request):
if settings.CUSTOM_HEADER_HTML
else "",
"webConfig": web_config,
- "indexerManager": settings.INDEXER_MANAGER_TYPE,
"proxyDebridStream": settings.PROXY_DEBRID_STREAM,
- "stremthruDefaultUrl": settings.STREMTHRU_DEFAULT_URL or "",
},
)
@main.get("/manifest.json")
@main.get("/{b64config}/manifest.json")
-async def manifest(b64config: str = None):
- config = config_check(b64config)
- if not config:
- config = {"debridService": None}
-
- debrid_extension = get_debrid_extension(config["debridService"], config["debridApiKey"])
-
- return {
- "id": settings.ADDON_ID,
- "name": f"{settings.ADDON_NAME}{(' | ' + debrid_extension) if debrid_extension is not None else ''}",
+async def manifest(request: Request, b64config: str = None):
+ base_manifest = {
+ "id": f"{settings.ADDON_ID}.{''.join(random.choice(string.ascii_letters) for _ in range(4))}",
"description": "Stremio's fastest torrent/debrid search add-on.",
"version": "1.0.0",
"catalogs": [],
@@ -79,3 +63,43 @@ async def manifest(b64config: str = None):
"background": "https://i.imgur.com/WwnXB3k.jpeg",
"behaviorHints": {"configurable": True, "configurationRequired": False},
}
+
+ config = config_check(b64config)
+ if not config:
+ base_manifest["name"] = "❌ | Comet"
+ base_manifest["description"] = f"⚠️ OBSOLETE CONFIGURATION, PLEASE RE-CONFIGURE ON {request.url.scheme}://{request.url.netloc} ⚠️"
+ return base_manifest
+
+ debrid_extension = get_debrid_extension(config["debridService"])
+ base_manifest["name"] = f"{settings.ADDON_NAME}{(' | ' + debrid_extension) if debrid_extension is not None else ''}"
+
+ return base_manifest
+
+
+class CustomORJSONResponse(Response):
+ media_type = "application/json"
+
+ def render(self, content) -> bytes:
+ assert orjson is not None, "orjson must be installed"
+ return orjson.dumps(content, option=orjson.OPT_INDENT_2)
+
+
+def verify_dashboard_auth(credentials: HTTPBasicCredentials = Depends(security)):
+ is_correct = secrets.compare_digest(
+ credentials.password, settings.DASHBOARD_ADMIN_PASSWORD
+ )
+
+ if not is_correct:
+ raise HTTPException(
+ status_code=401,
+ detail="Incorrect password",
+ headers={"WWW-Authenticate": "Basic"},
+ )
+
+ return True
+
+
+@main.get("/dashboard", response_class=CustomORJSONResponse)
+async def dashboard(authenticated: bool = Depends(verify_dashboard_auth)):
+ rows = await database.fetch_all("SELECT * FROM active_connections")
+ return rows
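
The password-in-querystring `/active-connections` route is replaced by `/dashboard` behind HTTP Basic auth; only the password is checked by `verify_dashboard_auth`. A quick client sketch, assuming a local instance on the .env-sample defaults (host, port, and password here are illustrative, not mandated by the patch):

```python
import requests

# Username is arbitrary: verify_dashboard_auth only compares the password
# against DASHBOARD_ADMIN_PASSWORD.
resp = requests.get(
    "http://127.0.0.1:8000/dashboard",
    auth=("admin", "CHANGE_ME"),
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # rows from the active_connections table
```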
diff --git a/comet/api/stream.py b/comet/api/stream.py
index 7efb77e..33f2f9a 100644
--- a/comet/api/stream.py
+++ b/comet/api/stream.py
@@ -1,480 +1,248 @@
-import asyncio
-import time
import aiohttp
-import httpx
-import uuid
-import orjson
+import time
+import mediaflow_proxy.utils.http_utils
from fastapi import APIRouter, Request, BackgroundTasks
from fastapi.responses import (
- RedirectResponse,
- StreamingResponse,
FileResponse,
- Response,
-)
-from starlette.background import BackgroundTask
-from RTN import Torrent, sort_torrents
-
-from comet.debrid.manager import getDebrid
-from comet.utils.general import (
- config_check,
- get_debrid_extension,
- get_indexer_manager,
- get_zilean,
- get_torrentio,
- get_mediafusion,
- filter,
- get_torrent_hash,
- translate,
- get_balanced_hashes,
- format_title,
- get_client_ip,
- get_aliases,
- add_torrent_to_cache,
+ RedirectResponse,
)
-from comet.utils.config import is_proxy_stream_authed, is_proxy_stream_enabled, prepare_debrid_config, should_skip_proxy_stream
+
+from comet.utils.models import settings, database, trackers
+from comet.utils.general import parse_media_id
+from comet.metadata.manager import MetadataScraper
+from comet.scrapers.manager import TorrentManager
+from comet.utils.general import config_check, format_title, get_client_ip
+from comet.debrid.manager import get_debrid_extension, get_debrid
+from comet.utils.streaming import custom_handle_stream_request
from comet.utils.logger import logger
-from comet.utils.models import database, rtn, settings, trackers
streams = APIRouter()
-@streams.get("/stream/{type}/{id}.json")
-async def stream_noconfig(request: Request, type: str, id: str):
- return {
- "streams": [
- {
- "name": "[⚠️] Comet",
- "description": f"{request.url.scheme}://{request.url.netloc}/configure",
- "url": "https://comet.fast",
- }
- ]
- }
+async def remove_ongoing_search_from_database(media_id: str):
+ await database.execute(
+ "DELETE FROM ongoing_searches WHERE media_id = :media_id",
+ {"media_id": media_id},
+ )
+
+
+async def is_first_search(media_id: str) -> bool:
+ try:
+ await database.execute(
+ "INSERT INTO first_searches VALUES (:media_id, :timestamp)",
+ {"media_id": media_id, "timestamp": time.time()},
+ )
+
+ return True
+ except Exception:
+ return False
+
+
+async def background_scrape(
+ torrent_manager: TorrentManager, media_id: str, debrid_service: str
+):
+ try:
+ async with aiohttp.ClientSession() as new_session:
+ await torrent_manager.scrape_torrents(new_session)
+ if debrid_service != "torrent" and len(torrent_manager.torrents) > 0:
+ await torrent_manager.get_and_cache_debrid_availability(new_session)
-@streams.get("/{b64config}/stream/{type}/{id}.json")
+ logger.log(
+ "SCRAPER",
+ "📥 Background scrape + availability check complete!",
+ )
+ except Exception as e:
+ logger.log("SCRAPER", f"❌ Background scrape + availability check failed: {e}")
+ finally:
+ await remove_ongoing_search_from_database(media_id)
+
+
+@streams.get("/stream/{media_type}/{media_id}.json")
+@streams.get("/{b64config}/stream/{media_type}/{media_id}.json")
async def stream(
request: Request,
- b64config: str,
- type: str,
- id: str,
+ media_type: str,
+ media_id: str,
background_tasks: BackgroundTasks,
+ b64config: str = None,
):
config = config_check(b64config)
if not config:
return {
"streams": [
{
- "name": "[⚠️] Comet",
- "description": "Invalid Comet config.",
+ "name": "[❌] Comet",
+ "description": f"⚠️ OBSOLETE CONFIGURATION, PLEASE RE-CONFIGURE ON {request.url.scheme}://{request.url.netloc} ⚠️",
"url": "https://comet.fast",
}
]
}
- connector = aiohttp.TCPConnector(limit=0)
- async with aiohttp.ClientSession(
- connector=connector, raise_for_status=True
- ) as session:
- full_id = id
- season = None
- episode = None
- if type == "series":
- info = id.split(":")
- id = info[0]
- season = int(info[1])
- episode = int(info[2])
-
- year = None
- year_end = None
- try:
- kitsu = False
- if id == "kitsu":
- kitsu = True
- get_metadata = await session.get(
- f"https://kitsu.io/api/edge/anime/{season}"
- )
- metadata = await get_metadata.json()
- name = metadata["data"]["attributes"]["canonicalTitle"]
- season = 1
- else:
- get_metadata = await session.get(
- f"https://v3.sg.media-imdb.com/suggestion/a/{id}.json"
- )
- metadata = await get_metadata.json()
- element = metadata["d"][
- 0
- if metadata["d"][0]["id"]
- not in ["/imdbpicks/summer-watch-guide", "/emmys"]
- else 1
- ]
-
- for element in metadata["d"]:
- if "/" not in element["id"]:
- break
-
- name = element["l"]
- year = element.get("y")
-
- if "yr" in element:
- year_end = int(element["yr"].split("-")[1])
- except Exception as e:
- logger.warning(f"Exception while getting metadata for {id}: {e}")
-
- return {
- "streams": [
- {
- "name": "[⚠️] Comet",
- "description": f"Can't get metadata for {id}",
- "url": "https://comet.fast",
- }
- ]
- }
-
- name = translate(name)
- log_name = name
- if type == "series":
- log_name = f"{name} S{season:02d}E{episode:02d}"
-
- prepare_debrid_config(config)
- if config["debridApiKey"] == "":
- services = ["realdebrid", "alldebrid", "premiumize", "torbox", "debridlink", "stremthru"]
- debrid_emoji = "⬇️"
- else:
- services = [config["debridService"]]
- debrid_emoji = "⚡"
+ ongoing_search = await database.fetch_one(
+ "SELECT timestamp FROM ongoing_searches WHERE media_id = :media_id",
+ {"media_id": media_id},
+ )
- results = []
- if (
- is_proxy_stream_enabled(config)
- and not is_proxy_stream_authed(config)
- ):
- results.append(
+ if ongoing_search:
+ return {
+ "streams": [
{
- "name": "[⚠️] Comet",
- "description": "Debrid Stream Proxy Password incorrect.\nStreams will not be proxied.",
+ "name": "[🔄] Comet",
+ "description": "Search in progress, please try again in a few seconds...",
"url": "https://comet.fast",
}
- )
-
- indexers = config["indexers"].copy()
- if settings.SCRAPE_TORRENTIO:
- indexers.append("torrentio")
- if settings.SCRAPE_MEDIAFUSION:
- indexers.append("mediafusion")
- if settings.ZILEAN_URL:
- indexers.append("dmm")
- indexers_json = orjson.dumps(indexers).decode("utf-8")
-
- all_sorted_ranked_files = {}
- trackers_found = (
- set()
- ) # we want to check that we have a cache for each of the user's trackers
- the_time = time.time()
- cache_ttl = settings.CACHE_TTL
-
- for debrid_service in services:
- cached_results = await database.fetch_all(
- f"""
- SELECT info_hash, tracker, data
- FROM cache
- WHERE debridService = :debrid_service
- AND name = :name
- AND ((cast(:season as INTEGER) IS NULL AND season IS NULL) OR season = cast(:season as INTEGER))
- AND ((cast(:episode as INTEGER) IS NULL AND episode IS NULL) OR episode = cast(:episode as INTEGER))
- AND tracker IN (SELECT cast(value as TEXT) FROM {'json_array_elements_text' if settings.DATABASE_TYPE == 'postgresql' else 'json_each'}(:indexers))
- AND timestamp + :cache_ttl >= :current_time
- """,
- {
- "debrid_service": debrid_service,
- "name": name,
- "season": season,
- "episode": episode,
- "indexers": indexers_json,
- "cache_ttl": cache_ttl,
- "current_time": the_time,
- },
- )
- for result in cached_results:
- trackers_found.add(result["tracker"].lower())
-
- hash = result["info_hash"]
- if "searched" in hash:
- continue
-
- all_sorted_ranked_files[hash] = orjson.loads(result["data"])
-
- if len(all_sorted_ranked_files) != 0 and set(indexers).issubset(trackers_found):
- debrid_extension = get_debrid_extension(
- debrid_service, config["debridApiKey"]
- )
- balanced_hashes = get_balanced_hashes(all_sorted_ranked_files, config)
-
- for resolution in balanced_hashes:
- for hash in balanced_hashes[resolution]:
- data = all_sorted_ranked_files[hash]["data"]
- the_stream = {
- "name": f"[{debrid_extension}{debrid_emoji}] Comet {data['resolution']}",
- "description": format_title(data, config),
- "torrentTitle": (
- data["torrent_title"] if "torrent_title" in data else None
- ),
- "torrentSize": (
- data["torrent_size"] if "torrent_size" in data else None
- ),
- "behaviorHints": {
- "filename": data["raw_title"],
- "bingeGroup": "comet|" + hash,
- },
- }
-
- if config["debridApiKey"] != "":
- the_stream["url"] = (
- f"{request.url.scheme}://{request.url.netloc}/{b64config}/playback/{hash}/{data['index']}"
- )
- else:
- the_stream["infoHash"] = hash
- index = str(data["index"])
- the_stream["fileIdx"] = (
- 1 if "|" in index else int(index)
- ) # 1 because for Premiumize it's impossible to get the file index
- the_stream["sources"] = trackers
-
- results.append(the_stream)
-
- logger.info(
- f"{len(all_sorted_ranked_files)} cached results found for {log_name}"
- )
-
- return {"streams": results}
+ ]
+ }
- if config["debridApiKey"] == "":
+ connector = aiohttp.TCPConnector(limit=0)
+ async with aiohttp.ClientSession(connector=connector) as session:
+ metadata, aliases = await MetadataScraper(session).fetch_metadata_and_aliases(
+ media_type, media_id
+ )
+ if metadata is None:
+ logger.log("SCRAPER", f"❌ Failed to fetch metadata for {media_id}")
return {
"streams": [
{
"name": "[⚠️] Comet",
- "description": "No cache found for Direct Torrenting.",
+ "description": "Unable to get metadata.",
"url": "https://comet.fast",
}
]
}
- logger.info(f"No cache found for {log_name} with user configuration")
- debrid = getDebrid(session, config, get_client_ip(request))
+ title = metadata["title"]
+ year = metadata["year"]
+ year_end = metadata["year_end"]
+ season = metadata["season"]
+ episode = metadata["episode"]
+
+ log_title = f"({media_id}) {title}"
+ if media_type == "series":
+ log_title += f" S{season:02d}E{episode:02d}"
+
+ logger.log("SCRAPER", f"🔍 Starting search for {log_title}")
+
+ id, season, episode = parse_media_id(media_type, media_id)
+ media_only_id = id
+
+ debrid_service = config["debridService"]
+ torrent_manager = TorrentManager(
+ debrid_service,
+ config["debridApiKey"],
+ get_client_ip(request),
+ media_type,
+ media_id,
+ media_only_id,
+ title,
+ year,
+ year_end,
+ season,
+ episode,
+ aliases,
+ settings.REMOVE_ADULT_CONTENT and config["removeTrash"],
+ )
- check_premium = await debrid.check_premium()
- if not check_premium:
- additional_info = ""
- if config["debridService"] == "alldebrid":
- additional_info = "\nCheck your email!"
+ await torrent_manager.get_cached_torrents()
+ logger.log(
+ "SCRAPER", f"📦 Found cached torrents: {len(torrent_manager.torrents)}"
+ )
- return {
- "streams": [
- {
- "name": "[⚠️] Comet",
- "description": f"Invalid {config['debridService']} account.{additional_info}",
- "url": "https://comet.fast",
- }
- ]
- }
+ is_first = await is_first_search(media_id)
+ has_cached_results = len(torrent_manager.torrents) > 0
- indexer_manager_type = settings.INDEXER_MANAGER_TYPE
+ cached_results = []
+ non_cached_results = []
- search_indexer = len(config["indexers"]) != 0
- torrents = []
- tasks = []
- if indexer_manager_type and search_indexer:
- logger.info(
- f"Start of {indexer_manager_type} search for {log_name} with indexers {config['indexers']}"
+ if not has_cached_results:
+ logger.log("SCRAPER", f"🔎 Starting new search for {log_title}")
+ await database.execute(
+ f"INSERT {'OR IGNORE ' if settings.DATABASE_TYPE == 'sqlite' else ''}INTO ongoing_searches VALUES (:media_id, :timestamp){' ON CONFLICT DO NOTHING' if settings.DATABASE_TYPE == 'postgresql' else ''}",
+ {"media_id": media_id, "timestamp": time.time()},
)
+ background_tasks.add_task(remove_ongoing_search_from_database, media_id)
- search_terms = [name]
- if type == "series":
- if not kitsu:
- search_terms.append(f"{name} S{season:02d}E{episode:02d}")
- else:
- search_terms.append(f"{name} {episode}")
- tasks.extend(
- get_indexer_manager(
- session, indexer_manager_type, config["indexers"], term
- )
- for term in search_terms
+ await torrent_manager.scrape_torrents(session)
+ logger.log(
+ "SCRAPER",
+ f"📥 Scraped torrents: {len(torrent_manager.torrents)}",
)
- else:
- logger.info(
- f"No indexer {'manager ' if not indexer_manager_type else ''}{'selected by user' if indexer_manager_type else 'defined'} for {log_name}"
+ elif is_first:
+ logger.log(
+ "SCRAPER",
+ f"🔄 Starting background scrape + availability check for {log_title}",
)
-
- if settings.ZILEAN_URL:
- tasks.append(get_zilean(session, name, log_name, season, episode))
-
- if settings.SCRAPE_TORRENTIO:
- tasks.append(get_torrentio(log_name, type, full_id))
-
- if settings.SCRAPE_MEDIAFUSION:
- tasks.append(get_mediafusion(log_name, type, full_id))
-
- search_response = await asyncio.gather(*tasks)
- for results in search_response:
- for result in results:
- torrents.append(result)
-
- logger.info(
- f"{len(torrents)} unique torrents found for {log_name}"
- + (
- " with "
- + ", ".join(
- part
- for part in [
- indexer_manager_type,
- "Zilean" if settings.ZILEAN_URL else None,
- "Torrentio" if settings.SCRAPE_TORRENTIO else None,
- "MediaFusion" if settings.SCRAPE_MEDIAFUSION else None,
- ]
- if part
- )
- if any(
- [
- indexer_manager_type,
- settings.ZILEAN_URL,
- settings.SCRAPE_TORRENTIO,
- settings.SCRAPE_MEDIAFUSION,
- ]
- )
- else ""
+ await database.execute(
+ f"INSERT {'OR IGNORE ' if settings.DATABASE_TYPE == 'sqlite' else ''}INTO ongoing_searches VALUES (:media_id, :timestamp){' ON CONFLICT DO NOTHING' if settings.DATABASE_TYPE == 'postgresql' else ''}",
+ {"media_id": media_id, "timestamp": time.time()},
)
- )
-
- if len(torrents) == 0:
- return {"streams": []}
- if settings.TITLE_MATCH_CHECK:
- aliases = await get_aliases(
- session, "movies" if type == "movie" else "shows", id
+ background_tasks.add_task(
+ background_scrape, torrent_manager, media_id, debrid_service
)
- indexed_torrents = [(i, torrents[i]["Title"]) for i in range(len(torrents))]
- chunk_size = 50
- chunks = [
- indexed_torrents[i : i + chunk_size]
- for i in range(0, len(indexed_torrents), chunk_size)
- ]
-
- remove_adult_content = (
- settings.REMOVE_ADULT_CONTENT and config["removeTrash"]
+ cached_results.append(
+ {
+ "name": "[🔄] Comet",
+ "description": "First search for this media - More results will be available in a few seconds...",
+ "url": "https://comet.fast",
+ }
)
- tasks = []
- for chunk in chunks:
- tasks.append(
- filter(chunk, name, year, year_end, aliases, remove_adult_content)
- )
- filtered_torrents = await asyncio.gather(*tasks)
- index_less = 0
- for result in filtered_torrents:
- for filtered in result:
- if not filtered[1]:
- del torrents[filtered[0] - index_less]
- index_less += 1
- continue
-
- logger.info(
- f"{len(torrents)} torrents passed title match check for {log_name}"
+ await torrent_manager.get_cached_availability()
+ if (
+ (
+ not has_cached_results
+ or sum(
+ 1
+ for torrent in torrent_manager.torrents.values()
+ if torrent["cached"]
+ )
+ == 0
)
+ and len(torrent_manager.torrents) > 0
+ and debrid_service != "torrent"
+ ):
+ logger.log("SCRAPER", "🔄 Checking availability on debrid service...")
+ await torrent_manager.get_and_cache_debrid_availability(session)
- if len(torrents) == 0:
- return {"streams": []}
-
- tasks = []
- for i in range(len(torrents)):
- tasks.append(get_torrent_hash(session, (i, torrents[i])))
-
- torrent_hashes = await asyncio.gather(*tasks)
- index_less = 0
- for hash in torrent_hashes:
- if not hash[1]:
- del torrents[hash[0] - index_less]
- index_less += 1
- continue
-
- torrents[hash[0] - index_less]["InfoHash"] = hash[1]
-
- logger.info(f"{len(torrents)} info hashes found for {log_name}")
-
- if len(torrents) == 0:
- return {"streams": []}
-
- files = await debrid.get_files(
- list({hash[1] for hash in torrent_hashes if hash[1] is not None}),
- type,
- season,
- episode,
- kitsu,
- video_id=full_id,
- )
-
- ranked_files = set()
- torrents_by_hash = {torrent["InfoHash"]: torrent for torrent in torrents}
- for hash in files:
- try:
- ranked_file = rtn.rank(
- torrents_by_hash[hash]["Title"],
- hash,
- remove_trash=False, # user can choose if he wants to remove it
- )
+ if debrid_service != "torrent":
+ cached_count = sum(
+ 1 for torrent in torrent_manager.torrents.values() if torrent["cached"]
+ )
- ranked_files.add(ranked_file)
- except:
- pass
+ logger.log(
+ "SCRAPER",
+ f"💾 Available cached torrents on {debrid_service}: {cached_count}/{len(torrent_manager.torrents)}",
+ )
- sorted_ranked_files = sort_torrents(ranked_files)
+ initial_torrent_count = len(torrent_manager.torrents)
- len_sorted_ranked_files = len(sorted_ranked_files)
- logger.info(
- f"{len_sorted_ranked_files} cached files found on {config['debridService']} for {log_name}"
+ torrent_manager.rank_torrents(
+ config["rtnSettings"],
+ config["rtnRanking"],
+ config["maxResultsPerResolution"],
+ config["maxSize"],
+ config["cachedOnly"],
+ config["removeTrash"],
)
-
- if len_sorted_ranked_files == 0:
- return {"streams": []}
-
- sorted_ranked_files = {
- key: (value.model_dump() if isinstance(value, Torrent) else value)
- for key, value in sorted_ranked_files.items()
- }
- for hash in sorted_ranked_files: # needed for caching
- sorted_ranked_files[hash]["data"]["title"] = files[hash]["title"]
- sorted_ranked_files[hash]["data"]["torrent_title"] = torrents_by_hash[hash][
- "Title"
- ]
- sorted_ranked_files[hash]["data"]["tracker"] = torrents_by_hash[hash][
- "Tracker"
- ]
- torrent_size = torrents_by_hash[hash]["Size"]
- sorted_ranked_files[hash]["data"]["size"] = (
- files[hash]["size"]
- )
- sorted_ranked_files[hash]["data"]["torrent_size"] = (
- torrent_size if torrent_size else files[hash]["size"]
- )
- sorted_ranked_files[hash]["data"]["index"] = files[hash]["index"]
-
- background_tasks.add_task(
- add_torrent_to_cache, config, name, season, episode, sorted_ranked_files
+ logger.log(
+ "SCRAPER",
+ f"⚖️ Torrents after RTN filtering: {len(torrent_manager.ranked_torrents)}/{initial_torrent_count}",
)
- logger.info(f"Results have been cached for {log_name}")
+ debrid_extension = get_debrid_extension(debrid_service)
- debrid_extension = get_debrid_extension(config["debridService"], config["debridApiKey"])
-
- balanced_hashes = get_balanced_hashes(sorted_ranked_files, config)
-
- results = []
if (
- is_proxy_stream_enabled(config)
- and not is_proxy_stream_authed(config)
+ config["debridStreamProxyPassword"] != ""
+ and settings.PROXY_DEBRID_STREAM
+ and settings.PROXY_DEBRID_STREAM_PASSWORD
+ != config["debridStreamProxyPassword"]
):
- results.append(
+ cached_results.append(
{
"name": "[⚠️] Comet",
"description": "Debrid Stream Proxy Password incorrect.\nStreams will not be proxied.",
@@ -482,209 +250,137 @@ async def stream(
}
)
- for resolution in balanced_hashes:
- for hash in balanced_hashes[resolution]:
- data = sorted_ranked_files[hash]["data"]
- index = data['index']
- if index == -1:
- index = data['title']
- url = f"{request.url.scheme}://{request.url.netloc}/{b64config}/playback/{hash}/{index}"
- results.append(
- {
- "name": f"[{debrid_extension}⚡] Comet {data['resolution']}",
- "description": format_title(data, config),
- "torrentTitle": data["torrent_title"],
- "torrentSize": data["torrent_size"],
- "url": url,
- "behaviorHints": {
- "filename": data["raw_title"],
- "bingeGroup": "comet|" + hash,
- },
- }
- )
-
- return {"streams": results}
-
+ result_season = season if season is not None else "n"
+ result_episode = episode if episode is not None else "n"
-@streams.head("/{b64config}/playback/{hash}/{index}")
-async def playback(b64config: str, hash: str, index: str):
- return RedirectResponse("https://stremio.fast", status_code=302)
+ torrents = torrent_manager.torrents
+ for info_hash in torrent_manager.ranked_torrents:
+ torrent = torrents[info_hash]
+ rtn_data = torrent["parsed"]
+ debrid_emoji = (
+ "🧲"
+ if debrid_service == "torrent"
+ else ("⚡" if torrent["cached"] else "⬇️")
+ )
-class CustomORJSONResponse(Response):
- media_type = "application/json"
+ the_stream = {
+ "name": f"[{debrid_extension}{debrid_emoji}] Comet {rtn_data.resolution}",
+ "description": format_title(
+ rtn_data,
+ torrent["title"],
+ torrent["seeders"],
+ torrent["size"],
+ torrent["tracker"],
+ config["resultFormat"],
+ ),
+ "behaviorHints": {
+ "bingeGroup": "comet|" + info_hash,
+ "videoSize": torrent["size"],
+ "filename": rtn_data.raw_title,
+ },
+ }
- def render(self, content) -> bytes:
- assert orjson is not None, "orjson must be installed"
- return orjson.dumps(content, option=orjson.OPT_INDENT_2)
+ if debrid_service == "torrent":
+ the_stream["infoHash"] = info_hash
+ if torrent["fileIndex"] is not None:
+ the_stream["fileIdx"] = torrent["fileIndex"]
-@streams.get("/active-connections", response_class=CustomORJSONResponse)
-async def active_connections(request: Request, password: str):
- if password != settings.DASHBOARD_ADMIN_PASSWORD:
- return "Invalid Password"
+ if len(torrent["sources"]) == 0:
+ the_stream["sources"] = trackers
+ else:
+ the_stream["sources"] = torrent["sources"]
+ else:
+ the_stream["url"] = (
+ f"{request.url.scheme}://{request.url.netloc}/{b64config}/playback/{info_hash}/{torrent['fileIndex'] if torrent['cached'] and torrent['fileIndex'] is not None else 'n'}/{title}/{result_season}/{result_episode}"
+ )
- active_connections = await database.fetch_all("SELECT * FROM active_connections")
+ if torrent["cached"]:
+ cached_results.append(the_stream)
+ else:
+ non_cached_results.append(the_stream)
- return {
- "total_connections": len(active_connections),
- "active_connections": active_connections,
- }
+ return {"streams": cached_results + non_cached_results}
-@streams.get("/{b64config}/playback/{hash}/{index}")
-async def playback(request: Request, b64config: str, hash: str, index: str):
+@streams.get("/{b64config}/playback/{hash}/{index}/{name}/{season}/{episode}")
+async def playback(
+ request: Request,
+ b64config: str,
+ hash: str,
+ index: str,
+ name: str,
+ season: str,
+ episode: str,
+):
config = config_check(b64config)
- if not config:
- return FileResponse("comet/assets/invalidconfig.mp4")
+ # if not config:
+ # return FileResponse("comet/assets/invalidconfig.mp4")
- prepare_debrid_config(config)
+ season = int(season) if season != "n" else None
+ episode = int(episode) if episode != "n" else None
- async with aiohttp.ClientSession(raise_for_status=True) as session:
- # Check for cached download link
+ async with aiohttp.ClientSession() as session:
cached_link = await database.fetch_one(
- f"SELECT link, timestamp FROM download_links WHERE debrid_key = '{config['debridApiKey']}' AND hash = '{hash}' AND file_index = '{index}'"
+ f"SELECT download_url FROM download_links_cache WHERE debrid_key = '{config['debridApiKey']}' AND info_hash = '{hash}' AND ((cast(:season as INTEGER) IS NULL AND season IS NULL) OR season = cast(:season as INTEGER)) AND ((cast(:episode as INTEGER) IS NULL AND episode IS NULL) OR episode = cast(:episode as INTEGER)) AND timestamp + 3600 >= :current_time",
+ {
+ "current_time": time.time(),
+ "season": season,
+ "episode": season,
+ },
)
- current_time = time.time()
- download_link = None
+ download_url = None
if cached_link:
- link = cached_link["link"]
- timestamp = cached_link["timestamp"]
-
- if current_time - timestamp < 3600:
- download_link = link
- else:
- # Cache expired, remove old entry
- await database.execute(
- f"DELETE FROM download_links WHERE debrid_key = '{config['debridApiKey']}' AND hash = '{hash}' AND file_index = '{index}'"
- )
+ download_url = cached_link["download_url"]
ip = get_client_ip(request)
-
- if not download_link:
- debrid = getDebrid(
+ if download_url is None:
+ debrid = get_debrid(
session,
- config,
- ip
- if (
- not is_proxy_stream_enabled(config)
- or not is_proxy_stream_authed(config)
- )
- else "",
+ None,
+ None,
+ config["debridService"],
+ config["debridApiKey"],
+ ip,
)
- download_link = await debrid.generate_download_link(hash, index)
- if not download_link:
+ download_url = await debrid.generate_download_link(
+ hash, index, name, season, episode
+ )
+ if not download_url:
return FileResponse("comet/assets/uncached.mp4")
- # Cache the new download link
+ query = f"""
+ INSERT {"OR IGNORE " if settings.DATABASE_TYPE == "sqlite" else ""}
+ INTO download_links_cache
+ VALUES (:debrid_key, :info_hash, :season, :episode, :download_url, :timestamp)
+ {" ON CONFLICT DO NOTHING" if settings.DATABASE_TYPE == "postgresql" else ""}
+ """
+
await database.execute(
- f"INSERT {'OR IGNORE ' if settings.DATABASE_TYPE == 'sqlite' else ''}INTO download_links (debrid_key, hash, file_index, link, timestamp) VALUES (:debrid_key, :hash, :file_index, :link, :timestamp){' ON CONFLICT DO NOTHING' if settings.DATABASE_TYPE == 'postgresql' else ''}",
+ query,
{
"debrid_key": config["debridApiKey"],
- "hash": hash,
- "file_index": index,
- "link": download_link,
- "timestamp": current_time,
+ "info_hash": hash,
+ "season": season,
+ "episode": episode,
+ "download_url": download_url,
+ "timestamp": time.time(),
},
)
- if should_skip_proxy_stream(config):
- return RedirectResponse(download_link, status_code=302)
-
if (
- is_proxy_stream_enabled(config)
- and is_proxy_stream_authed(config)
+ settings.PROXY_DEBRID_STREAM
+ and settings.PROXY_DEBRID_STREAM_PASSWORD
+ == config["debridStreamProxyPassword"]
):
- if settings.PROXY_DEBRID_STREAM_MAX_CONNECTIONS != -1:
- active_ip_connections = await database.fetch_all(
- "SELECT ip, COUNT(*) as connections FROM active_connections GROUP BY ip"
- )
- if any(
- connection["ip"] == ip
- and connection["connections"]
- >= settings.PROXY_DEBRID_STREAM_MAX_CONNECTIONS
- for connection in active_ip_connections
- ):
- return FileResponse("comet/assets/proxylimit.mp4")
-
- proxy = None
-
- class Streamer:
- def __init__(self, id: str):
- self.id = id
-
- self.client = httpx.AsyncClient(proxy=proxy, timeout=None)
- self.response = None
-
- async def stream_content(self, headers: dict):
- async with self.client.stream(
- "GET", download_link, headers=headers
- ) as self.response:
- async for chunk in self.response.aiter_raw():
- yield chunk
-
- async def close(self):
- await database.execute(
- f"DELETE FROM active_connections WHERE id = '{self.id}'"
- )
-
- if self.response is not None:
- await self.response.aclose()
- if self.client is not None:
- await self.client.aclose()
-
- range_header = request.headers.get("range", "bytes=0-")
-
- try:
- if config["debridService"] != "torbox":
- response = await session.head(
- download_link, headers={"Range": range_header}
- )
- else:
- response = await session.get(
- download_link, headers={"Range": range_header}
- )
- except aiohttp.ClientResponseError as e:
- if e.status == 503 and config["debridService"] == "alldebrid":
- proxy = (
- settings.DEBRID_PROXY_URL
- ) # proxy is needed only to proxy alldebrid streams
-
- response = await session.head(
- download_link, headers={"Range": range_header}, proxy=proxy
- )
- else:
- logger.warning(f"Exception while proxying {download_link}: {e}")
- return
-
- if response.status == 206 or (
- response.status == 200 and config["debridService"] == "torbox"
- ):
- id = str(uuid.uuid4())
- await database.execute(
- f"INSERT {'OR IGNORE ' if settings.DATABASE_TYPE == 'sqlite' else ''}INTO active_connections (id, ip, content, timestamp) VALUES (:id, :ip, :content, :timestamp){' ON CONFLICT DO NOTHING' if settings.DATABASE_TYPE == 'postgresql' else ''}",
- {
- "id": id,
- "ip": ip,
- "content": str(response.url),
- "timestamp": current_time,
- },
- )
-
- streamer = Streamer(id)
-
- return StreamingResponse(
- streamer.stream_content({"Range": range_header}),
- status_code=206,
- headers={
- "Content-Range": response.headers["Content-Range"],
- "Content-Length": response.headers["Content-Length"],
- "Accept-Ranges": "bytes",
- },
- background=BackgroundTask(streamer.close),
- )
-
- return FileResponse("comet/assets/uncached.mp4")
+ return await custom_handle_stream_request(
+ request.method,
+ download_url,
+ mediaflow_proxy.utils.http_utils.get_proxy_headers(request),
+ media_id=hash,
+ ip=ip,
+ )
- return RedirectResponse(download_link, status_code=302)
+ return RedirectResponse(download_url, status_code=302)
diff --git a/comet/debrid/alldebrid.py b/comet/debrid/alldebrid.py
index cb433a8..b50a910 100644
--- a/comet/debrid/alldebrid.py
+++ b/comet/debrid/alldebrid.py
@@ -1,172 +1,8 @@
import aiohttp
-import asyncio
-
-from RTN import parse
-
-from comet.utils.general import is_video
-from comet.utils.logger import logger
-from comet.utils.models import settings
class AllDebrid:
- def __init__(self, session: aiohttp.ClientSession, debrid_api_key: str):
- session.headers["Authorization"] = f"Bearer {debrid_api_key}"
- self.session = session
- self.proxy = None
-
- self.api_url = "http://api.alldebrid.com/v4"
- self.agent = "comet"
-
- async def check_premium(self):
- try:
- check_premium = await self.session.get(
- f"{self.api_url}/user?agent={self.agent}"
- )
- check_premium = await check_premium.text()
- if '"isPremium":true' in check_premium:
- return True
- except Exception as e:
- logger.warning(
- f"Exception while checking premium status on All-Debrid: {e}"
- )
-
- return False
-
- async def get_instant(self, chunk: list):
- try:
- get_instant = await self.session.get(
- f"{self.api_url}/magnet/instant?agent={self.agent}&magnets[]={'&magnets[]='.join(chunk)}"
- )
- return await get_instant.json()
- except Exception as e:
- logger.warning(
- f"Exception while checking hashes instant availability on All-Debrid: {e}"
- )
-
- async def get_files(
- self, torrent_hashes: list, type: str, season: str, episode: str, kitsu: bool, **kwargs
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
):
- chunk_size = 500
- chunks = [
- torrent_hashes[i : i + chunk_size]
- for i in range(0, len(torrent_hashes), chunk_size)
- ]
-
- tasks = []
- for chunk in chunks:
- tasks.append(self.get_instant(chunk))
-
- responses = await asyncio.gather(*tasks)
-
- availability = [response for response in responses if response]
-
- files = {}
-
- if type == "series":
- for result in availability:
- if "status" not in result or result["status"] != "success":
- continue
-
- for magnet in result["data"]["magnets"]:
- if not magnet["instant"]:
- continue
-
- for file in magnet["files"]:
- filename = file["n"]
- pack = False
- if "e" in file: # PACK
- filename = file["e"][0]["n"]
- pack = True
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- filename_parsed = parse(filename)
- if episode not in filename_parsed.episodes:
- continue
-
- if kitsu:
- if filename_parsed.seasons:
- continue
- else:
- if season not in filename_parsed.seasons:
- continue
-
- files[magnet["hash"]] = {
- "index": magnet["files"].index(file),
- "title": filename,
- "size": file["e"][0]["s"] if pack else file["s"],
- }
-
- break
- else:
- for result in availability:
- if "status" not in result or result["status"] != "success":
- continue
-
- for magnet in result["data"]["magnets"]:
- if not magnet["instant"]:
- continue
-
- for file in magnet["files"]:
- filename = file["n"]
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- files[magnet["hash"]] = {
- "index": magnet["files"].index(file),
- "title": filename,
- "size": file["s"],
- }
-
- break
-
- return files
-
- async def generate_download_link(self, hash: str, index: str):
- try:
- check_blacklisted = await self.session.get(
- f"{self.api_url}/magnet/upload?agent=comet&magnets[]={hash}"
- )
- check_blacklisted = await check_blacklisted.text()
- if "NO_SERVER" in check_blacklisted:
- self.proxy = settings.DEBRID_PROXY_URL
- if not self.proxy:
- logger.warning(
- "All-Debrid blacklisted server's IP. No proxy found."
- )
- else:
- logger.warning(
- f"All-Debrid blacklisted server's IP. Switching to proxy {self.proxy} for {hash}|{index}"
- )
-
- upload_magnet = await self.session.get(
- f"{self.api_url}/magnet/upload?agent=comet&magnets[]={hash}",
- proxy=self.proxy,
- )
- upload_magnet = await upload_magnet.json()
-
- get_magnet_status = await self.session.get(
- f"{self.api_url}/magnet/status?agent=comet&id={upload_magnet['data']['magnets'][0]['id']}",
- proxy=self.proxy,
- )
- get_magnet_status = await get_magnet_status.json()
-
- unlock_link = await self.session.get(
- f"{self.api_url}/link/unlock?agent=comet&link={get_magnet_status['data']['magnets']['links'][int(index)]['link']}",
- proxy=self.proxy,
- )
- unlock_link = await unlock_link.json()
-
- return unlock_link["data"]["link"]
- except Exception as e:
- logger.warning(
- f"Exception while getting download link from All-Debrid for {hash}|{index}: {e}"
- )
+ pass
diff --git a/comet/debrid/debridlink.py b/comet/debrid/debridlink.py
index 1d65bd2..15ef009 100644
--- a/comet/debrid/debridlink.py
+++ b/comet/debrid/debridlink.py
@@ -1,140 +1,8 @@
import aiohttp
-import asyncio
-
-from RTN import parse
-
-from comet.utils.general import is_video
-from comet.utils.logger import logger
class DebridLink:
- def __init__(self, session: aiohttp.ClientSession, debrid_api_key: str):
- session.headers["Authorization"] = f"Bearer {debrid_api_key}"
- self.session = session
- self.proxy = None
-
- self.api_url = "https://debrid-link.com/api/v2"
-
- async def check_premium(self):
- try:
- check_premium = await self.session.get(f"{self.api_url}/account/infos")
- check_premium = await check_premium.text()
- if '"accountType":1' in check_premium:
- return True
- except Exception as e:
- logger.warning(
- f"Exception while checking premium status on Debrid-Link: {e}"
- )
-
- return False
-
- async def get_instant(self, chunk: list):
- responses = []
- for hash in chunk:
- try:
- add_torrent = await self.session.post(
- f"{self.api_url}/seedbox/add",
- data={"url": hash, "wait": True, "async": True},
- )
- add_torrent = await add_torrent.json()
-
- torrent_id = add_torrent["value"]["id"]
- await self.session.delete(f"{self.api_url}/seedbox/{torrent_id}/remove")
-
- responses.append(add_torrent)
- except:
- pass
-
- return responses
-
- async def get_files(
- self, torrent_hashes: list, type: str, season: str, episode: str, kitsu: bool, **kwargs
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
):
- chunk_size = 10
- chunks = [
- torrent_hashes[i : i + chunk_size]
- for i in range(0, len(torrent_hashes), chunk_size)
- ]
-
- tasks = []
- for chunk in chunks:
- tasks.append(self.get_instant(chunk))
-
- responses = await asyncio.gather(*tasks)
-
- availability = []
- for response_list in responses:
- for response in response_list:
- availability.append(response)
-
- files = {}
-
- if type == "series":
- for result in availability:
- torrent_files = result["value"]["files"]
- for file in torrent_files:
- if file["downloadPercent"] != 100:
- continue
-
- filename = file["name"]
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- filename_parsed = parse(filename)
- if episode not in filename_parsed.episodes:
- continue
-
- if kitsu:
- if filename_parsed.seasons:
- continue
- else:
- if season not in filename_parsed.seasons:
- continue
-
- files[result["value"]["hashString"]] = {
- "index": torrent_files.index(file),
- "title": filename,
- "size": file["size"],
- }
-
- break
- else:
- for result in availability:
- value = result["value"]
- torrent_files = value["files"]
- for file in torrent_files:
- if file["downloadPercent"] != 100:
- continue
-
- filename = file["name"]
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- files[value["hashString"]] = {
- "index": torrent_files.index(file),
- "title": filename,
- "size": file["size"],
- }
-
- return files
-
- async def generate_download_link(self, hash: str, index: str):
- try:
- add_torrent = await self.session.post(
- f"{self.api_url}/seedbox/add", data={"url": hash, "async": True}
- )
- add_torrent = await add_torrent.json()
-
- return add_torrent["value"]["files"][int(index)]["downloadUrl"]
- except Exception as e:
- logger.warning(
- f"Exception while getting download link from Debrid-Link for {hash}|{index}: {e}"
- )
+ pass
diff --git a/comet/debrid/easydebrid.py b/comet/debrid/easydebrid.py
new file mode 100644
index 0000000..0da4ed3
--- /dev/null
+++ b/comet/debrid/easydebrid.py
@@ -0,0 +1,8 @@
+import aiohttp
+
+
+class EasyDebrid:
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
+ ):
+ pass
diff --git a/comet/debrid/manager.py b/comet/debrid/manager.py
index 3646dda..3cd8786 100644
--- a/comet/debrid/manager.py
+++ b/comet/debrid/manager.py
@@ -1,35 +1,109 @@
import aiohttp
-from comet.utils.config import should_use_stremthru
-
from .realdebrid import RealDebrid
from .alldebrid import AllDebrid
from .premiumize import Premiumize
from .torbox import TorBox
from .debridlink import DebridLink
+from .torrent import Torrent
from .stremthru import StremThru
+from .easydebrid import EasyDebrid
+from .offcloud import Offcloud
+from .pikpak import PikPak
+
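+# Registry of supported debrid services: short name -> extension label,
+# whether the provider exposes a dedicated cache-availability endpoint, and
+# the backing client class.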
+debrid_services = {
+ "realdebrid": {
+ "extension": "RD",
+ "cache_availability_endpoint": False,
+ "class": RealDebrid,
+ },
+ "alldebrid": {
+ "extension": "AD",
+ "cache_availability_endpoint": False,
+ "class": AllDebrid,
+ },
+ "premiumize": {
+ "extension": "PM",
+ "cache_availability_endpoint": True,
+ "class": Premiumize,
+ },
+ "torbox": {"extension": "TB", "cache_availability_endpoint": True, "class": TorBox},
+ "debridlink": {
+ "extension": "DL",
+ "cache_availability_endpoint": False,
+ "class": DebridLink,
+ },
+ "stremthru": {
+ "extension": "ST",
+ "cache_availability_endpoint": True,
+ "class": StremThru,
+ },
+ "easydebrid": {
+ "extension": "ED",
+ "cache_availability_endpoint": True,
+ "class": EasyDebrid,
+ },
+ "offcloud": {
+ "extension": "OC",
+ "cache_availability_endpoint": False,
+ "class": Offcloud,
+ },
+ "pikpak": {
+ "extension": "PP",
+ "cache_availability_endpoint": False,
+ "class": PikPak,
+ },
+ "torrent": {
+ "extension": "TORRENT",
+ "cache_availability_endpoint": False,
+ "class": Torrent,
+ },
+}
+
+
+def get_debrid_extension(debrid_service: str):
+ original_extension = debrid_services[debrid_service]["extension"]
+ return original_extension
-def getDebrid(session: aiohttp.ClientSession, config: dict, ip: str):
- debrid_service = config["debridService"]
- debrid_api_key = config["debridApiKey"]
- if should_use_stremthru(config):
- return StremThru(
- session=session,
- url=config["stremthruUrl"],
- debrid_service=debrid_service,
- token=debrid_api_key,
- ip=ip,
+def build_stremthru_token(debrid_service: str, debrid_api_key: str):
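+ # The StremThru client takes credentials as a single "store:token" string
+ # (e.g. "realdebrid:<api key>"); StremThru.parse_store_creds splits it back apart.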
+ return f"{debrid_service}:{debrid_api_key}"
+
+
+def get_debrid(
+ session: aiohttp.ClientSession,
+ video_id: str,
+ media_only_id: str,
+ debrid_service: str,
+ debrid_api_key: str,
+ ip: str,
+):
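+ # Every debrid service is reached through the StremThru client; only the
+ # direct "torrent" mode bypasses it.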
+ if debrid_service != "torrent":
+ return debrid_services["stremthru"]["class"](
+ session,
+ video_id,
+ media_only_id,
+ build_stremthru_token(debrid_service, debrid_api_key),
+ ip,
)
- if debrid_service == "realdebrid":
- return RealDebrid(session, debrid_api_key, ip)
- elif debrid_service == "alldebrid":
- return AllDebrid(session, debrid_api_key)
- elif debrid_service == "premiumize":
- return Premiumize(session, debrid_api_key)
- elif debrid_service == "torbox":
- return TorBox(session, debrid_api_key)
- elif debrid_service == "debridlink":
- return DebridLink(session, debrid_api_key)
\ No newline at end of file
+
+async def retrieve_debrid_availability(
+ session: aiohttp.ClientSession,
+ video_id: str,
+ media_only_id: str,
+ debrid_service: str,
+ debrid_api_key: str,
+ ip: str,
+ info_hashes: list,
+ seeders_map: dict,
+ tracker_map: dict,
+ sources_map: dict,
+):
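+ # Direct torrenting has no debrid cache to consult.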
+ if debrid_service == "torrent":
+ return []
+
+ return await get_debrid(
+ session, video_id, media_only_id, debrid_service, debrid_api_key, ip
+ ).get_availability(info_hashes, seeders_map, tracker_map, sources_map)
diff --git a/comet/debrid/offcloud.py b/comet/debrid/offcloud.py
new file mode 100644
index 0000000..902ef10
--- /dev/null
+++ b/comet/debrid/offcloud.py
@@ -0,0 +1,8 @@
+import aiohttp
+
+
+class Offcloud:
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
+ ):
+ pass
diff --git a/comet/debrid/pikpak.py b/comet/debrid/pikpak.py
new file mode 100644
index 0000000..359e525
--- /dev/null
+++ b/comet/debrid/pikpak.py
@@ -0,0 +1,8 @@
+import aiohttp
+
+
+class PikPak:
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
+ ):
+ pass
diff --git a/comet/debrid/premiumize.py b/comet/debrid/premiumize.py
index 27fdc93..c7c99b2 100644
--- a/comet/debrid/premiumize.py
+++ b/comet/debrid/premiumize.py
@@ -1,177 +1,8 @@
import aiohttp
-import asyncio
-
-from RTN import parse
-
-from comet.utils.general import is_video
-from comet.utils.logger import logger
class Premiumize:
- def __init__(self, session: aiohttp.ClientSession, debrid_api_key: str):
- self.session = session
- self.proxy = None
-
- self.api_url = "https://premiumize.me/api"
- self.debrid_api_key = debrid_api_key
-
- async def check_premium(self):
- try:
- check_premium = await self.session.get(
- f"{self.api_url}/account/info?apikey={self.debrid_api_key}"
- )
- check_premium = await check_premium.text()
- if (
- '"status":"success"' in check_premium
- and '"premium_until":null' not in check_premium
- ):
- return True
- except Exception as e:
- logger.warning(
- f"Exception while checking premium status on Premiumize: {e}"
- )
-
- return False
-
- async def get_instant(self, chunk: list):
- try:
- response = await self.session.get(
- f"{self.api_url}/cache/check?apikey={self.debrid_api_key}&items[]={'&items[]='.join(chunk)}"
- )
-
- response = await response.json()
- response["hashes"] = chunk
-
- return response
- except Exception as e:
- logger.warning(
- f"Exception while checking hash instant availability on Premiumize: {e}"
- )
-
- async def get_files(
- self, torrent_hashes: list, type: str, season: str, episode: str, kitsu: bool, **kwargs
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
):
- chunk_size = 100
- chunks = [
- torrent_hashes[i : i + chunk_size]
- for i in range(0, len(torrent_hashes), chunk_size)
- ]
-
- tasks = []
- for chunk in chunks:
- tasks.append(self.get_instant(chunk))
-
- responses = await asyncio.gather(*tasks)
-
- availability = []
- for response in responses:
- if not response:
- continue
-
- availability.append(response)
-
- files = {}
-
- if type == "series":
- for result in availability:
- if result["status"] != "success":
- continue
-
- responses = result["response"]
- filenames = result["filename"]
- filesizes = result["filesize"]
- hashes = result["hashes"]
- for index, response in enumerate(responses):
- if not response:
- continue
-
- if not filesizes[index]:
- continue
-
- filename = filenames[index]
-
- if "sample" in filename.lower():
- continue
-
- filename_parsed = parse(filename)
- if episode not in filename_parsed.episodes:
- continue
-
- if kitsu:
- if filename_parsed.seasons:
- continue
- else:
- if season not in filename_parsed.seasons:
- continue
-
- files[hashes[index]] = {
- "index": f"{season}|{episode}",
- "title": filename,
- "size": int(filesizes[index]),
- }
- else:
- for result in availability:
- if result["status"] != "success":
- continue
-
- responses = result["response"]
- filenames = result["filename"]
- filesizes = result["filesize"]
- hashes = result["hashes"]
- for index, response in enumerate(responses):
- if response is False:
- continue
-
- if not filesizes[index]:
- continue
-
- filename = filenames[index]
-
- if "sample" in filename.lower():
- continue
-
- files[hashes[index]] = {
- "index": 0,
- "title": filename,
- "size": int(filesizes[index]),
- }
-
- return files
-
- async def generate_download_link(self, hash: str, index: str):
- try:
- add_magnet = await self.session.post(
- f"{self.api_url}/transfer/directdl?apikey={self.debrid_api_key}&src=magnet:?xt=urn:btih:{hash}",
- )
- add_magnet = await add_magnet.json()
-
- season = None
- if "|" in index:
- index = index.split("|")
- season = int(index[0])
- episode = int(index[1])
-
- content = add_magnet["content"]
- for file in content:
- filename = file["path"]
- if "/" in filename:
- filename = filename.split("/")[1]
-
- if not is_video(filename):
- content.remove(file)
- continue
-
- if season is not None:
- filename_parsed = parse(filename)
- if (
- season in filename_parsed.seasons
- and episode in filename_parsed.episodes
- ):
- return file["link"]
-
- max_size_item = max(content, key=lambda x: x["size"])
- return max_size_item["link"]
- except Exception as e:
- logger.warning(
- f"Exception while getting download link from Premiumize for {hash}|{index}: {e}"
- )
+ pass
diff --git a/comet/debrid/realdebrid.py b/comet/debrid/realdebrid.py
index 33b993e..cebd75e 100644
--- a/comet/debrid/realdebrid.py
+++ b/comet/debrid/realdebrid.py
@@ -1,192 +1,8 @@
import aiohttp
-import asyncio
-
-from RTN import parse
-
-from comet.utils.general import is_video
-from comet.utils.logger import logger
-from comet.utils.models import settings
class RealDebrid:
- def __init__(self, session: aiohttp.ClientSession, debrid_api_key: str, ip: str):
- session.headers["Authorization"] = f"Bearer {debrid_api_key}"
- self.session = session
- self.ip = ip
- self.proxy = None
-
- self.api_url = "https://api.real-debrid.com/rest/1.0"
-
- async def check_premium(self):
- try:
- check_premium = await self.session.get(f"{self.api_url}/user")
- check_premium = await check_premium.text()
- if '"type": "premium"' in check_premium:
- return True
- except Exception as e:
- logger.warning(
- f"Exception while checking premium status on Real-Debrid: {e}"
- )
-
- return False
-
- async def get_instant(self, chunk: list):
- try:
- response = await self.session.get(
- f"{self.api_url}/torrents/instantAvailability/{'/'.join(chunk)}"
- )
- return await response.json()
- except Exception as e:
- logger.warning(
- f"Exception while checking hash instant availability on Real-Debrid: {e}"
- )
-
- async def get_files(
- self, torrent_hashes: list, type: str, season: str, episode: str, kitsu: bool, **kwargs
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
):
- chunk_size = 100
- chunks = [
- torrent_hashes[i : i + chunk_size]
- for i in range(0, len(torrent_hashes), chunk_size)
- ]
-
- tasks = []
- for chunk in chunks:
- tasks.append(self.get_instant(chunk))
-
- responses = await asyncio.gather(*tasks)
-
- availability = {}
- for response in responses:
- if response is not None:
- availability.update(response)
-
- files = {}
-
- if type == "series":
- for hash, details in availability.items():
- if "rd" not in details:
- continue
-
- for variants in details["rd"]:
- for index, file in variants.items():
- filename = file["filename"]
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- filename_parsed = parse(filename)
- if episode not in filename_parsed.episodes:
- continue
-
- if kitsu:
- if filename_parsed.seasons:
- continue
- else:
- if season not in filename_parsed.seasons:
- continue
-
- files[hash] = {
- "index": index,
- "title": filename,
- "size": file["filesize"],
- }
-
- break
- else:
- for hash, details in availability.items():
- if "rd" not in details:
- continue
-
- for variants in details["rd"]:
- for index, file in variants.items():
- filename = file["filename"]
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- files[hash] = {
- "index": index,
- "title": filename,
- "size": file["filesize"],
- }
-
- break
-
- return files
-
- async def generate_download_link(self, hash: str, index: str):
- try:
- check_blacklisted = await self.session.get("https://real-debrid.com/vpn")
- check_blacklisted = await check_blacklisted.text()
- if (
- "Your ISP or VPN provider IP address is currently blocked on our website"
- in check_blacklisted
- ):
- self.proxy = settings.DEBRID_PROXY_URL
- if not self.proxy:
- logger.warning(
- "Real-Debrid blacklisted server's IP. No proxy found."
- )
- else:
- logger.warning(
- f"Real-Debrid blacklisted server's IP. Switching to proxy {self.proxy} for {hash}|{index}"
- )
-
- add_magnet = await self.session.post(
- f"{self.api_url}/torrents/addMagnet",
- data={"magnet": f"magnet:?xt=urn:btih:{hash}", "ip": self.ip},
- proxy=self.proxy,
- )
- add_magnet = await add_magnet.json()
-
- get_magnet_info = await self.session.get(
- add_magnet["uri"], proxy=self.proxy
- )
- get_magnet_info = await get_magnet_info.json()
-
- await self.session.post(
- f"{self.api_url}/torrents/selectFiles/{add_magnet['id']}",
- data={
- "files": ",".join(
- str(file["id"])
- for file in get_magnet_info["files"]
- if is_video(file["path"])
- ),
- "ip": self.ip,
- },
- proxy=self.proxy,
- )
-
- get_magnet_info = await self.session.get(
- add_magnet["uri"], proxy=self.proxy
- )
- get_magnet_info = await get_magnet_info.json()
-
- index = int(index)
- realIndex = index
- for file in get_magnet_info["files"]:
- if file["id"] == realIndex:
- break
-
- if file["selected"] != 1:
- index -= 1
-
- unrestrict_link = await self.session.post(
- f"{self.api_url}/unrestrict/link",
- data={"link": get_magnet_info["links"][index - 1], "ip": self.ip},
- proxy=self.proxy,
- )
- unrestrict_link = await unrestrict_link.json()
-
- return unrestrict_link["download"]
- except Exception as e:
- logger.warning(
- f"Exception while getting download link from Real-Debrid for {hash}|{index}: {e}"
- )
+ pass
diff --git a/comet/debrid/stremthru.py b/comet/debrid/stremthru.py
index 5b58734..a4972a9 100644
--- a/comet/debrid/stremthru.py
+++ b/comet/debrid/stremthru.py
@@ -1,59 +1,44 @@
+import aiohttp
import asyncio
-from typing import Optional
-import aiohttp
-from RTN import parse
+from RTN import parse, title_match
+from comet.utils.models import settings
from comet.utils.general import is_video
+from comet.utils.debrid import cache_availability
from comet.utils.logger import logger
+from comet.utils.torrent import torrent_update_queue
class StremThru:
def __init__(
self,
session: aiohttp.ClientSession,
- url: str,
+ video_id: str,
+ media_only_id: str,
token: str,
- debrid_service: str,
ip: str,
):
- if not self.is_supported_store(debrid_service):
- raise ValueError(f"unsupported store: {debrid_service}")
-
- store, token = self.parse_store_creds(debrid_service, token)
- if store == "stremthru":
- session.headers["Proxy-Authorization"] = f"Basic {token}"
- else:
- session.headers["X-StremThru-Store-Name"] = store
- session.headers["X-StremThru-Store-Authorization"] = f"Bearer {token}"
+ store, token = self.parse_store_creds(token)
+ session.headers["X-StremThru-Store-Name"] = store
+ session.headers["X-StremThru-Store-Authorization"] = f"Bearer {token}"
session.headers["User-Agent"] = "comet"
self.session = session
- self.base_url = f"{url}/v0/store"
- self.name = f"StremThru[{debrid_service}]" if debrid_service else "StremThru"
+ self.base_url = f"{settings.STREMTHRU_URL}/v0/store"
+ self.name = f"StremThru-{store}"
+ self.real_debrid_name = store
self.client_ip = ip
+ self.sid = video_id
+ self.media_only_id = media_only_id
- @staticmethod
- def parse_store_creds(debrid_service, token: str = ""):
- if debrid_service != "stremthru":
- return debrid_service, token
+ def parse_store_creds(self, token: str):
if ":" in token:
- parts = token.split(":")
+ parts = token.split(":", 1)
return parts[0], parts[1]
- return debrid_service, token
-
- @staticmethod
- def is_supported_store(name: Optional[str]):
- return (
- name == "stremthru"
- or name == "alldebrid"
- or name == "debridlink"
- or name == "easydebrid"
- or name == "premiumize"
- or name == "realdebrid"
- or name == "torbox"
- )
+
+ return token, ""
async def check_premium(self):
try:
@@ -69,11 +54,9 @@ async def check_premium(self):
return False
- async def get_instant(self, magnets: list, sid: Optional[str] = None):
+ async def get_instant(self, magnets: list):
try:
- url = f"{self.base_url}/magnets/check?magnet={','.join(magnets)}&client_ip={self.client_ip}"
- if sid:
- url = f"{url}&sid={sid}"
+ url = f"{self.base_url}/magnets/check?magnet={','.join(magnets)}&client_ip={self.client_ip}&sid={self.sid}"
magnet = await self.session.get(url)
return await magnet.json()
except Exception as e:
@@ -81,17 +64,17 @@ async def get_instant(self, magnets: list, sid: Optional[str] = None):
f"Exception while checking hash instant availability on {self.name}: {e}"
)
- async def get_files(
+ async def get_availability(
self,
torrent_hashes: list,
- type: str,
- season: str,
- episode: str,
- kitsu: bool,
- video_id: Optional[str] = None,
- **kwargs,
+ seeders_map: dict,
+ tracker_map: dict,
+ sources_map: dict,
):
- chunk_size = 25
+ if not await self.check_premium():
+ return []
+
+ chunk_size = 50
chunks = [
torrent_hashes[i : i + chunk_size]
for i in range(0, len(torrent_hashes), chunk_size)
@@ -99,7 +82,7 @@ async def get_files(
tasks = []
for chunk in chunks:
- tasks.append(self.get_instant(chunk, sid=video_id))
+ tasks.append(self.get_instant(chunk))
responses = await asyncio.gather(*tasks)
@@ -109,62 +92,83 @@ async def get_files(
if response and "data" in response
]
- files = {}
-
- if type == "series":
- for magnets in availability:
- for magnet in magnets:
- if magnet["status"] != "cached":
- continue
-
- for file in magnet["files"]:
- filename = file["name"]
-
- if not is_video(filename) or "sample" in filename:
+ is_offcloud = self.real_debrid_name == "offcloud"
+
+ files = []
+ cached_count = 0
+ for result in availability:
+ for torrent in result:
+ if torrent["status"] != "cached":
+ continue
+
+ cached_count += 1
+ hash = torrent["hash"]
+ seeders = seeders_map[hash]
+ tracker = tracker_map[hash]
+ sources = sources_map[hash]
+
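+ # Offcloud results are not inspected per file here, so store a single
+ # placeholder entry identified only by the info hash.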
+ if is_offcloud:
+ file_info = {
+ "info_hash": hash,
+ "index": None,
+ "title": None,
+ "size": None,
+ "season": None,
+ "episode": None,
+ "parsed": None,
+ }
+
+ files.append(file_info)
+ else:
+ for file in torrent["files"]:
+ filename = file["name"].split("/")[-1]
+
+ if not is_video(filename) or "sample" in filename.lower():
continue
filename_parsed = parse(filename)
- if episode not in filename_parsed.episodes:
+ season = (
+ filename_parsed.seasons[0]
+ if filename_parsed.seasons
+ else None
+ )
+ episode = (
+ filename_parsed.episodes[0]
+ if filename_parsed.episodes
+ else None
+ )
+ if ":" in self.sid and (season is None or episode is None):
continue
- if kitsu:
- if filename_parsed.seasons:
- continue
- else:
- if season not in filename_parsed.seasons:
- continue
-
- files[magnet["hash"]] = {
- "index": file["index"],
- "title": filename,
- "size": file["size"],
- }
-
- break
- else:
- for magnets in availability:
- for magnet in magnets:
- if magnet["status"] != "cached":
- continue
-
- for file in magnet["files"]:
- filename = file["name"]
+ index = file["index"] if file["index"] != -1 else None
+ size = file["size"] if file["size"] != -1 else None
- if not is_video(filename) or "sample" in filename:
- continue
-
- files[magnet["hash"]] = {
- "index": file["index"],
+ file_info = {
+ "info_hash": hash,
+ "index": index,
"title": filename,
- "size": file["size"],
+ "size": size,
+ "season": season,
+ "episode": episode,
+ "parsed": filename_parsed,
+ "seeders": seeders,
+ "tracker": tracker,
+ "sources": sources,
}
- break
+ files.append(file_info)
+ await torrent_update_queue.add_torrent_info(file_info, self.media_only_id)
+ logger.log(
+ "SCRAPER",
+ f"{self.name}: Found {cached_count} cached torrents with {len(files)} valid files",
+ )
return files
- async def generate_download_link(self, hash: str, index: str):
+ async def generate_download_link(
+ self, hash: str, index: str, name: str, season: int, episode: int
+ ):
try:
magnet = await self.session.post(
f"{self.base_url}/magnets?client_ip={self.client_ip}",
@@ -175,26 +179,57 @@ async def generate_download_link(self, hash: str, index: str):
if magnet["data"]["status"] != "downloaded":
return
- file = next(
- (
- file
- for file in magnet["data"]["files"]
- if str(file["index"]) == index or file["name"] == index
- ),
- None,
- )
-
- if not file:
+ name_parsed = parse(name)
+ target_file = None
+
+ files = []
+ for file in magnet["data"]["files"]:
+ filename = file["name"]
+ filename_parsed = parse(filename)
+
+ if not is_video(filename) or not title_match(
+ name_parsed.parsed_title, filename_parsed.parsed_title
+ ):
+ continue
+
+ file_season = (
+ filename_parsed.seasons[0] if filename_parsed.seasons else None
+ )
+ file_episode = (
+ filename_parsed.episodes[0] if filename_parsed.episodes else None
+ )
+ file_index = file["index"] if file["index"] != -1 else None
+ file_size = file["size"] if file["size"] != -1 else None
+
+ file_info = {
+ "info_hash": hash,
+ "index": file_index,
+ "title": filename,
+ "size": file_size,
+ "season": file_season,
+ "episode": file_episode,
+ "parsed": filename_parsed,
+ }
+ files.append(file_info)
+
+ if str(file["index"]) == index:
+ target_file = file
+
+ if season == file_season and episode == file_episode:
+ target_file = file
+
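+ # Opportunistically cache availability for every file parsed above in the
+ # background so later lookups for this torrent hit the local cache.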
+ if len(files) > 0:
+ asyncio.create_task(cache_availability(self.real_debrid_name, files))
+
+ if not target_file:
return
link = await self.session.post(
f"{self.base_url}/link/generate?client_ip={self.client_ip}",
- json={"link": file["link"]},
+ json={"link": target_file["link"]},
)
link = await link.json()
return link["data"]["link"]
except Exception as e:
- logger.warning(
- f"Exception while getting download link from {self.name} for {hash}|{index}: {e}"
- )
+ logger.warning(f"Exception while getting download link for {hash}: {e}")
diff --git a/comet/debrid/torbox.py b/comet/debrid/torbox.py
index 7e17054..06960c9 100644
--- a/comet/debrid/torbox.py
+++ b/comet/debrid/torbox.py
@@ -1,161 +1,8 @@
import aiohttp
-import asyncio
-
-from RTN import parse
-
-from comet.utils.general import is_video
-from comet.utils.logger import logger
class TorBox:
- def __init__(self, session: aiohttp.ClientSession, debrid_api_key: str):
- session.headers["Authorization"] = f"Bearer {debrid_api_key}"
- self.session = session
- self.proxy = None
-
- self.api_url = "https://api.torbox.app/v1/api"
- self.debrid_api_key = debrid_api_key
-
- async def check_premium(self):
- try:
- check_premium = await self.session.get(
- f"{self.api_url}/user/me?settings=false"
- )
- check_premium = await check_premium.text()
- if '"success":true' in check_premium:
- return True
- except Exception as e:
- logger.warning(f"Exception while checking premium status on TorBox: {e}")
-
- return False
-
- async def get_instant(self, chunk: list):
- try:
- response = await self.session.get(
- f"{self.api_url}/torrents/checkcached?hash={','.join(chunk)}&format=list&list_files=true"
- )
- return await response.json()
- except Exception as e:
- logger.warning(
- f"Exception while checking hash instant availability on TorBox: {e}"
- )
-
- async def get_files(
- self, torrent_hashes: list, type: str, season: str, episode: str, kitsu: bool, **kwargs
+ def __init__(
+ self, session: aiohttp.ClientSession, video_id, debrid_api_key: str, ip: str
):
- chunk_size = 50
- chunks = [
- torrent_hashes[i : i + chunk_size]
- for i in range(0, len(torrent_hashes), chunk_size)
- ]
-
- tasks = []
- for chunk in chunks:
- tasks.append(self.get_instant(chunk))
-
- responses = await asyncio.gather(*tasks)
-
- availability = [response for response in responses if response is not None]
-
- files = {}
-
- if type == "series":
- for result in availability:
- if not result["success"] or not result["data"]:
- continue
-
- for torrent in result["data"]:
- torrent_files = torrent["files"]
- for file in torrent_files:
- filename = file["name"].split("/")[1]
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- filename_parsed = parse(filename)
- if episode not in filename_parsed.episodes:
- continue
-
- if kitsu:
- if filename_parsed.seasons:
- continue
- else:
- if season not in filename_parsed.seasons:
- continue
-
- files[torrent["hash"]] = {
- "index": torrent_files.index(file),
- "title": filename,
- "size": file["size"],
- }
-
- break
- else:
- for result in availability:
- if not result["success"] or not result["data"]:
- continue
-
- for torrent in result["data"]:
- torrent_files = torrent["files"]
- for file in torrent_files:
- filename = file["name"].split("/")[1]
-
- if not is_video(filename):
- continue
-
- if "sample" in filename.lower():
- continue
-
- files[torrent["hash"]] = {
- "index": torrent_files.index(file),
- "title": filename,
- "size": file["size"],
- }
-
- break
-
- return files
-
- async def generate_download_link(self, hash: str, index: str):
- try:
- get_torrents = await self.session.get(
- f"{self.api_url}/torrents/mylist?bypass_cache=true"
- )
- get_torrents = await get_torrents.json()
- exists = False
- for torrent in get_torrents["data"]:
- if torrent["hash"] == hash:
- torrent_id = torrent["id"]
- exists = True
- break
- if not exists:
- create_torrent = await self.session.post(
- f"{self.api_url}/torrents/createtorrent",
- data={"magnet": f"magnet:?xt=urn:btih:{hash}"},
- )
- create_torrent = await create_torrent.json()
- torrent_id = create_torrent["data"]["torrent_id"]
-
- # get_torrents = await self.session.get(
- # f"{self.api_url}/torrents/mylist?bypass_cache=true"
- # )
- # get_torrents = await get_torrents.json()
-
- # for torrent in get_torrents["data"]:
- # if torrent["id"] == torrent_id:
- # file_id = torrent["files"][int(index)]["id"]
- # Useless, we already have file index
-
- get_download_link = await self.session.get(
- f"{self.api_url}/torrents/requestdl?token={self.debrid_api_key}&torrent_id={torrent_id}&file_id={index}&zip=false",
- )
- get_download_link = await get_download_link.json()
-
- return get_download_link["data"]
- except Exception as e:
- logger.warning(
- f"Exception while getting download link from TorBox for {hash}|{index}: {e}"
- )
+ pass
diff --git a/comet/debrid/torrent.py b/comet/debrid/torrent.py
new file mode 100644
index 0000000..b0c915a
--- /dev/null
+++ b/comet/debrid/torrent.py
@@ -0,0 +1,3 @@
+class Torrent:
+ def __init__(self):
+ pass
diff --git a/comet/main.py b/comet/main.py
index 7a732bc..0868473 100644
--- a/comet/main.py
+++ b/comet/main.py
@@ -1,177 +1,236 @@
-import contextlib
-import signal
-import sys
-import threading
-import time
-import traceback
-from contextlib import asynccontextmanager
-
-import uvicorn
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.staticfiles import StaticFiles
-from starlette.middleware.base import BaseHTTPMiddleware
-from starlette.requests import Request
-
-from comet.api.core import main
-from comet.api.stream import streams
-from comet.utils.db import setup_database, teardown_database
-from comet.utils.logger import logger
-from comet.utils.models import settings
-
-
-class LoguruMiddleware(BaseHTTPMiddleware):
- async def dispatch(self, request: Request, call_next):
- start_time = time.time()
- try:
- response = await call_next(request)
- except Exception as e:
- logger.exception(f"Exception during request processing: {e}")
- raise
- finally:
- process_time = time.time() - start_time
- logger.log(
- "API",
- f"{request.method} {request.url.path} - {response.status_code if 'response' in locals() else '500'} - {process_time:.2f}s",
- )
- return response
-
-
-@asynccontextmanager
-async def lifespan(app: FastAPI):
- await setup_database()
- yield
- await teardown_database()
-
-
-app = FastAPI(
- title="Comet",
- summary="Stremio's fastest torrent/debrid search add-on.",
- version="1.0.0",
- lifespan=lifespan,
- redoc_url=None,
-)
-
-
-app.add_middleware(LoguruMiddleware)
-app.add_middleware(
- CORSMiddleware,
- allow_origins=["*"],
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
-)
-
-app.mount("/static", StaticFiles(directory="comet/templates"), name="static")
-
-app.include_router(main)
-app.include_router(streams)
-
-
-class Server(uvicorn.Server):
- def install_signal_handlers(self):
- pass
-
- @contextlib.contextmanager
- def run_in_thread(self):
- thread = threading.Thread(target=self.run, name="Comet")
- thread.start()
- try:
- while not self.started:
- time.sleep(1e-3)
- yield
- except Exception as e:
- logger.error(f"Error in server thread: {e}")
- logger.exception(traceback.format_exc())
- raise e
- finally:
- self.should_exit = True
- sys.exit(0)
-
-
-def signal_handler(sig, frame):
- # This will handle kubernetes/docker shutdowns better
- # Toss anything that needs to be gracefully shutdown here
- logger.log("COMET", "Exiting Gracefully.")
- sys.exit(0)
-
-
-signal.signal(signal.SIGINT, signal_handler)
-signal.signal(signal.SIGTERM, signal_handler)
-
-config = uvicorn.Config(
- app,
- host=settings.FASTAPI_HOST,
- port=settings.FASTAPI_PORT,
- proxy_headers=True,
- forwarded_allow_ips="*",
- workers=settings.FASTAPI_WORKERS,
- log_config=None,
-)
-server = Server(config=config)
-
-
-def start_log():
- logger.log(
- "COMET",
- f"Server started on http://{settings.FASTAPI_HOST}:{settings.FASTAPI_PORT} - {settings.FASTAPI_WORKERS} workers",
- )
- logger.log(
- "COMET",
- f"Dashboard Admin Password: {settings.DASHBOARD_ADMIN_PASSWORD} - http://{settings.FASTAPI_HOST}:{settings.FASTAPI_PORT}/active-connections?password={settings.DASHBOARD_ADMIN_PASSWORD}",
- )
- logger.log(
- "COMET",
- f"Database ({settings.DATABASE_TYPE}): {settings.DATABASE_PATH if settings.DATABASE_TYPE == 'sqlite' else settings.DATABASE_URL} - TTL: {settings.CACHE_TTL}s",
- )
- logger.log("COMET", f"Debrid Proxy: {settings.DEBRID_PROXY_URL}")
-
- if settings.INDEXER_MANAGER_TYPE:
- logger.log(
- "COMET",
- f"Indexer Manager: {settings.INDEXER_MANAGER_TYPE}|{settings.INDEXER_MANAGER_URL} - Timeout: {settings.INDEXER_MANAGER_TIMEOUT}s",
- )
- logger.log("COMET", f"Indexers: {', '.join(settings.INDEXER_MANAGER_INDEXERS)}")
- logger.log("COMET", f"Get Torrent Timeout: {settings.GET_TORRENT_TIMEOUT}s")
- else:
- logger.log("COMET", "Indexer Manager: False")
-
- if settings.ZILEAN_URL:
- logger.log(
- "COMET",
- f"Zilean: {settings.ZILEAN_URL} - Take first: {settings.ZILEAN_TAKE_FIRST}",
- )
- else:
- logger.log("COMET", "Zilean: False")
-
- logger.log("COMET", f"Torrentio Scraper: {bool(settings.SCRAPE_TORRENTIO)}")
-
- mediafusion_url = f" - {settings.MEDIAFUSION_URL}"
- logger.log(
- "COMET",
- f"MediaFusion Scraper: {bool(settings.SCRAPE_MEDIAFUSION)}{mediafusion_url if settings.SCRAPE_MEDIAFUSION else ''}",
- )
-
- logger.log(
- "COMET",
- f"Debrid Stream Proxy: {bool(settings.PROXY_DEBRID_STREAM)} - Password: {settings.PROXY_DEBRID_STREAM_PASSWORD} - Max Connections: {settings.PROXY_DEBRID_STREAM_MAX_CONNECTIONS} - Default Debrid Service: {settings.PROXY_DEBRID_STREAM_DEBRID_DEFAULT_SERVICE} - Default Debrid API Key: {settings.PROXY_DEBRID_STREAM_DEBRID_DEFAULT_APIKEY}",
- )
- if settings.STREMTHRU_DEFAULT_URL:
- logger.log("COMET", f"Default StremThru URL: {settings.STREMTHRU_DEFAULT_URL}")
- logger.log("COMET", f"Title Match Check: {bool(settings.TITLE_MATCH_CHECK)}")
- logger.log("COMET", f"Remove Adult Content: {bool(settings.REMOVE_ADULT_CONTENT)}")
- logger.log("COMET", f"Custom Header HTML: {bool(settings.CUSTOM_HEADER_HTML)}")
-
-
-with server.run_in_thread():
- start_log()
- try:
- while True:
- time.sleep(1) # Keep the main thread alive
- except KeyboardInterrupt:
- logger.log("COMET", "Server stopped by user")
- except Exception as e:
- logger.error(f"Unexpected error: {e}")
- logger.exception(traceback.format_exc())
- finally:
- logger.log("COMET", "Server Shutdown")
+import contextlib
+import signal
+import sys
+import threading
+import time
+import traceback
+import uvicorn
+import os
+
+from contextlib import asynccontextmanager
+
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.requests import Request
+
+from comet.api.core import main
+from comet.api.stream import streams
+from comet.utils.database import setup_database, teardown_database
+from comet.utils.trackers import download_best_trackers
+from comet.utils.logger import logger
+from comet.utils.models import settings
+
+
+class LoguruMiddleware(BaseHTTPMiddleware):
+ async def dispatch(self, request: Request, call_next):
+ start_time = time.time()
+ try:
+ response = await call_next(request)
+ except Exception as e:
+ logger.exception(f"Exception during request processing: {e}")
+ raise
+ finally:
+ process_time = time.time() - start_time
+ logger.log(
+ "API",
+ f"{request.method} {request.url.path} - {response.status_code if 'response' in locals() else '500'} - {process_time:.2f}s",
+ )
+ return response
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ await setup_database()
+ await download_best_trackers()
+ yield
+ await teardown_database()
+
+
+app = FastAPI(
+ title="Comet",
+ summary="Stremio's fastest torrent/debrid search add-on.",
+ lifespan=lifespan,
+ redoc_url=None,
+)
+
+
+app.add_middleware(LoguruMiddleware)
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+app.mount("/static", StaticFiles(directory="comet/templates"), name="static")
+
+app.include_router(main)
+app.include_router(streams)
+
+
+class Server(uvicorn.Server):
+ def install_signal_handlers(self):
+ pass
+
+ @contextlib.contextmanager
+ def run_in_thread(self):
+ thread = threading.Thread(target=self.run, name="Comet")
+ thread.start()
+ try:
+ while not self.started:
+ time.sleep(1e-3)
+ yield
+ except Exception as e:
+ logger.error(f"Error in server thread: {e}")
+ logger.exception(traceback.format_exc())
+ raise e
+ finally:
+ self.should_exit = True
+ sys.exit(0)
+
+
+def signal_handler(sig, frame):
+ # This will handle kubernetes/docker shutdowns better
+ # Toss anything that needs to be gracefully shut down here
+ logger.log("COMET", "Exiting Gracefully.")
+ sys.exit(0)
+
+
+signal.signal(signal.SIGINT, signal_handler)
+signal.signal(signal.SIGTERM, signal_handler)
+
+
+def start_log():
+ logger.log(
+ "COMET",
+ f"Server started on http://{settings.FASTAPI_HOST}:{settings.FASTAPI_PORT} - {settings.FASTAPI_WORKERS} workers",
+ )
+ logger.log(
+ "COMET",
+ f"Dashboard Admin Password: {settings.DASHBOARD_ADMIN_PASSWORD} - http://{settings.FASTAPI_HOST}:{settings.FASTAPI_PORT}/dashboard",
+ )
+ logger.log(
+ "COMET",
+ f"Database ({settings.DATABASE_TYPE}): {settings.DATABASE_PATH if settings.DATABASE_TYPE == 'sqlite' else settings.DATABASE_URL} - TTL: metadata={settings.METADATA_CACHE_TTL}s, torrents={settings.TORRENT_CACHE_TTL}s, debrid={settings.DEBRID_CACHE_TTL}s",
+ )
+ logger.log("COMET", f"Debrid Proxy: {settings.DEBRID_PROXY_URL}")
+
+ if settings.INDEXER_MANAGER_TYPE:
+ logger.log(
+ "COMET",
+ f"Indexer Manager: {settings.INDEXER_MANAGER_TYPE}|{settings.INDEXER_MANAGER_URL} - Timeout: {settings.INDEXER_MANAGER_TIMEOUT}s",
+ )
+ logger.log("COMET", f"Indexers: {', '.join(settings.INDEXER_MANAGER_INDEXERS)}")
+ logger.log("COMET", f"Get Torrent Timeout: {settings.GET_TORRENT_TIMEOUT}s")
+ logger.log(
+ "COMET", f"Download Torrent Files: {bool(settings.DOWNLOAD_TORRENT_FILES)}"
+ )
+ else:
+ logger.log("COMET", "Indexer Manager: False")
+
+ zilean_url = f" - {settings.ZILEAN_URL}"
+ logger.log(
+ "COMET",
+ f"Zilean Scraper: {bool(settings.SCRAPE_ZILEAN)}{zilean_url if settings.SCRAPE_ZILEAN else ''}",
+ )
+
+ torrentio_url = f" - {settings.TORRENTIO_URL}"
+ logger.log(
+ "COMET",
+ f"Torrentio Scraper: {bool(settings.SCRAPE_TORRENTIO)}{torrentio_url if settings.SCRAPE_TORRENTIO else ''}",
+ )
+
+ mediafusion_url = f" - {settings.MEDIAFUSION_URL}"
+ logger.log(
+ "COMET",
+ f"MediaFusion Scraper: {bool(settings.SCRAPE_MEDIAFUSION)}{mediafusion_url if settings.SCRAPE_MEDIAFUSION else ''}",
+ )
+
+ logger.log(
+ "COMET",
+ f"Debrid Stream Proxy: {bool(settings.PROXY_DEBRID_STREAM)} - Password: {settings.PROXY_DEBRID_STREAM_PASSWORD} - Max Connections: {settings.PROXY_DEBRID_STREAM_MAX_CONNECTIONS} - Default Debrid Service: {settings.PROXY_DEBRID_STREAM_DEBRID_DEFAULT_SERVICE} - Default Debrid API Key: {settings.PROXY_DEBRID_STREAM_DEBRID_DEFAULT_APIKEY}",
+ )
+
+ logger.log("COMET", f"StremThru URL: {settings.STREMTHRU_URL}")
+
+ logger.log("COMET", f"Remove Adult Content: {bool(settings.REMOVE_ADULT_CONTENT)}")
+ logger.log("COMET", f"Custom Header HTML: {bool(settings.CUSTOM_HEADER_HTML)}")
+
+
+def run_with_uvicorn():
+ """Run the server with uvicorn only"""
+ config = uvicorn.Config(
+ app,
+ host=settings.FASTAPI_HOST,
+ port=settings.FASTAPI_PORT,
+ proxy_headers=True,
+ forwarded_allow_ips="*",
+ workers=settings.FASTAPI_WORKERS,
+ log_config=None,
+ )
+ server = Server(config=config)
+
+ with server.run_in_thread():
+ start_log()
+ try:
+ while True:
+ time.sleep(1) # Keep the main thread alive
+ except KeyboardInterrupt:
+ logger.log("COMET", "Server stopped by user")
+ except Exception as e:
+ logger.error(f"Unexpected error: {e}")
+ logger.exception(traceback.format_exc())
+ finally:
+ logger.log("COMET", "Server Shutdown")
+
+
+def run_with_gunicorn():
+ """Run the server with gunicorn and uvicorn workers"""
+ import gunicorn.app.base
+
+ class StandaloneApplication(gunicorn.app.base.BaseApplication):
+ def __init__(self, app, options=None):
+ self.options = options or {}
+ self.application = app
+ super().__init__()
+
+ def load_config(self):
+ config = {
+ key: value for key, value in self.options.items()
+ if key in self.cfg.settings and value is not None
+ }
+ for key, value in config.items():
+ self.cfg.set(key.lower(), value)
+
+ def load(self):
+ return self.application
+
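+ # FASTAPI_WORKERS <= 1 means "auto": use the common gunicorn heuristic of
+ # (2 * CPU cores) + 1 workers.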
+ workers = settings.FASTAPI_WORKERS
+ if workers <= 1:
+ workers = (os.cpu_count() or 1) * 2 + 1
+
+ options = {
+ "bind": f"{settings.FASTAPI_HOST}:{settings.FASTAPI_PORT}",
+ "workers": workers,
+ "worker_class": "uvicorn.workers.UvicornWorker",
+ "timeout": 120,
+ "keepalive": 5,
+ "preload_app": True,
+ "proxy_protocol": True,
+ "forwarded_allow_ips": "*",
+ }
+
+ start_log()
+ logger.log("COMET", f"Starting with gunicorn using {workers} workers")
+
+ StandaloneApplication(app, options).run()
+
+
+if __name__ == "__main__":
+ if os.name == "nt" or not settings.USE_GUNICORN:
+ run_with_uvicorn()
+ else:
+ run_with_gunicorn()
\ No newline at end of file
diff --git a/comet/metadata/__init__.py b/comet/metadata/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/comet/metadata/imdb.py b/comet/metadata/imdb.py
new file mode 100644
index 0000000..dee9508
--- /dev/null
+++ b/comet/metadata/imdb.py
@@ -0,0 +1,20 @@
+import aiohttp
+
+from comet.utils.logger import logger
+
+
+async def get_imdb_metadata(session: aiohttp.ClientSession, id: str):
+ try:
+ response = await session.get(
+ f"https://v3.sg.media-imdb.com/suggestion/a/{id}.json"
+ )
+ metadata = await response.json()
+ for element in metadata["d"]:
+ if "/" not in element["id"]:
+ title = element["l"]
+ year = element.get("y")
+ year_end = int(element["yr"].split("-")[1]) if "yr" in element else None
+ return title, year, year_end
+ except Exception as e:
+ logger.warning(f"Exception while getting IMDB metadata for {id}: {e}")
+ return None, None, None
diff --git a/comet/metadata/kitsu.py b/comet/metadata/kitsu.py
new file mode 100644
index 0000000..f0023f8
--- /dev/null
+++ b/comet/metadata/kitsu.py
@@ -0,0 +1,44 @@
+import aiohttp
+
+from comet.utils.logger import logger
+
+
+async def get_kitsu_metadata(session: aiohttp.ClientSession, id: str):
+ try:
+ response = await session.get(f"https://kitsu.io/api/edge/anime/{id}")
+ metadata = await response.json()
+
+ attributes = metadata["data"]["attributes"]
+ year = int(attributes["createdAt"].split("-")[0])
+ year_end = int(attributes["updatedAt"].split("-")[0])
+
+ return attributes["canonicalTitle"], year, year_end
+ except Exception as e:
+ logger.warning(f"Exception while getting Kitsu metadata for {id}: {e}")
+ return None, None, None
+
+
+async def get_kitsu_aliases(session: aiohttp.ClientSession, id: str):
+ aliases = {}
+ try:
+ response = await session.get(f"https://find-my-anime.dtimur.de/api?id={id}&provider=Kitsu")
+ data = await response.json()
+
+ aliases["ez"] = []
+ aliases["ez"].append(data[0]["title"])
+ for synonym in data[0]["synonyms"]:
+ aliases["ez"].append(synonym)
+
+ total_aliases = len(aliases["ez"])
+ if total_aliases > 0:
+ logger.log(
+ "SCRAPER",
+ f"📜 Found {total_aliases} Kitsu aliases for {id}",
+ )
+ return aliases
+ except Exception:
+ pass
+
+ logger.log("SCRAPER", f"📜 No Kitsu aliases found for {id}")
+
+ return {}
diff --git a/comet/metadata/manager.py b/comet/metadata/manager.py
new file mode 100644
index 0000000..be722ca
--- /dev/null
+++ b/comet/metadata/manager.py
@@ -0,0 +1,107 @@
+import aiohttp
+import asyncio
+import time
+import orjson
+
+from RTN.patterns import normalize_title
+
+from comet.utils.models import database, settings
+from comet.utils.general import parse_media_id
+
+from .kitsu import get_kitsu_metadata, get_kitsu_aliases
+from .imdb import get_imdb_metadata
+from .trakt import get_trakt_aliases
+
+
+class MetadataScraper:
+ def __init__(self, session: aiohttp.ClientSession):
+ self.session = session
+
+ async def fetch_metadata_and_aliases(self, media_type: str, media_id: str):
+ id, season, episode = parse_media_id(media_type, media_id)
+
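+ # Kitsu entries are always treated as season 1, so normalize the season used
+ # for the cache lookup.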
+ get_cached = await self.get_cached(
+ id, season if "kitsu" not in media_id else 1, episode
+ )
+ if get_cached is not None:
+ return get_cached[0], get_cached[1]
+
+ is_kitsu = "kitsu" in media_id
+ metadata_task = asyncio.create_task(self.get_metadata(id, season, episode, is_kitsu))
+ aliases_task = asyncio.create_task(self.get_aliases(media_type, id, is_kitsu))
+ metadata, aliases = await asyncio.gather(metadata_task, aliases_task)
+ await self.cache_metadata(id, metadata, aliases)
+
+ return metadata, aliases
+
+ async def get_cached(self, media_id: str, season: int, episode: int):
+ row = await database.fetch_one(
+ """
+ SELECT title, year, year_end, aliases
+ FROM metadata_cache
+ WHERE media_id = :media_id
+ AND timestamp + :cache_ttl >= :current_time
+ """,
+ {
+ "media_id": media_id,
+ "cache_ttl": settings.METADATA_CACHE_TTL,
+ "current_time": time.time(),
+ },
+ )
+ if row is not None:
+ metadata = {
+ "title": row["title"],
+ "year": row["year"],
+ "year_end": row["year_end"],
+ "season": season,
+ "episode": episode,
+ }
+ return metadata, orjson.loads(row["aliases"])
+
+ return None
+
+ async def cache_metadata(self, media_id: str, metadata: dict, aliases: dict):
+ await database.execute(
+ f"""
+ INSERT {"OR IGNORE " if settings.DATABASE_TYPE == "sqlite" else ""}
+ INTO metadata_cache
+ VALUES (:media_id, :title, :year, :year_end, :aliases, :timestamp)
+ {" ON CONFLICT DO NOTHING" if settings.DATABASE_TYPE == "postgresql" else ""}
+ """,
+ {
+ "media_id": media_id,
+ "title": metadata["title"],
+ "year": metadata["year"],
+ "year_end": metadata["year_end"],
+ "aliases": orjson.dumps(aliases).decode("utf-8"),
+ "timestamp": time.time(),
+ },
+ )
+
+ def normalize_metadata(self, metadata: dict, season: int, episode: int):
+ title, year, year_end = metadata
+
+ if title is None: # metadata retrieving failed
+ return None
+
+ return {
+ "title": normalize_title(title),
+ "year": year,
+ "year_end": year_end,
+ "season": season,
+ "episode": episode,
+ }
+
+ async def get_metadata(self, id: str, season: int, episode: int, is_kitsu: bool):
+ if is_kitsu:
+ raw_metadata = await get_kitsu_metadata(self.session, id)
+ return self.normalize_metadata(raw_metadata, 1, episode)
+ else:
+ raw_metadata = await get_imdb_metadata(self.session, id)
+ return self.normalize_metadata(raw_metadata, season, episode)
+
+ async def get_aliases(self, media_type: str, media_id: str, is_kitsu: bool):
+ if is_kitsu:
+ return await get_kitsu_aliases(self.session, media_id)
+
+ return await get_trakt_aliases(self.session, media_type, media_id)
diff --git a/comet/metadata/trakt.py b/comet/metadata/trakt.py
new file mode 100644
index 0000000..2306d37
--- /dev/null
+++ b/comet/metadata/trakt.py
@@ -0,0 +1,31 @@
+import aiohttp
+
+from comet.utils.logger import logger
+
+
+async def get_trakt_aliases(
+ session: aiohttp.ClientSession, media_type: str, media_id: str
+):
+ aliases = set()
+ try:
+ response = await session.get(
+ f"https://api.trakt.tv/{'movies' if media_type == 'movie' else 'shows'}/{media_id}/aliases"
+ )
+ data = await response.json()
+
+ for alias in data:
+ aliases.add(alias["title"])
+
+ total_aliases = len(aliases)
+ if total_aliases > 0:
+ logger.log(
+ "SCRAPER",
+ f"📜 Found {total_aliases} Trakt aliases for {media_id}",
+ )
+ return {"ez": list(aliases)}
+ except Exception:
+ pass
+
+ logger.log("SCRAPER", f"📜 No Trakt aliases found for {media_id}")
+
+ return {}
diff --git a/comet/scrapers/__init__.py b/comet/scrapers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/comet/scrapers/jackett.py b/comet/scrapers/jackett.py
new file mode 100644
index 0000000..00458c7
--- /dev/null
+++ b/comet/scrapers/jackett.py
@@ -0,0 +1,128 @@
+import aiohttp
+import asyncio
+
+from comet.utils.models import settings
+from comet.utils.logger import logger
+from comet.utils.torrent import (
+ download_torrent,
+ extract_torrent_metadata,
+ extract_trackers_from_magnet,
+ add_torrent_queue,
+)
+
+
+async def process_torrent(
+ session: aiohttp.ClientSession, result: dict, media_id: str, season: int
+):
+ base_torrent = {
+ "title": result["Title"],
+ "infoHash": None,
+ "fileIndex": None,
+ "seeders": result["Seeders"],
+ "size": result["Size"],
+ "tracker": result["Tracker"],
+ "sources": [],
+ }
+
+ torrents = []
+
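+ # Prefer the downloadable .torrent file when the indexer exposes one: it yields
+ # per-file names, sizes and trackers. Otherwise fall back to the magnet/info hash.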
+ if result["Link"] is not None:
+ content, magnet_hash, magnet_url = await download_torrent(
+ session, result["Link"]
+ )
+
+ if content:
+ metadata = extract_torrent_metadata(content)
+ if metadata:
+ for file in metadata["files"]:
+ torrent = base_torrent.copy()
+ torrent["title"] = file["name"]
+ torrent["infoHash"] = metadata["info_hash"].lower()
+ torrent["fileIndex"] = file["index"]
+ torrent["size"] = file["size"]
+ torrent["sources"] = metadata["announce_list"]
+ torrents.append(torrent)
+ return torrents
+
+ if magnet_hash:
+ base_torrent["infoHash"] = magnet_hash.lower()
+ base_torrent["sources"] = extract_trackers_from_magnet(magnet_url)
+
+ await add_torrent_queue.add_torrent(
+ magnet_url,
+ base_torrent["seeders"],
+ base_torrent["tracker"],
+ media_id,
+ season,
+ )
+
+ torrents.append(base_torrent)
+ return torrents
+
+ if "InfoHash" in result and result["InfoHash"]:
+ base_torrent["infoHash"] = result["InfoHash"].lower()
+ if result["MagnetUri"] is not None:
+ base_torrent["sources"] = extract_trackers_from_magnet(result["MagnetUri"])
+
+ await add_torrent_queue.add_torrent(
+ result["MagnetUri"],
+ base_torrent["seeders"],
+ base_torrent["tracker"],
+ media_id,
+ season,
+ )
+
+ torrents.append(base_torrent)
+
+ return torrents
+
+
+async def fetch_jackett_results(
+ session: aiohttp.ClientSession, indexer: str, query: str
+):
+ try:
+ response = await session.get(
+ f"{settings.INDEXER_MANAGER_URL}/api/v2.0/indexers/all/results?apikey={settings.INDEXER_MANAGER_API_KEY}&Query={query}&Tracker[]={indexer}",
+ timeout=aiohttp.ClientTimeout(total=settings.INDEXER_MANAGER_TIMEOUT),
+ )
+ response = await response.json()
+ return response.get("Results", [])
+ except Exception as e:
+ logger.warning(
+ f"Exception while fetching Jackett results for indexer {indexer}: {e}"
+ )
+ return []
+
+
+async def get_jackett(manager, session: aiohttp.ClientSession, title: str, seen: set):
+ torrents = []
+ try:
+ tasks = [
+ fetch_jackett_results(session, indexer, title)
+ for indexer in settings.INDEXER_MANAGER_INDEXERS
+ ]
+ all_results = await asyncio.gather(*tasks)
+
+ torrent_tasks = []
+ for result_set in all_results:
+ for result in result_set:
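+ # deduplicate results across queries by their details page URL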
+ if result["Details"] in seen:
+ continue
+
+ seen.add(result["Details"])
+ torrent_tasks.append(
+ process_torrent(
+ session, result, manager.media_only_id, manager.season
+ )
+ )
+
+ processed_torrents = await asyncio.gather(*torrent_tasks)
+ torrents = [
+ t for sublist in processed_torrents for t in sublist if t["infoHash"]
+ ]
+ except Exception as e:
+ logger.warning(
+ f"Exception while getting torrents for {title} with Jackett: {e}"
+ )
+
+ await manager.filter_manager(torrents)
diff --git a/comet/scrapers/manager.py b/comet/scrapers/manager.py
new file mode 100644
index 0000000..a158b1e
--- /dev/null
+++ b/comet/scrapers/manager.py
@@ -0,0 +1,349 @@
+import aiohttp
+import asyncio
+import orjson
+import time
+
+from RTN import (
+ parse,
+ title_match,
+ get_rank,
+ check_fetch,
+ sort_torrents,
+ ParsedData,
+ BestRanking,
+ Torrent,
+)
+
+from comet.utils.models import settings, database, CometSettingsModel
+from comet.utils.general import default_dump
+from comet.utils.debrid import cache_availability, get_cached_availability
+from comet.debrid.manager import retrieve_debrid_availability
+from .zilean import get_zilean
+from .torrentio import get_torrentio
+from .mediafusion import get_mediafusion
+from .jackett import get_jackett
+from .prowlarr import get_prowlarr
+
+
+class TorrentManager:
+ def __init__(
+ self,
+ debrid_service: str,
+ debrid_api_key: str,
+ ip: str,
+ media_type: str,
+ media_full_id: str,
+ media_only_id: str,
+ title: str,
+ year: int,
+ year_end: int,
+ season: int,
+ episode: int,
+ aliases: dict,
+ remove_adult_content: bool,
+ ):
+ self.debrid_service = debrid_service
+ self.debrid_api_key = debrid_api_key
+ self.ip = ip
+ self.media_type = media_type
+ self.media_id = media_full_id
+ self.media_only_id = media_only_id
+ self.title = title
+ self.year = year
+ self.year_end = year_end
+ self.season = season
+ self.episode = episode
+ self.aliases = aliases
+ self.remove_adult_content = remove_adult_content
+
+ self.seen_hashes = set()
+ self.torrents = {}
+ self.ready_to_cache = []
+ self.ranked_torrents = {}
+
+ async def scrape_torrents(
+ self,
+ session: aiohttp.ClientSession,
+ ):
+ tasks = []
+ if settings.SCRAPE_TORRENTIO:
+ tasks.append(get_torrentio(self, self.media_type, self.media_id))
+ if settings.SCRAPE_MEDIAFUSION:
+ tasks.append(get_mediafusion(self, self.media_type, self.media_id))
+ if settings.SCRAPE_ZILEAN:
+ tasks.append(
+ get_zilean(self, session, self.title, self.season, self.episode)
+ )
+ if settings.INDEXER_MANAGER_API_KEY:
+ queries = [self.title]
+
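+ # for series, also search with season and episode qualifiers (e.g. "Title S01", "Title S01E01")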
+ if self.media_type == "series":
+ queries.append(f"{self.title} S{self.season:02d}")
+ queries.append(f"{self.title} S{self.season:02d}E{self.episode:02d}")
+
+ seen_already = set()
+ for query in queries:
+ if settings.INDEXER_MANAGER_TYPE == "jackett":
+ tasks.append(get_jackett(self, session, query, seen_already))
+ elif settings.INDEXER_MANAGER_TYPE == "prowlarr":
+ tasks.append(get_prowlarr(self, session, query, seen_already))
+
+ await asyncio.gather(*tasks)
+ asyncio.create_task(self.cache_torrents())
+
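+ # keep torrents whose parsed season/episode match the request;
+ # entries without a parsed season or episode (e.g. packs) are kept as well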
+ for torrent in self.ready_to_cache:
+ season = torrent["parsed"].seasons[0] if torrent["parsed"].seasons else None
+ episode = (
+ torrent["parsed"].episodes[0] if torrent["parsed"].episodes else None
+ )
+
+ if (season is not None and season != self.season) or (
+ episode is not None and episode != self.episode
+ ):
+ continue
+
+ info_hash = torrent["infoHash"]
+ self.torrents[info_hash] = {
+ "fileIndex": torrent["fileIndex"],
+ "title": torrent["title"],
+ "seeders": torrent["seeders"],
+ "size": torrent["size"],
+ "tracker": torrent["tracker"],
+ "sources": torrent["sources"],
+ "parsed": torrent["parsed"],
+ }
+
+ async def get_cached_torrents(self):
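+ # season must match exactly (NULL-safe); rows with a NULL episode cover any episode (e.g. season packs)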
+ rows = await database.fetch_all(
+ """
+ SELECT info_hash, file_index, title, seeders, size, tracker, sources, parsed
+ FROM torrents
+ WHERE media_id = :media_id
+ AND ((season IS NOT NULL AND season = cast(:season as INTEGER)) OR (season IS NULL AND cast(:season as INTEGER) IS NULL))
+ AND (episode IS NULL OR episode = cast(:episode as INTEGER))
+ AND timestamp + :cache_ttl >= :current_time
+ """,
+ {
+ "media_id": self.media_only_id,
+ "season": self.season,
+ "episode": self.episode,
+ "cache_ttl": settings.TORRENT_CACHE_TTL,
+ "current_time": time.time(),
+ },
+ )
+
+ for row in rows:
+ info_hash = row["info_hash"]
+ self.torrents[info_hash] = {
+ "fileIndex": row["file_index"],
+ "title": row["title"],
+ "seeders": row["seeders"],
+ "size": row["size"],
+ "tracker": row["tracker"],
+ "sources": orjson.loads(row["sources"]),
+ "parsed": ParsedData(**orjson.loads(row["parsed"])),
+ }
+
+ async def cache_torrents(self):
+ current_time = time.time()
+ values = [
+ {
+ "media_id": self.media_only_id,
+ "info_hash": torrent["infoHash"],
+ "file_index": torrent["fileIndex"],
+ "season": torrent["parsed"].seasons[0]
+ if torrent["parsed"].seasons
+ else self.season,
+ "episode": torrent["parsed"].episodes[0]
+ if torrent["parsed"].episodes
+ else None,
+ "title": torrent["title"],
+ "seeders": torrent["seeders"],
+ "size": torrent["size"],
+ "tracker": torrent["tracker"],
+ "sources": orjson.dumps(torrent["sources"]).decode("utf-8"),
+ "parsed": orjson.dumps(torrent["parsed"], default_dump).decode("utf-8"),
+ "timestamp": current_time,
+ }
+ for torrent in self.ready_to_cache
+ ]
+
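+ # skip rows that already exist: "INSERT OR IGNORE" on SQLite, "ON CONFLICT DO NOTHING" on PostgreSQL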
+ query = f"""
+ INSERT {"OR IGNORE " if settings.DATABASE_TYPE == "sqlite" else ""}
+ INTO torrents
+ VALUES (:media_id, :info_hash, :file_index, :season, :episode, :title, :seeders, :size, :tracker, :sources, :parsed, :timestamp)
+ {" ON CONFLICT DO NOTHING" if settings.DATABASE_TYPE == "postgresql" else ""}
+ """
+
+ await database.execute_many(query, values)
+
+ async def filter(self, torrents: list):
+ title = self.title
+ year = self.year
+ year_end = self.year_end
+ aliases = self.aliases
+ remove_adult_content = self.remove_adult_content
+
+ for torrent in torrents:
+ parsed = parse(torrent["title"])
+
+ if remove_adult_content and parsed.adult:
+ continue
+
+ if parsed.parsed_title and not title_match(
+ title, parsed.parsed_title, aliases=aliases
+ ):
+ continue
+
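+ # with an explicit end year, the parsed year must fall inside the range;
+ # otherwise allow a one-year tolerance around the expected year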
+ if year and parsed.year:
+ if year_end is not None:
+ if not (year <= parsed.year <= year_end):
+ continue
+ else:
+ if year < (parsed.year - 1) or year > (parsed.year + 1):
+ continue
+
+ torrent["parsed"] = parsed
+ self.ready_to_cache.append(torrent)
+
+ async def filter_manager(self, torrents: list):
+ new_torrents = [
+ torrent
+ for torrent in torrents
+ if (torrent["infoHash"], torrent["title"]) not in self.seen_hashes
+ ]
+ self.seen_hashes.update(
+ (torrent["infoHash"], torrent["title"]) for torrent in new_torrents
+ )
+
+ chunk_size = 50
+ tasks = [
+ self.filter(new_torrents[i : i + chunk_size])
+ for i in range(0, len(new_torrents), chunk_size)
+ ]
+ await asyncio.gather(*tasks)
+
+ def rank_torrents(
+ self,
+ rtn_settings: CometSettingsModel,
+ rtn_ranking: BestRanking,
+ max_results_per_resolution: int,
+ max_size: int,
+ cached_only: int,
+ remove_trash: int,
+ ):
+ ranked_torrents = set()
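+ # apply the user's filters (cached only, max size, trash removal) and compute an RTN rank for each torrent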
+ for info_hash, torrent in self.torrents.items():
+ if (
+ cached_only
+ and self.debrid_service != "torrent"
+ and not torrent["cached"]
+ ):
+ continue
+
+ if max_size != 0 and torrent["size"] > max_size:
+ continue
+
+ parsed = torrent["parsed"]
+
+ raw_title = torrent["title"]
+
+ is_fetchable, failed_keys = check_fetch(parsed, rtn_settings)
+ rank = get_rank(parsed, rtn_settings, rtn_ranking)
+
+ if remove_trash:
+ if (
+ not is_fetchable
+ or rank < rtn_settings.options["remove_ranks_under"]
+ ):
+ continue
+
+ try:
+ ranked_torrents.add(
+ Torrent(
+ infohash=info_hash,
+ raw_title=raw_title,
+ data=parsed,
+ fetch=is_fetchable,
+ rank=rank,
+ lev_ratio=0.0,
+ )
+ )
+ except Exception:
+ pass
+
+ self.ranked_torrents = sort_torrents(
+ ranked_torrents, max_results_per_resolution
+ )
+
+ async def get_and_cache_debrid_availability(self, session: aiohttp.ClientSession):
+ info_hashes = list(self.torrents.keys())
+
+ seeders_map = {hash: self.torrents[hash]["seeders"] for hash in info_hashes}
+ tracker_map = {hash: self.torrents[hash]["tracker"] for hash in info_hashes}
+ sources_map = {hash: self.torrents[hash]["sources"] for hash in info_hashes}
+
+ availability = await retrieve_debrid_availability(
+ session,
+ self.media_id,
+ self.media_only_id,
+ self.debrid_service,
+ self.debrid_api_key,
+ self.ip,
+ info_hashes,
+ seeders_map,
+ tracker_map,
+ sources_map,
+ )
+
+ if len(availability) == 0:
+ return
+
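+ # merge debrid availability into the torrent list, keeping only files that match the requested season/episode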
+ for file in availability:
+ season = file["season"]
+ episode = file["episode"]
+ if (season is not None and season != self.season) or (
+ episode is not None and episode != self.episode
+ ):
+ continue
+
+ info_hash = file["info_hash"]
+ self.torrents[info_hash]["cached"] = True
+
+ if file["parsed"] is not None:
+ self.torrents[info_hash]["parsed"] = file["parsed"]
+ if file["index"] is not None:
+ self.torrents[info_hash]["fileIndex"] = file["index"]
+ if file["title"] is not None:
+ self.torrents[info_hash]["title"] = file["title"]
+ if file["size"] is not None:
+ self.torrents[info_hash]["size"] = file["size"]
+
+ asyncio.create_task(cache_availability(self.debrid_service, availability))
+
+ async def get_cached_availability(self):
+ info_hashes = list(self.torrents.keys())
+ for hash in info_hashes:
+ self.torrents[hash]["cached"] = False
+
+ if self.debrid_service == "torrent" or len(self.torrents) == 0:
+ return
+
+ rows = await get_cached_availability(
+ self.debrid_service, info_hashes, self.season, self.episode
+ )
+
+ for row in rows:
+ info_hash = row["info_hash"]
+ self.torrents[info_hash]["cached"] = True
+
+ if row["parsed"] is not None:
+ self.torrents[info_hash]["parsed"] = ParsedData(
+ **orjson.loads(row["parsed"])
+ )
+ if row["file_index"] is not None:
+ self.torrents[info_hash]["fileIndex"] = row["file_index"]
+ if row["title"] is not None:
+ self.torrents[info_hash]["title"] = row["title"]
+ if row["size"] is not None:
+ self.torrents[info_hash]["size"] = row["size"]
diff --git a/comet/scrapers/mediafusion.py b/comet/scrapers/mediafusion.py
new file mode 100644
index 0000000..295b4b7
--- /dev/null
+++ b/comet/scrapers/mediafusion.py
@@ -0,0 +1,58 @@
+from curl_cffi import requests
+
+from comet.utils.models import settings
+from comet.utils.logger import logger
+
+
+async def get_mediafusion(manager, media_type: str, media_id: str):
+ torrents = []
+ try:
+ try:
+ get_mediafusion = requests.get(
+ f"{settings.MEDIAFUSION_URL}/D-zn4qJLK4wUZVWscY9ESCnoZBEiNJCZ9uwfCvmxuliDjY7vkc-fu0OdxUPxwsP3_A/stream/{media_type}/{media_id}.json"
+ ).json()
+ except Exception as e:
+ logger.warning(
+ f"Failed to get MediaFusion results without proxy for {media_id}: {e}"
+ )
+
+ get_mediafusion = requests.get(
+ f"{settings.MEDIAFUSION_URL}/stream/{media_type}/{media_id}.json",
+ proxies={
+ "http": settings.DEBRID_PROXY_URL,
+ "https": settings.DEBRID_PROXY_URL,
+ },
+ ).json()
+
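+ # MediaFusion only exposes torrent metadata in the stream description:
+ # the first line holds the name (📂), "👤" marks seeders and the last line's "🔗" marks the tracker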
+ for torrent in get_mediafusion["streams"]:
+ title_full = torrent["description"]
+ lines = title_full.split("\n")
+
+ title = lines[0].replace("📂 ", "").replace("/", "")
+
+ seeders = None
+ if "👤" in lines[1]:
+ seeders = int(lines[1].split("👤 ")[1].split("\n")[0])
+
+ tracker = lines[-1].split("🔗 ")[1]
+
+ torrents.append(
+ {
+ "title": title,
+ "infoHash": torrent["infoHash"].lower(),
+ "fileIndex": torrent["fileIdx"] if "fileIdx" in torrent else None,
+ "seeders": seeders,
+ "size": torrent["behaviorHints"][
+ "videoSize"
+ ], # video file size, not the pack size, but still useful
+ "tracker": f"MediaFusion|{tracker}",
+ "sources": torrent["sources"] if "sources" in torrent else [],
+ }
+ )
+ except Exception as e:
+ logger.warning(
+ f"Exception while getting torrents for {media_id} with MediaFusion, your IP is most likely blacklisted (you should try proxying Comet): {e}"
+ )
+ pass
+
+ await manager.filter_manager(torrents)
diff --git a/comet/scrapers/prowlarr.py b/comet/scrapers/prowlarr.py
new file mode 100644
index 0000000..393f0aa
--- /dev/null
+++ b/comet/scrapers/prowlarr.py
@@ -0,0 +1,124 @@
+import aiohttp
+import asyncio
+
+from comet.utils.models import settings
+from comet.utils.logger import logger
+from comet.utils.torrent import (
+ download_torrent,
+ extract_torrent_metadata,
+ extract_trackers_from_magnet,
+ add_torrent_queue,
+)
+
+
+async def process_torrent(
+ session: aiohttp.ClientSession, result: dict, media_id: str, season: int
+):
+ base_torrent = {
+ "title": result["title"],
+ "infoHash": None,
+ "fileIndex": None,
+ "seeders": result["seeders"],
+ "size": result["size"],
+ "tracker": result["indexer"],
+ "sources": [],
+ }
+
+ torrents = []
+
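+ # prefer the .torrent file for the info hash and per-file metadata;
+ # fall back to the magnet hash and queue it for background processing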
+ if "downloadUrl" in result:
+ content, magnet_hash, magnet_url = await download_torrent(
+ session, result["downloadUrl"]
+ )
+
+ if content:
+ metadata = extract_torrent_metadata(content)
+ if metadata:
+ for file in metadata["files"]:
+ torrent = base_torrent.copy()
+ torrent["title"] = file["name"]
+ torrent["infoHash"] = metadata["info_hash"].lower()
+ torrent["fileIndex"] = file["index"]
+ torrent["size"] = file["size"]
+ torrent["sources"] = metadata["announce_list"]
+ torrents.append(torrent)
+ return torrents
+
+ if magnet_hash:
+ base_torrent["infoHash"] = magnet_hash.lower()
+ base_torrent["sources"] = extract_trackers_from_magnet(magnet_url)
+
+ await add_torrent_queue.add_torrent(
+ magnet_url,
+ base_torrent["seeders"],
+ base_torrent["tracker"],
+ media_id,
+ season,
+ )
+
+ torrents.append(base_torrent)
+ return torrents
+
+ if "infoHash" in result and result["infoHash"]:
+ base_torrent["infoHash"] = result["infoHash"].lower()
+ if "guid" in result and result["guid"].startswith("magnet:"):
+ base_torrent["sources"] = extract_trackers_from_magnet(result["guid"])
+
+ await add_torrent_queue.add_torrent(
+ result["guid"],
+ base_torrent["seeders"],
+ base_torrent["tracker"],
+ media_id,
+ season,
+ )
+
+ torrents.append(base_torrent)
+
+ return torrents
+
+
+async def get_prowlarr(manager, session: aiohttp.ClientSession, title: str, seen: set):
+ torrents = []
+ try:
+ indexers = [indexer.lower() for indexer in settings.INDEXER_MANAGER_INDEXERS]
+
+ get_indexers = await session.get(
+ f"{settings.INDEXER_MANAGER_URL}/api/v1/indexer",
+ headers={"X-Api-Key": settings.INDEXER_MANAGER_API_KEY},
+ )
+ get_indexers = await get_indexers.json()
+
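+ # map the configured indexer names to Prowlarr indexer IDs, matching either the display name or the definition name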
+ indexers_id = []
+ for indexer in get_indexers:
+ if (
+ indexer["name"].lower() in indexers
+ or indexer["definitionName"].lower() in indexers
+ ):
+ indexers_id.append(indexer["id"])
+
+ response = await session.get(
+ f"{settings.INDEXER_MANAGER_URL}/api/v1/search?query={title}&indexerIds={'&indexerIds='.join(str(indexer_id) for indexer_id in indexers_id)}&type=search",
+ headers={"X-Api-Key": settings.INDEXER_MANAGER_API_KEY},
+ )
+ response = await response.json()
+
+ torrent_tasks = []
+ for result in response:
+ if result["infoUrl"] in seen:
+ continue
+
+ seen.add(result["infoUrl"])
+ torrent_tasks.append(
+ process_torrent(session, result, manager.media_only_id, manager.season)
+ )
+
+ processed_torrents = await asyncio.gather(*torrent_tasks)
+ torrents = [
+ t for sublist in processed_torrents for t in sublist if t["infoHash"]
+ ]
+ except Exception as e:
+ logger.warning(
+ f"Exception while getting torrents for {title} with Prowlarr: {e}"
+ )
+
+ await manager.filter_manager(torrents)
diff --git a/comet/scrapers/torrentio.py b/comet/scrapers/torrentio.py
new file mode 100644
index 0000000..3dc2c6a
--- /dev/null
+++ b/comet/scrapers/torrentio.py
@@ -0,0 +1,65 @@
+import re
+
+from curl_cffi import requests
+
+from comet.utils.models import settings
+from comet.utils.logger import logger
+from comet.utils.general import size_to_bytes
+
+
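+# Torrentio-style titles embed seeders (👤), size (💾) and the source provider (⚙️)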
+data_pattern = re.compile(
+ r"(?:👤 (\d+) )?💾 ([\d.]+ [KMGT]B)(?: ⚙️ (\w+))?", re.IGNORECASE
+)
+
+
+async def get_torrentio(manager, media_type: str, media_id: str):
+ torrents = []
+ try:
+ try:
+ get_torrentio = requests.get(
+ f"{settings.TORRENTIO_URL}/stream/{media_type}/{media_id}.json"
+ ).json()
+ except Exception as e:
+ logger.warning(
+ f"Failed to get Torrentio results without proxy for {media_id}: {e}"
+ )
+
+ get_torrentio = requests.get(
+ f"{settings.TORRENTIO_URL}/stream/{media_type}/{media_id}.json",
+ proxies={
+ "http": settings.DEBRID_PROXY_URL,
+ "https": settings.DEBRID_PROXY_URL,
+ },
+ ).json()
+
+ for torrent in get_torrentio["streams"]:
+ title_full = torrent["title"]
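+ # official Torrentio puts the release name on the first line; other instances
+ # (KnightCrawler-based) put it on the line just before the "💾" size marker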
+ title = (
+ title_full.split("\n")[0]
+ if settings.TORRENTIO_URL == "https://torrentio.strem.fun"
+ else title_full.split("\n💾")[0].split("\n")[-1]
+ )
+
+ match = data_pattern.search(title_full)
+
+ seeders = int(match.group(1)) if match.group(1) else None
+ size = size_to_bytes(match.group(2))
+ tracker = match.group(3) if match.group(3) else "KnightCrawler"
+
+ torrents.append(
+ {
+ "title": title,
+ "infoHash": torrent["infoHash"].lower(),
+ "fileIndex": torrent["fileIdx"] if "fileIdx" in torrent else None,
+ "seeders": seeders,
+ "size": size,
+ "tracker": f"Torrentio|{tracker}",
+ "sources": torrent["sources"] if "sources" in torrent else [],
+ }
+ )
+ except Exception as e:
+ logger.warning(
+ f"Exception while getting torrents for {media_id} with Torrentio, your IP is most likely blacklisted (you should try proxying Comet): {e}"
+ )
+
+ await manager.filter_manager(torrents)
diff --git a/comet/scrapers/zilean.py b/comet/scrapers/zilean.py
new file mode 100644
index 0000000..49711ca
--- /dev/null
+++ b/comet/scrapers/zilean.py
@@ -0,0 +1,33 @@
+import aiohttp
+
+from comet.utils.models import settings
+from comet.utils.logger import logger
+
+
+async def get_zilean(
+ manager, session: aiohttp.ClientSession, title: str, season: int, episode: int
+):
+ torrents = []
+ try:
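+ # Zilean's filtered DMM search accepts optional season/episode parameters for series queries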
+ show = f"&season={season}&episode={episode}"
+ get_dmm = await session.get(
+ f"{settings.ZILEAN_URL}/dmm/filtered?query={title}{show if season else ''}"
+ )
+ get_dmm = await get_dmm.json()
+
+ for result in get_dmm:
+ torrent = {
+ "title": result["raw_title"],
+ "infoHash": result["info_hash"].lower(),
+ "fileIndex": None,
+ "seeders": None,
+ "size": int(result["size"]),
+ "tracker": "DMM",
+ "sources": [],
+ }
+
+ torrents.append(torrent)
+ except Exception as e:
+ logger.warning(f"Exception while getting torrents for {title} with Zilean: {e}")
+
+ await manager.filter_manager(torrents)
diff --git a/comet/templates/index.html b/comet/templates/index.html
index 2326ade..d734763 100644
--- a/comet/templates/index.html
+++ b/comet/templates/index.html
@@ -12,8 +12,8 @@
Comet - Stremio's fastest torrent/debrid search add-on.
-
-
+
+