diff --git a/.github/.domain/domain_update.py b/.github/.domain/domain_update.py
index 27ba72dd..e9d04b8e 100644
--- a/.github/.domain/domain_update.py
+++ b/.github/.domain/domain_update.py
@@ -116,14 +116,16 @@ def check_dns_resolution(domain):
try:
answers = resolver.resolve(domain, 'A')
return str(answers[0])
- except:
+ except Exception:
try:
answers = resolver.resolve(domain, 'AAAA')
return str(answers[0])
- except:
+
+ except Exception:
pass
return None
- except:
+
+ except Exception:
return None
def find_new_domain(input_url, output_file=None, verbose=True, json_output=False):
@@ -141,7 +143,7 @@ def find_new_domain(input_url, output_file=None, verbose=True, json_output=False
if orig_ip:
log(f"Original domain resolves to: {orig_ip}", "SUCCESS")
else:
- log(f"Original domain does not resolve to an IP address", "WARNING")
+ log("Original domain does not resolve to an IP address", "WARNING")
headers = get_headers()
new_domains = []
@@ -182,7 +184,7 @@ def find_new_domain(input_url, output_file=None, verbose=True, json_output=False
new_domains.append({'domain': redirect_domain_info_val['full_domain'], 'url': next_url, 'source': 'redirect'})
else:
- log(f"Redirect status code but no Location header", "WARNING")
+ log("Redirect status code but no Location header", "WARNING")
break
else:
break
@@ -250,7 +252,7 @@ def find_new_domain(input_url, output_file=None, verbose=True, json_output=False
except httpx.RequestError as e:
log(f"Error with auto-redirect attempt: {str(e)}", "ERROR")
except NameError:
- log(f"Error: URL for auto-redirect attempt was not defined.", "ERROR")
+ log("Error: URL for auto-redirect attempt was not defined.", "ERROR")
unique_domains = []
seen_domains = set()
diff --git a/.github/.domain/domains.json b/.github/.domain/domains.json
index 77a8f49f..958eba1c 100644
--- a/.github/.domain/domains.json
+++ b/.github/.domain/domains.json
@@ -1,62 +1,62 @@
{
- "1337xx": {
- "domain": "to",
- "full_url": "https://www.1337xx.to/",
- "old_domain": "to",
- "time_change": "2025-03-19 12:20:19"
- },
- "cb01new": {
- "domain": "run",
- "full_url": "https://cb01net.run/",
- "old_domain": "watch",
- "time_change": "2025-07-29 10:24:42"
- },
- "animeunity": {
- "domain": "so",
- "full_url": "https://www.animeunity.so/",
- "old_domain": "so",
- "time_change": "2025-07-08 13:59:31"
- },
- "animeworld": {
- "domain": "ac",
- "full_url": "https://www.animeworld.ac/",
- "old_domain": "ac",
- "time_change": "2025-03-21 12:20:27"
- },
- "guardaserie": {
- "domain": "world",
- "full_url": "https://guardaserietv.world/",
- "old_domain": "cc",
- "time_change": "2025-07-30 07:25:56"
- },
- "ddlstreamitaly": {
- "domain": "co",
- "full_url": "https://ddlstreamitaly.co/",
- "old_domain": "co",
- "time_change": "2025-03-19 12:20:26"
- },
- "streamingwatch": {
- "domain": "org",
- "full_url": "https://www.streamingwatch.org/",
- "old_domain": "org",
- "time_change": "2025-04-29 12:30:30"
- },
- "altadefinizione": {
- "domain": "free",
- "full_url": "https://altadefinizione.free/",
- "old_domain": "qpon",
- "time_change": "2025-07-08 13:59:37"
- },
- "streamingcommunity": {
- "domain": "life",
- "full_url": "https://streamingcommunityz.life/",
- "old_domain": "info",
- "time_change": "2025-08-04 15:26:24"
- },
- "altadefinizionegratis": {
- "domain": "city",
- "full_url": "https://altadefinizionegratis.city/",
- "old_domain": "life",
- "time_change": "2025-07-07 17:19:15"
- }
+ "1337xx": {
+ "domain": "to",
+ "full_url": "https://www.1337xx.to/",
+ "old_domain": "to",
+ "time_change": "2025-03-19 12:20:19"
+ },
+ "cb01new": {
+ "domain": "buzz",
+ "full_url": "https://cb01net.buzz/",
+ "old_domain": "run",
+ "time_change": "2025-08-04 20:26:33"
+ },
+ "animeunity": {
+ "domain": "so",
+ "full_url": "https://www.animeunity.so/",
+ "old_domain": "so",
+ "time_change": "2025-07-08 13:59:31"
+ },
+ "animeworld": {
+ "domain": "ac",
+ "full_url": "https://www.animeworld.ac/",
+ "old_domain": "ac",
+ "time_change": "2025-03-21 12:20:27"
+ },
+ "guardaserie": {
+ "domain": "app",
+ "full_url": "https://guardaserietv.app/",
+ "old_domain": "world",
+ "time_change": "2025-08-05 10:25:54"
+ },
+ "ddlstreamitaly": {
+ "domain": "co",
+ "full_url": "https://ddlstreamitaly.co/",
+ "old_domain": "co",
+ "time_change": "2025-03-19 12:20:26"
+ },
+ "streamingwatch": {
+ "domain": "org",
+ "full_url": "https://www.streamingwatch.org/",
+ "old_domain": "org",
+ "time_change": "2025-04-29 12:30:30"
+ },
+ "altadefinizione": {
+ "domain": "free",
+ "full_url": "https://altadefinizione.free/",
+ "old_domain": "qpon",
+ "time_change": "2025-07-08 13:59:37"
+ },
+ "streamingcommunity": {
+ "domain": "app",
+ "full_url": "https://streamingcommunityz.app/",
+ "old_domain": "life",
+ "time_change": "2025-08-12 12:44:44"
+ },
+ "altadefinizionegratis": {
+ "domain": "gold",
+ "full_url": "https://altadefinizionegratis.gold/",
+ "old_domain": "city",
+ "time_change": "2025-08-05 14:26:46"
+ }
}
\ No newline at end of file
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index fbf001aa..43013333 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -90,4 +90,20 @@ jobs:
- name: Run MP4 download test
run: |
- PYTHONPATH=$PYTHONPATH:$(pwd) python -m unittest Test.Download.MP4
\ No newline at end of file
+ PYTHONPATH=$PYTHONPATH:$(pwd) python -m unittest Test.Download.MP4
+
+ ruff-lint:
+ name: Lint with Ruff
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.11'
+ - name: Install Ruff
+ run: |
+ python -m pip install ruff
+ - name: Run Ruff check
+ run: |
+ ruff check . --fix --exclude Test --exclude .github
\ No newline at end of file
diff --git a/.github/workflows/update-loc.yml b/.github/workflows/update-loc.yml
deleted file mode 100644
index a570fed5..00000000
--- a/.github/workflows/update-loc.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: Update Lines of Code
-
-on:
- workflow_dispatch:
-
-jobs:
- update-loc-badge:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v3
-
- - name: Install cloc
- run: sudo apt-get install -y cloc
-
- - name: Count Lines of Code
- run: |
- LOC=$(cloc . --json | jq '.SUM.code')
- echo "{\"schemaVersion\": 1, \"label\": \"Lines of Code\", \"message\": \"$LOC\", \"color\": \"green\"}" > .github/.domain/loc-badge.json
-
- - name: Commit and Push LOC Badge
- run: |
- git config --local user.name "GitHub Actions"
- git config --local user.email "actions@github.com"
- git add .github/.domain/loc-badge.json
- git commit -m "Update lines of code badge" || echo "No changes to commit"
- git push
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 9322c75a..839fa5c0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,4 +52,5 @@ cmd.txt
bot_config.json
scripts.json
active_requests.json
-working_proxies.json
\ No newline at end of file
+working_proxies.json
+start.sh
\ No newline at end of file
diff --git a/Installer/unix_install.sh b/Installer/unix_install.sh
deleted file mode 100644
index 81b43dce..00000000
--- a/Installer/unix_install.sh
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/bin/sh
-
-# Function to check if a command exists
-command_exists() {
- command -v "$1" > /dev/null 2>&1
-}
-
-# Install on Debian/Ubuntu-based systems
-install_on_debian() {
- echo "Installing $1..."
- sudo apt update
- sudo apt install -y "$1"
-}
-
-# Install on Red Hat/CentOS/Fedora-based systems
-install_on_redhat() {
- echo "Installing $1..."
- sudo yum install -y "$1"
-}
-
-# Install on Arch-based systems
-install_on_arch() {
- echo "Installing $1..."
- sudo pacman -Sy --noconfirm "$1"
-}
-
-# Install on BSD-based systems
-install_on_bsd() {
- echo "Installing $1..."
- env ASSUME_ALWAYS_YES=yes sudo pkg install -y "$1"
-}
-
-# Install on macOS
-install_on_macos() {
- echo "Installing $1..."
- if command_exists brew; then
- brew install "$1"
- else
- echo "Homebrew is not installed. Installing Homebrew first..."
- /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
- brew install "$1"
- fi
-}
-
-set -e
-
-# Get the Python version
-PYTHON_VERSION=$(python3 -c 'import sys; print(".".join(map(str, sys.version_info[:3])))')
-
-# Compare the Python version with 3.8
-REQUIRED_VERSION="3.8"
-
-if [ "$(echo -e "$PYTHON_VERSION\n$REQUIRED_VERSION" | sort -V | head -n1)" = "$REQUIRED_VERSION" ]; then
- echo "Python version $PYTHON_VERSION is >= $REQUIRED_VERSION. Continuing..."
-else
- echo "ERROR: Python version $PYTHON_VERSION is < $REQUIRED_VERSION. Exiting..."
- exit 1
-fi
-
-if [ -d ".venv/" ]; then
- echo ".venv exists. Installing requirements.txt..."
- .venv/bin/pip install -r requirements.txt
-else
- echo "Making .venv and installing requirements.txt..."
-
- if [ "$(uname)" = "Linux" ]; then
- # Detect the package manager for venv installation check.
- if command_exists apt; then
- echo "Detected Debian-based system. Checking python3-venv."
- if dpkg -l | grep -q "python3-venv"; then
- echo "python3-venv found."
- else
- echo "python3-venv not found, installing..."
- install_on_debian "python3-venv"
- fi
- fi
- fi
-
- python3 -m venv .venv
- .venv/bin/pip install -r requirements.txt
-
-fi
-
-if command_exists ffmpeg; then
- echo "ffmpeg exists."
-else
- echo "ffmpeg does not exist."
-
- # Detect the platform and install ffmpeg accordingly.
- case "$(uname)" in
- Linux)
- if command_exists apt; then
- echo "Detected Debian-based system."
- install_on_debian "ffmpeg"
- elif command_exists yum; then
- echo "Detected Red Hat-based system."
- echo "Installing needed repos for ffmpeg..."
- sudo yum config-manager --set-enabled crb > /dev/null 2>&1 || true
- sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-$(rpm -E %rhel).noarch.rpm https://dl.fedoraproject.org/pub/epel/epel-next-release-latest-$(rpm -E %rhel).noarch.rpm > /dev/null 2>&1 || true
- sudo yum install -y --nogpgcheck https://mirrors.rpmfusion.org/free/el/rpmfusion-free-release-$(rpm -E %rhel).noarch.rpm https://mirrors.rpmfusion.org/nonfree/el/rpmfusion-nonfree-release-$(rpm -E %rhel).noarch.rpm > /dev/null 2>&1 || true
- install_on_redhat "ffmpeg"
- elif command_exists pacman; then
- echo "Detected Arch-based system."
- install_on_arch "ffmpeg"
- else
- echo "Unsupported Linux distribution."
- exit 1
- fi
- ;;
- FreeBSD|NetBSD|OpenBSD)
- echo "Detected BSD-based system."
- install_on_bsd "ffmpeg"
- ;;
- Darwin)
- echo "Detected macOS."
- install_on_macos "ffmpeg"
- ;;
- *)
- echo "Unsupported operating system."
- exit 1
- ;;
- esac
-fi
-
-sed -i.bak '1s|.*|#!.venv/bin/python3|' test_run.py
-sudo chmod +x test_run.py
-echo 'Everything is installed!'
-echo 'Run StreamingCommunity with "./test_run.py"'
\ No newline at end of file
diff --git a/Installer/win_install.bat b/Installer/win_install.bat
deleted file mode 100644
index d160e417..00000000
--- a/Installer/win_install.bat
+++ /dev/null
@@ -1,120 +0,0 @@
-@echo off
-
-:: Check if the script is running as administrator
-net session >nul 2>&1
-if %errorlevel% neq 0 (
- echo Running as administrator...
- powershell -Command "Start-Process '%~f0' -Verb RunAs"
- exit /b
-)
-
-chcp 65001 > nul
-SETLOCAL ENABLEDELAYEDEXPANSION
-
-echo Script starting...
-
-:: Check if PowerShell is available
-where powershell >nul 2>&1
-IF %ERRORLEVEL% NEQ 0 (
- echo PowerShell is not available on this system. Please ensure it is installed and configured.
- echo Press any key to close...
- pause >nul
- exit /b 1
-)
-
-:: Check if Chocolatey is already installed
-:check_choco
-echo Checking if Chocolatey is installed...
-choco --version >nul 2>&1
-IF %ERRORLEVEL% EQU 0 (
- echo Chocolatey is already installed. Skipping installation.
- goto install_python
-) ELSE (
- echo Installing Chocolatey...
- @"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -ExecutionPolicy Bypass -Command "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" || (
- echo Error during Chocolatey installation.
- echo Press any key to close...
- pause >nul
- exit /b 1
- )
- echo Chocolatey installed successfully.
- where choco >nul 2>&1 || (
- echo Chocolatey is not recognized. Ensure it is correctly installed and PATH is configured.
- echo Press any key to close...
- pause >nul
- exit /b 1
- )
-)
-
-:: Check if Python is already installed
-:install_python
-echo Checking if Python is installed...
-python -V >nul 2>&1
-IF %ERRORLEVEL% EQU 0 (
- echo Python is already installed. Skipping installation.
-) ELSE (
- echo Installing Python...
- choco install python --confirm --params="'/NoStore'" --allow-downgrade || (
- echo Error during Python installation.
- echo Press any key to close...
- pause >nul
- exit /b 1
- )
- echo Python installed successfully.
- call python -V
- echo.
- echo Please restart the terminal to continue...
- pause >nul
- exit /b
-)
-
-:: Verify installations
-:verifica_installazioni
-echo Verifying installations...
-call choco --version
-call python -V
-
-echo All programs have been successfully installed and verified.
-
-:: Create a virtual environment .venv
-:create_venv
-echo Checking if the .venv virtual environment already exists...
-if exist .venv (
- echo The .venv virtual environment already exists. Skipping creation.
-) ELSE (
- echo Creating the .venv virtual environment...
- python -m venv .venv || (
- echo Error during virtual environment creation.
- echo Press any key to close...
- pause >nul
- exit /b 1
- )
- echo Virtual environment created successfully.
-)
-
-:: Activate the virtual environment and install requirements
-echo Installing requirements...
-echo Current directory: %CD%
-call .venv\Scripts\activate.bat
-pip install -r requirements.txt || (
- echo Error during requirements installation.
- echo Press any key to close...
- pause >nul
- exit /b 1
-)
-
-:: Run test_run.py
-echo Running test_run.py...
-call .venv\Scripts\python .\test_run.py || (
- echo Error during test_run.py execution.
- echo Press any key to close...
- pause >nul
- exit /b 1
-)
-
-echo End of script.
-
-echo Press any key to close...
-pause >nul
-
-ENDLOCAL
\ No newline at end of file
diff --git a/README.md b/README.md
index ad5fa194..bf26b4a3 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,10 @@
## 📊 Project Status & Info
-
[](https://pypi.org/project/streamingcommunity)
-[](https://pypi.org/project/streamingcommunity)
-[](https://github.com/Arrowar/StreamingCommunity/blob/main/LICENSE)
-
-[](https://github.com/Arrowar/StreamingCommunity)
[](https://github.com/Arrowar/StreamingCommunity/commits)
[](https://github.com/Arrowar/StreamingCommunity/issues)
+[](https://github.com/Arrowar/StreamingCommunity/blob/main/LICENSE)
## 💝 Support the Project
@@ -60,6 +56,7 @@
- 🔧 [Manual domain configuration](#update-domains)
- 🐳 [Docker](#docker)
- 📝 [Telegram Usage](#telegram-usage)
+- 🧩 [Hook/Plugin System](#hookplugin-system)
@@ -179,6 +176,29 @@ client.start_download()
See [Torrent example](./Test/Download/TOR.py) for complete usage.
+
+🎞️ DASH Downloader
+
+```python
+license_url = "https://example.com/get_license"
+mpd_url = "https://example.com/stream.mpd"
+
+dash_process = DASH_Downloader(
+ cdm_device=get_wvd_path(),
+ license_url=license_url,
+ mpd_url=mpd_url,
+ output_path="output.mp4",
+)
+dash_process.parse_manifest()
+
+if dash_process.download_and_decrypt():
+ dash_process.finalize_output()
+
+dash_process.get_status()
+```
+
+
+
## Binary Location
@@ -309,40 +329,113 @@ python3 update.py
🌐 Domain Configuration Methods
-There are two ways to update the domains for the supported websites:
+There are two ways to manage the domains for the supported websites:
-### 1. Using Local Configuration
+### 1. Online Domain Fetching (Recommended)
-1. Create a `domains.json` file in the root directory of the project
+Set `fetch_domain_online` to `true` in your `config.json`:
-2. Add your domain configuration in the following format:
- ```json
- {
- "altadefinizione": {
- "domain": "si",
- "full_url": "https://altadefinizione.si/"
- },
- ...
+```json
+{
+ "DEFAULT": {
+ "fetch_domain_online": true
}
- ```
-
-3. Set `use_api` to `false` in the `DEFAULT` section of your `config.json`:
- ```json
- {
- "DEFAULT": {
- "use_api": false
- }
+}
+```
+
+This will:
+- Download the latest domains from the GitHub repository
+- Automatically save them to a local `domains.json` file
+- Ensure you always have the most up-to-date streaming site domains
+
+### 2. Local Domain Configuration
+
+Set `fetch_domain_online` to `false` to use a local configuration:
+
+```json
+{
+ "DEFAULT": {
+ "fetch_domain_online": false
}
- ```
+}
+```
+
+Then create a `domains.json` file in the root directory with your domain configuration:
-### 2. Using API (Legacy)
+```json
+{
+ "altadefinizione": {
+ "domain": "si",
+ "full_url": "https://altadefinizione.si/"
+ },
+ "streamingcommunity": {
+ "domain": "best",
+ "full_url": "https://streamingcommunity.best/"
+ }
+}
+```
+
+### 3. Automatic Fallback
+
+If online fetching fails, the script will automatically attempt to use the local `domains.json` file as a fallback, ensuring maximum reliability.
+
+#### 💡 Adding a New Site
+If you want to request a new site to be added to the repository, message us on the Discord server!
+
+
+
+## Hook/Plugin System
+
+
+🧩 Run custom scripts before/after the main execution
+
+Define pre/post hooks in `config.json` under the `HOOKS` section. Supported types:
+
+- **python**: runs `script.py` with the current Python interpreter
+- **bash/sh**: runs via `bash`/`sh` on macOS/Linux
+- **bat/cmd**: runs via `cmd /c` on Windows
+- Inline **command**: use `command` instead of `path`
-The API-based domain updates are currently deprecated. To use it anyway, set `use_api` to `true` in your `config.json` file.
+Sample configuration:
-Note: If `use_api` is set to `false` and no `domains.json` file is found, the script will raise an error.
+```json
+{
+ "HOOKS": {
+ "pre_run": [
+ {
+ "name": "prepare-env",
+ "type": "python",
+ "path": "scripts/prepare.py",
+ "args": ["--clean"],
+ "env": {"MY_FLAG": "1"},
+ "cwd": "~",
+ "os": ["linux", "darwin"],
+ "timeout": 60,
+ "enabled": true,
+ "continue_on_error": true
+ }
+ ],
+ "post_run": [
+ {
+ "name": "notify",
+ "type": "bash",
+ "command": "echo 'Download completed'"
+ }
+ ]
+ }
+}
+```
+
+Notes:
+
+- **os**: optional OS filter (`windows`, `darwin` (`darwin` is used for MacOS), `linux`).
+- **args**: list of arguments passed to the script.
+- **env**: additional environment variables.
+- **cwd**: working directory for the script; supports `~` and environment variables.
+- **continue_on_error**: if `false`, the app stops when the hook fails.
+- **timeout**: in seconds; when exceeded the hook fails.
-#### 💡 Adding a New Site to the Legacy API
-If you want to add a new site to the legacy API, just message me on the Discord server, and I'll add it!
+Hooks are executed automatically by `run.py` before (`pre_run`) and after (`post_run`) the main execution.
@@ -362,12 +455,9 @@ You can change some behaviors by tweaking the configuration file. The configurat
"DEFAULT": {
"debug": false,
"show_message": true,
- "clean_console": true,
"show_trending": true,
- "use_api": true,
- "not_close": false,
+ "fetch_domain_online": true,
"telegram_bot": false,
- "download_site_data": false,
"validate_github_config": false
}
}
@@ -375,13 +465,9 @@ You can change some behaviors by tweaking the configuration file. The configurat
- `debug`: Enables debug logging
- `show_message`: Displays informational messages
-- `clean_console`: Clears the console between operations
- `show_trending`: Shows trending content
-- `use_api`: Uses API for domain updates instead of local configuration
-- `not_close`: If set to true, keeps the program running after download is complete
- * Can be changed from terminal with `--not_close true/false`
+- `fetch_domain_online`: If true, downloads domains from GitHub repository and saves to local file; if false, uses existing local domains.json file
- `telegram_bot`: Enables Telegram bot integration
-- `download_site_data`: If set to false, disables automatic site data download
- `validate_github_config`: If set to false, disables validation and updating of configuration from GitHub
@@ -593,8 +679,7 @@ Note: Requires updated drivers and FFmpeg with hardware acceleration support.
```json
{
"M3U8_PARSER": {
- "force_resolution": "Best",
- "get_only_link": false
+ "force_resolution": "Best"
}
}
```
@@ -614,10 +699,14 @@ Note: Requires updated drivers and FFmpeg with hardware acceleration support.
- 240p (320x240)
- 144p (256x144)
-#### Link Options
-- `get_only_link`: Return M3U8 playlist/index URL instead of downloading
+> [!IMPORTANT]
+> If you enable `DEFAULT.expose_http_api` and start the application, it will run in a non-interactive
+> server-only mode: the terminal will still show messages, but you will not be able to
+> type commands or use the interactive prompts. Use the API endpoints to control the application when this
+> option is enabled (see `README_API.md`). Press Ctrl+C to stop the server.
+
# Global Search
@@ -663,6 +752,18 @@ The Global Search can be configured from the command line:
# Examples of terminal usage
```bash
+# Run a specific site by name with a search term
+python test_run.py --site streamingcommunity --search "interstellar"
+
+# Run a specific site by numeric index (as shown in -h help)
+python test_run.py --site 0 --search "interstellar"
+
+# Auto-download the first result from search (requires --site and --search)
+python test_run.py --site streamingcommunity --search "interstellar" --auto-first
+
+# Show help (includes available sites by name and by index)
+python test_run.py -h
+
# Change video and audio workers
python test_run.py --default_video_worker 8 --default_audio_worker 8
@@ -672,7 +773,7 @@ python test_run.py --specific_list_audio ita,eng --specific_list_subtitles eng,s
# Keep console open after download
python test_run.py --not_close true
-# Use global searchAdd commentMore actions
+# Use global search
python test_run.py --global -s "cars"
# Select specific category
@@ -680,6 +781,9 @@ python test_run.py --category 1 # Search in anime category
python test_run.py --category 2 # Search in movies & series
python test_run.py --category 3 # Search in series
python test_run.py --category 4 # Search in torrent category
+
+# If installed via pip, you can also use the entrypoint directly
+streamingcommunity --site streamingcommunity --search "interstellar" --auto-first
```
# Docker
@@ -799,4 +903,4 @@ API non ufficiale per accedere ai contenuti del sito italiano StreamingCommunity
This software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.
-> **Note:** DASH downloads require a valid L3 CDM (Content Decryption Module) to proceed. This project does not provide, include, or facilitate obtaining any CDM. Users are responsible for ensuring compliance with all applicable laws and requirements regarding DRM and decryption modules.
+> **Note:** DASH downloads require a valid L3 CDM (Content Decryption Module) to proceed. This project does not provide, include, or facilitate obtaining any CDM. Users are responsible for ensuring compliance with all applicable laws and requirements regarding DRM and decryption modules.
\ No newline at end of file
diff --git a/README_API.md b/README_API.md
new file mode 100644
index 00000000..f0dd7878
--- /dev/null
+++ b/README_API.md
@@ -0,0 +1,386 @@
+# StreamingCommunity — API HTTP (FastAPI)
+
+Questo documento spiega come usare l'API HTTP esposta. È pensato per chi vuole integrare [StreamingCommunity](https://github.com/Arrowar/StreamingCommunity) in un'app, script o interfaccia grafica.
+
+Troverai esempi pratici (curl e Python), risposte esempio, note di configurazione e suggerimenti per risolvere errori comuni.
+
+> [!NOTE]
+> L'API deve essere abilitata nel file `config.json` impostando `DEFAULT.expose_http_api` a `true`. La documentazione interattiva è disponibile su `/docs` quando il server è in esecuzione.
+
+> [!IMPORTANT]
+> Quando `DEFAULT.expose_http_api` è impostato su `true` l'applicazione verrà eseguita in modalità non-interattiva _(server-only mode)_: la console mostrerà comunque le informazioni come di consueto ma l'input della tastiera e tutte le attività interattive saranno disabilitate. Potrai inviare comandi esclusivamente dagli endpoint che l'API offre.
+>
+> L'unica interazione disponibile è Ctrl+C per fermare l'applicazione.
+
+----
+
+## Concetti chiave
+
+- `module`: il provider (cartella in `StreamingCommunity/Api/Site/`). Esempio: `streamingcommunity`, `altadefinizione`.
+- `action`: l'operazione da eseguire su un module (es. `download_film`, `download_series` oppure una funzione custom esposta).
+- `item`: oggetto risultato di una ricerca (dizionario). Contiene i campi che il provider fornisce (`id`, `name`, `url`, `type`, `image`, ecc.).
+- `selections`: parametri opzionali per i download di serie (es. `{"season":"2","episode":"1-5"}`).
+- `job`: unità di lavoro messa in coda; gestita dal `JobManager` e processata in ordine FIFO, una alla volta.
+
+----
+
+## Endpoints principali
+
+- `GET /providers`: elenca i provider caricati.
+- `POST /search`: esegue una ricerca.
+- `POST /module_call`: invoca una funzione di un module (sync o background).
+- `POST /jobs`: crea un job per eseguire un'azione (download).
+- `GET /jobs`: mostra la lista dei job attivi.
+- `GET /jobs/{job_id}`: dettagli e stato di un job.
+
+Tutte le chiamate possono essere protette con Basic Auth se sono state inserite `http_api_username` e `http_api_password` in `config.json`.
+
+> [!TIP]
+> Dopo aver letto questo documento ti consiglio di mettere le mani in pasta e provare l'API in http://localhost:8080/docs.
+
+----
+
+## Config utili
+
+- `DEFAULT.expose_http_api`: `true|false` abilita/disabilita l'API.
+- `DEFAULT.http_api_port`: porta su cui esporre l'API (es. `8080`).
+- `DEFAULT.http_api_username`, `DEFAULT.http_api_password`: credenziali Basic Auth (opzionali).
+- `DEFAULT.http_api_provider_timeout`: tempo in secondi per chiamate verso i provider (default: 20).
+
+> [!IMPORTANT]
+> Prima di usare i download assicurati che i provider da cui vuoi scaricare non richiedano credenziali o configurazioni addizionali. Alcuni provider possono restituire errori se non configurati correttamente.
+
+> [!NOTE]
+> Se non hai modificato le impostazioni dell'API, all'avvio sarà esposta all'indirizzo http://localhost:8080/.
+
+----
+
+## 1) Elenco provider
+
+#### Richiesta curl
+
+```
+curl http://127.0.0.1:8080/providers
+```
+
+##### Risposta (esempio)
+
+```json
+{
+ "providers": [
+ {"name":"streamingcommunity","indice":0,"use_for":"Film_&_Serie"},
+ {"name":"altadefinizione","indice":2,"use_for":"Film_&_Serie"}
+ ]
+}
+```
+
+----
+
+## 2) Ricerca contenuti
+
+Esempi di richieste:
+
+#### curl
+
+```
+curl -X POST http://127.0.0.1:8080/search \
+ -H 'Content-Type: application/json' \
+ -d '{"provider":"all","query":"Matrix"}'
+```
+
+#### Python (httpx)
+
+```py
+import httpx
+client = httpx.Client(timeout=60.0)
+r = client.post('http://127.0.0.1:8080/search', json={'provider':'all','query':'Matrix'})
+print(r.json())
+```
+
+##### Risposta (esempio semplificato)
+
+```json
+{
+ "query": "Matrix",
+ "results": {
+ "streamingcommunity": [ {"id":1994,"name":"Matrix","type":"movie","url":"..."} ],
+ "altadefinizione": [ {"name":"Matrix Reloaded","type":"movie","url":"..."} ],
+ "crunchyroll": {"error":{"type":"ValueError","message":"Please enter a correct 'etp_rt' value"}}
+ }
+}
+```
+
+Note
+
+- Se un provider fallisce viene restituito un oggetto `error` per quel provider; gli altri provider continuano a rispondere.
+- Usa i risultati di `/search` come `item` per creare job di download.
+
+----
+
+## 3) Chiamare funzioni del module (module_call)
+
+Usa `module_call` per invocare funzioni esposte o metodi noti del provider.
+
+##### Richiesta sincrona
+
+```
+POST /module_call
+{
+ "module": "streamingcommunity", # Provider 'streamingcommunity'
+ "function": "search", # Nome funzione da eseguire
+ "kwargs": {"string_to_search":"Matrix","get_onlyDatabase":true},
+ "background": false
+}
+```
+
+##### Risposta (sync success)
+
+```json
+{ "result": {...} }
+```
+
+##### Schedulazione come job
+
+```
+POST /module_call
+{ ... , "background": true }
+```
+
+##### Risposta
+
+```json
+{ "status": "scheduled", "job_id": 5 }
+```
+
+----
+
+## 4) Job: creare e controllare un download
+
+### Creare job
+
+##### Esempio di download (film):
+
+```
+POST /jobs
+{
+ "module":"streamingcommunity",
+ "action":"download_film",
+ "item": { /* item preso da /search */ }
+}
+```
+
+##### Risposta
+
+```json
+{ "job_id": 12 }
+```
+
+##### Controllare lo stato
+
+```
+GET /jobs/12
+```
+
+### Tipi di risposte (esempi)
+
+queued
+
+```json
+{ "id":12, "status":"queued", "created_at": 1690000000.0 }
+```
+
+running
+
+```json
+{ "id":12, "status":"running", "started_at":1690000005.0 }
+```
+
+finished
+
+```json
+{ "id":12, "status":"finished", "finished_at":1690000100.0, "result": "..." }
+```
+
+failed
+
+```json
+{ "id":12, "status":"failed", "error":"ValueError: missing stream url" }
+```
+
+### Selezioni (`selections`)
+
+Come determinare la struttura effettiva di `selections`:
+
+- **Controlla il provider**: la struttura valida dipende dal provider (cartella in `StreamingCommunity/Api/Site/`). Molti provider usano le chiavi `season` e `episode`.
+- **Guarda il codice**: cerca dove il provider legge `selections` (es. `selections.get('season')`, `selections.get('episode')`).
+- **Esempi pratici**: usa una richiesta `/search` o leggi il codice del provider per capire quali chiavi accetta.
+
+Tipicamente per le serie il formato usato è:
+
+```json
+"selections": {
+ "season": "1",
+ "episode": "1-5"
+}
+```
+
+##### Creare un job per scaricare una serie (stagione 1, episodi 1-5)
+
+```
+POST /jobs
+{
+ "module": "streamingcommunity",
+ "action": "download_series",
+ "item": { /* item preso da /search, es. {"id":123, "name":"My Show", "url":"..."} */ },
+ "selections": { "season": "1", "episode": "1-5" }
+}
+```
+
+##### Risposta (job creato con successo)
+
+```json
+{ "job_id": 42 }
+```
+
+Poi `GET /jobs/42` tipicamente restituisce qualcosa del genere (il campo `payload` mantiene le informazioni inviate):
+
+```json
+{
+ "id": 42,
+ "progress": 0,
+ "status": "queued",
+ "created_at": 1690000000.0,
+ "payload": {
+ "module": "streamingcommunity",
+ "action": "download_series",
+ "item": {"id":123,"name":"My Show","url":"..."},
+ "selections": {"season":"1","episode":"1-5"}
+ }
+}
+```
+
+Se un provider richiede parametri diversi, il job conterrà quelle chiavi; per dubbi controlla il file `StreamingCommunity/Api/Site/<module>/__init__.py`.
+
+
+> [!NOTE]
+> Se farai più richieste download: i job sono eseguiti uno alla volta (sequenziale). Pollare lo stato con `GET /jobs/{id}` è la pratica raccomandata.
+
+> [!IMPORTANT]
+> L'oggetto `job` che restituisce `GET /jobs` e `GET /jobs/{id}` contiene il campo `progress` (0..100). Questo valore rappresenta il progresso **totale** del _job_ (dall'inizio del download fino alla fase di unione audio/video). Nei providers si possono aggiustare i valori percentuali rappresentati durante il processo utilizzando `JOB_MANAGER.update_progress(percent)` dal codice di ognuno di essi.
+
+----
+
+## 5) Esporre una funzione di provider (expose_api)
+
+Obiettivo: esporre una funzione custom nel module _aka provider (es. `/Api/Site/raiplay/`)_ e poterla chiamare via `module_call` o come job.
+
+1. Apri il file del provider (es. `StreamingCommunity/Api/Site/mymodule/__init__.py` o altro file in quella cartella).
+2. Aggiungi:
+
+```py
+from StreamingCommunity.Api.http_api import expose_api
+
+@expose_api('my_custom')
+def my_custom(item, selections=None, **kwargs):
+ # item è un oggetto MediaItem (puoi leggere item.name, item.url ecc.)
+ # fai il lavoro e ritorna un risultato serializzabile
+ # Durante operazioni lunghe puoi aggiornare il progresso totale del job
+ # (0..100) così il client che sta pollando `/jobs/{id}` ottiene feedback.
+ from StreamingCommunity.Api.http_api import JOB_MANAGER
+ JOB_MANAGER.update_progress(5)
+ # ... avvia il download ...
+ JOB_MANAGER.update_progress(60)
+ # ... sincronizza audio/video ...
+ JOB_MANAGER.update_progress(95)
+ return {'status':'ok','title': getattr(item,'name',None)}
+```
+
+3. Riavvia lo script per ricaricare i moduli.
+4. Test via API (sync):
+
+```
+POST /module_call
+{
+ "module":"mymodule",
+ "function":"my_custom",
+ "kwargs": {"item": {/* item JSON preso da /search */}},
+ "background": false
+}
+```
+
+Risposta (esempio)
+
+```json
+{ "result": {"status":"ok","title":"Matrix"} }
+```
+
+> [!IMPORTANT]
+> Seleziona nomi univoci per `expose_api('name')` per evitare conflitti tra moduli.
+
+----
+
+## Errori comuni e come risolverli
+
+#### 1. `401 Unauthorized` credenziali mancanti o sbagliate
+Impostare l'header `Authorization: Basic <base64(user:password)>` oppure rimuovere le credenziali da `config.json`.
+
+#### 2. `Timeout` su `/search` per un provider
+Aumentare `DEFAULT.http_api_provider_timeout` o correggere il provider che richiede input.
+
+#### 3. `job failed`
+Leggere `GET /jobs/{id}` campo `error` per capire cosa non ha funzionato (es. mancanza di campi in `item`, login richiesto, ecc.).
+
+#### 4. `module not found`
+Controllare `GET /providers` per il nome corretto del module.
+
+----
+
+## Buone pratiche di integrazione
+
+- Usa i risultati di `/search` come `item` senza modificarli, salvo i campi specifici necessari.
+- Per serie, passare `selections` chiare (`season`/`episode` o range `1-5`).
+- Non fare affidamento su chiamate sincrone per download lunghi: usa `POST /jobs` e polla lo stato.
+- Proteggi l'API in produzione con Basic Auth o con altro strato di autenticazione più forte.
+
+----
+
+## Esempi Python rapidi (httpx)
+
+```py
+import httpx
+client = httpx.Client(timeout=60.0)
+
+# search
+r = client.post('http://127.0.0.1:8080/search', json={'provider':'all','query':'Matrix'})
+print(r.json())
+
+# create job (supponendo item sia ottenuto dalla search)
+item = r.json()['results']['streamingcommunity'][0]
+job = client.post('http://127.0.0.1:8080/jobs', json={'module':'streamingcommunity','action':'download_film','item':item})
+print(job.json())
+
+# poll
+import time
+
+job_id = job.json()['job_id']
+while True:
+    s = client.get(f'http://127.0.0.1:8080/jobs/{job_id}').json()
+    print(s['status'])
+    if s['status'] in ('finished','failed'):
+        break
+    time.sleep(1)
+```
+
+----
+
+## Manca qualcosa?
+
+Se trovi un provider che necessita modifiche per l'uso via API (es. firma funzione non standard, input interattivo), apri una issue o invia una PR con la correzione.
+
+Se pensi che questo documento abbia bisogno di informazioni aggiornate, corrette od organizzate diversamente, invia una PR.
+
+Se invece vuoi contribuire in qualsiasi modo: sei il benvenuto.
+
+## TODO (in ordine di importanza)
+- [ ] Migliorare il progress system, attualmente è implementato in modo molto spartano.
+- [ ] Rendere più efficiente la gestione degli errori e validation. Attualmente molto codice è ripetuto.
+- [ ] Implementare SSE invece di utilizzare Job Polling. Più efficiente, più pulito, ma più complesso da gestire (non solo per chi deve implementarlo, ma anche per chi consuma l'API).
+
diff --git a/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py b/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py
index d5f55de8..b7749ea8 100644
--- a/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py
+++ b/StreamingCommunity/Api/Player/Helper/Vixcloud/util.py
@@ -1,6 +1,6 @@
# 23.11.24
-from typing import Dict, Any, List, Union, List, Optional
+from typing import Dict, Any, List, Optional
class Episode:
@@ -12,6 +12,7 @@ def __init__(self, data: Dict[str, Any]):
self.name: str = data.get('name', '')
self.duration: int = data.get('duration', 0)
self.url: str = data.get('url', '')
+ self.mpd_id: str = data.get('mpd_id', '')
def __str__(self):
return f"Episode(id={self.id}, number={self.number}, name='{self.name}', duration={self.duration} sec)"
diff --git a/StreamingCommunity/Api/Player/hdplayer.py b/StreamingCommunity/Api/Player/hdplayer.py
index ba6bc870..3bb34888 100644
--- a/StreamingCommunity/Api/Player/hdplayer.py
+++ b/StreamingCommunity/Api/Player/hdplayer.py
@@ -3,12 +3,12 @@
import re
# External library
-import httpx
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.config_json import config_manager
@@ -19,7 +19,7 @@
class VideoSource:
def __init__(self):
- self.client = httpx.Client(headers={'user-agent': get_userAgent()}, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
+ self.client = create_client(headers={'user-agent': get_userAgent()})
def extractLinkHdPlayer(self, response):
"""Extract iframe source from the page."""
diff --git a/StreamingCommunity/Api/Player/sweetpixel.py b/StreamingCommunity/Api/Player/sweetpixel.py
index 0954c189..c887ce70 100644
--- a/StreamingCommunity/Api/Player/sweetpixel.py
+++ b/StreamingCommunity/Api/Player/sweetpixel.py
@@ -4,12 +4,12 @@
# External libraries
-import httpx
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
# Variable
@@ -17,21 +17,18 @@
REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
class VideoSource:
- def __init__(self, full_url, episode_data, session_id, csrf_token):
+ def __init__(self, site_url, episode_data, session_id, csrf_token):
"""Initialize the VideoSource with session details, episode data, and URL."""
self.session_id = session_id
self.csrf_token = csrf_token
self.episode_data = episode_data
self.number = episode_data['number']
- self.link = episode_data['link']
+ self.link = site_url + episode_data['link']
# Create an HTTP client with session cookies, headers, and base URL.
- self.client = httpx.Client(
+ self.client = create_client(
cookies={"sessionId": session_id},
- headers={"User-Agent": get_userAgent(), "csrf-token": csrf_token},
- base_url=full_url,
- timeout=MAX_TIMEOUT,
- verify=REQUEST_VERIFY
+ headers={"User-Agent": get_userAgent(), "csrf-token": csrf_token}
)
def get_playlist(self):
diff --git a/StreamingCommunity/Api/Site/altadefinizione/__init__.py b/StreamingCommunity/Api/Site/altadefinizione/__init__.py
index cdbc08d8..9aa1aab1 100644
--- a/StreamingCommunity/Api/Site/altadefinizione/__init__.py
+++ b/StreamingCommunity/Api/Site/altadefinizione/__init__.py
@@ -43,7 +43,7 @@ def get_user_input(string_to_search: str = None):
bot = get_bot_instance()
string_to_search = bot.ask(
"key_search",
- f"Enter the search term\nor type 'back' to return to the menu: ",
+ "Enter the search term\nor type 'back' to return to the menu: ",
None
)
@@ -117,7 +117,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
if site_constant.TELEGRAM_BOT:
- bot.send_message(f"No results found, please try again", None)
+ bot.send_message("No results found, please try again", None)
# If no results are found, ask again
string_to_search = get_user_input()
diff --git a/StreamingCommunity/Api/Site/altadefinizione/film.py b/StreamingCommunity/Api/Site/altadefinizione/film.py
index b3eefb30..c8c58e09 100644
--- a/StreamingCommunity/Api/Site/altadefinizione/film.py
+++ b/StreamingCommunity/Api/Site/altadefinizione/film.py
@@ -5,7 +5,6 @@
# External library
-import httpx
from bs4 import BeautifulSoup
from rich.console import Console
@@ -13,6 +12,7 @@
# Internal utilities
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Util.headers import get_headers
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance, TelegramSession
@@ -21,6 +21,7 @@
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -43,6 +44,10 @@ def download_film(select_title: MediaItem) -> str:
Return:
- str: output path if successful, otherwise None
"""
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None
+
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
bot.send_message(f"Download in corso:\n{select_title.name}", None)
@@ -60,7 +65,7 @@ def download_film(select_title: MediaItem) -> str:
# Extract mostraguarda URL
try:
- response = httpx.get(select_title.url, headers=get_headers(), timeout=10)
+ response = create_client(headers=get_headers()).get(select_title.url)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
@@ -74,7 +79,7 @@ def download_film(select_title: MediaItem) -> str:
# Extract supervideo URL
supervideo_url = None
try:
- response = httpx.get(mostraguarda, headers=get_headers(), timeout=10)
+ response = create_client(headers=get_headers()).get(mostraguarda)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
@@ -96,7 +101,7 @@ def download_film(select_title: MediaItem) -> str:
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
# Download the film using the m3u8 playlist, and output filename
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, title_name)
).start()
@@ -108,8 +113,10 @@ def download_film(select_title: MediaItem) -> str:
if script_id != "unknown":
TelegramSession.deleteScriptId(script_id)
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
- return r_proc['path']
\ No newline at end of file
+ return hls_process['path']
diff --git a/StreamingCommunity/Api/Site/altadefinizione/series.py b/StreamingCommunity/Api/Site/altadefinizione/series.py
index 2d310b61..3945e234 100644
--- a/StreamingCommunity/Api/Site/altadefinizione/series.py
+++ b/StreamingCommunity/Api/Site/altadefinizione/series.py
@@ -18,11 +18,12 @@
from .util.ScrapeSerie import GetSerieInfo
from StreamingCommunity.Api.Template.Util import (
manage_selection,
- map_episode_title,
+ map_episode_title,
validate_selection,
validate_episode_selection,
display_episodes_list
)
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
@@ -54,7 +55,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
+ console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{scrape_serie.series_name}[/cyan] \\ [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
# Telegram integration
if site_constant.TELEGRAM_BOT:
@@ -81,16 +82,18 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
master_playlist = video_source.get_playlist()
# Download the episode
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, mp4_name)
).start()
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
- return r_proc['path'], r_proc['stopped']
+ return hls_process['path'], hls_process['stopped']
def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None) -> None:
@@ -122,6 +125,8 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
else:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = display_episodes_list(episodes)
# Prompt user for episode selection
@@ -155,8 +160,16 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# Prompt user for season selection and download episodes
console.print(f"\n[green]Seasons found: [red]{seasons_count}")
+ if seasons_count == 0:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No seasons found for this title (non-interactive mode)')
+ console.print('[red]No seasons found for this title')
+ return
+
# If season_selection is provided, use it instead of asking for input
if season_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No season selection provided and cannot prompt in non-interactive mode')
if site_constant.TELEGRAM_BOT:
console.print("\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
"[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end")
@@ -196,9 +209,9 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
if site_constant.TELEGRAM_BOT:
- bot.send_message(f"Finito di scaricare tutte le serie e episodi", None)
+ bot.send_message("Finito di scaricare tutte le serie e episodi", None)
# Get script_id
script_id = TelegramSession.get_session()
if script_id != "unknown":
- TelegramSession.deleteScriptId(script_id)
\ No newline at end of file
+ TelegramSession.deleteScriptId(script_id)
diff --git a/StreamingCommunity/Api/Site/altadefinizione/site.py b/StreamingCommunity/Api/Site/altadefinizione/site.py
index 9356f907..e4d60a0e 100644
--- a/StreamingCommunity/Api/Site/altadefinizione/site.py
+++ b/StreamingCommunity/Api/Site/altadefinizione/site.py
@@ -93,7 +93,7 @@ def title_search(query: str) -> int:
if site_constant.TELEGRAM_BOT:
if choices:
- bot.send_message(f"Lista dei risultati:", choices)
+ bot.send_message("Lista dei risultati:", choices)
# Return the number of titles found
return media_search_manager.get_length()
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Site/animeunity/__init__.py b/StreamingCommunity/Api/Site/animeunity/__init__.py
index 13f78778..d00cf5d6 100644
--- a/StreamingCommunity/Api/Site/animeunity/__init__.py
+++ b/StreamingCommunity/Api/Site/animeunity/__init__.py
@@ -43,7 +43,7 @@ def get_user_input(string_to_search: str = None):
bot = get_bot_instance()
string_to_search = bot.ask(
"key_search",
- f"Enter the search term\nor type 'back' to return to the menu: ",
+ "Enter the search term\nor type 'back' to return to the menu: ",
None
)
@@ -116,7 +116,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
if site_constant.TELEGRAM_BOT:
- bot.send_message(f"No results found, please try again", None)
+ bot.send_message("No results found, please try again", None)
# If no results are found, ask again
string_to_search = get_user_input()
diff --git a/StreamingCommunity/Api/Site/animeunity/film.py b/StreamingCommunity/Api/Site/animeunity/film.py
index e9107759..fa9c2669 100644
--- a/StreamingCommunity/Api/Site/animeunity/film.py
+++ b/StreamingCommunity/Api/Site/animeunity/film.py
@@ -9,6 +9,7 @@
from .util.ScrapeSerie import ScrapeSerieAnime
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -28,6 +29,10 @@ def download_film(select_title: MediaItem):
- title_name (str): The title of the film.
"""
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None
+
# Init class
scrape_serie = ScrapeSerieAnime(site_constant.FULL_URL)
video_source = VideoSourceAnime(site_constant.FULL_URL)
@@ -37,4 +42,4 @@ def download_film(select_title: MediaItem):
scrape_serie.is_series = False
# Start download
- download_episode(0, scrape_serie, video_source)
\ No newline at end of file
+ download_episode(0, scrape_serie, video_source)
diff --git a/StreamingCommunity/Api/Site/animeunity/serie.py b/StreamingCommunity/Api/Site/animeunity/serie.py
index 2dcf266f..5c24de30 100644
--- a/StreamingCommunity/Api/Site/animeunity/serie.py
+++ b/StreamingCommunity/Api/Site/animeunity/serie.py
@@ -19,6 +19,7 @@
from .util.ScrapeSerie import ScrapeSerieAnime
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Util import manage_selection, dynamic_format_number
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
@@ -105,11 +106,19 @@ def download_series(select_title: MediaItem, season_selection: str = None, episo
# Get episode information
episoded_count = scrape_serie.get_count_episodes()
console.print(f"[green]Episodes count:[/green] [red]{episoded_count}[/red]")
+
+ if episoded_count == 0:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episodes found for this title (non-interactive mode)')
+ console.print('[red]No episodes found for this title')
+ return
# Telegram bot integration
if episode_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
if site_constant.TELEGRAM_BOT:
- console.print(f"\n[cyan]Insert media [red]index [yellow]or [red]* [cyan]to download all media [yellow]or [red]1-2 [cyan]or [red]3-* [cyan]for a range of media")
+ console.print("\n[cyan]Insert media [red]index [yellow]or [red]* [cyan]to download all media [yellow]or [red]1-2 [cyan]or [red]3-* [cyan]for a range of media")
bot.send_message(f"Episodi trovati: {episoded_count}", None)
last_command = bot.ask(
@@ -145,9 +154,9 @@ def download_series(select_title: MediaItem, season_selection: str = None, episo
_, kill_handler = download_episode(i_episode-1, scrape_serie, video_source)
if site_constant.TELEGRAM_BOT:
- bot.send_message(f"Finito di scaricare tutte le serie e episodi", None)
+ bot.send_message("Finito di scaricare tutte le serie e episodi", None)
# Get script_id
script_id = TelegramSession.get_session()
if script_id != "unknown":
- TelegramSession.deleteScriptId(script_id)
\ No newline at end of file
+ TelegramSession.deleteScriptId(script_id)
diff --git a/StreamingCommunity/Api/Site/animeworld/film.py b/StreamingCommunity/Api/Site/animeworld/film.py
index bb06ad26..dc414073 100644
--- a/StreamingCommunity/Api/Site/animeworld/film.py
+++ b/StreamingCommunity/Api/Site/animeworld/film.py
@@ -15,6 +15,7 @@
from .util.ScrapeSerie import ScrapSerie
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -34,6 +35,10 @@ def download_film(select_title: MediaItem):
- id_film (int): The ID of the film.
- title_name (str): The title of the film.
"""
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None, False
+
start_message()
scrape_serie = ScrapSerie(select_title.url, site_constant.FULL_URL)
@@ -60,4 +65,4 @@ def download_film(select_title: MediaItem):
path=os.path.join(mp4_path, mp4_name)
)
- return path, kill_handler
\ No newline at end of file
+ return path, kill_handler
diff --git a/StreamingCommunity/Api/Site/animeworld/serie.py b/StreamingCommunity/Api/Site/animeworld/serie.py
index dcbe70a7..e9ec0cff 100644
--- a/StreamingCommunity/Api/Site/animeworld/serie.py
+++ b/StreamingCommunity/Api/Site/animeworld/serie.py
@@ -17,7 +17,8 @@
# Logic class
from .util.ScrapeSerie import ScrapSerie
from StreamingCommunity.Api.Template.config_loader import site_constant
-from StreamingCommunity.Api.Template.Util import manage_selection, dynamic_format_number
+from StreamingCommunity.Api.Template.Util import manage_selection, dynamic_format_number, assert_interactive_allowed
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
@@ -86,8 +87,16 @@ def download_series(select_title: MediaItem, episode_selection: str = None):
# Get episode count
console.print(f"[green]Episodes found:[/green] [red]{len(episodes)}[/red]")
+ if len(episodes) == 0:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episodes found for this title (non-interactive mode)')
+ console.print('[red]No episodes found for this title')
+ return
+
# Display episodes list and get user selection
if episode_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = msg.ask("\n[cyan]Insert media [red]index [yellow]or [red]* [cyan]to download all media [yellow]or [red]1-2 [cyan]or [red]3-* [cyan]for a range of media")
else:
last_command = episode_selection
@@ -106,4 +115,4 @@ def download_series(select_title: MediaItem, episode_selection: str = None):
for i_episode in list_episode_select:
if kill_handler:
break
- _, kill_handler = download_episode(i_episode-1, scrape_serie)
\ No newline at end of file
+ _, kill_handler = download_episode(i_episode-1, scrape_serie)
diff --git a/StreamingCommunity/Api/Site/animeworld/site.py b/StreamingCommunity/Api/Site/animeworld/site.py
index 79d6aa8e..7dbe5e60 100644
--- a/StreamingCommunity/Api/Site/animeworld/site.py
+++ b/StreamingCommunity/Api/Site/animeworld/site.py
@@ -11,6 +11,7 @@
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent, get_headers
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.table import TVShowManager
@@ -31,11 +32,8 @@ def get_session_and_csrf() -> dict:
Get the session ID and CSRF token from the website's cookies and HTML meta data.
"""
# Send an initial GET request to the website
- response = httpx.get(
- site_constant.FULL_URL,
- headers=get_headers(),
- verify=False
- )
+ client = create_client(headers=get_headers())
+ response = client.get(site_constant.FULL_URL)
# Extract the sessionId from the cookies
session_id = response.cookies.get('sessionId')
diff --git a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
index d46f6352..d859f822 100644
--- a/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py
@@ -2,13 +2,14 @@
import logging
+
# External libraries
-import httpx
from bs4 import BeautifulSoup
# Internal utilities
from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.os import os_manager
@@ -23,24 +24,21 @@
class ScrapSerie:
- def __init__(self, url, full_url):
+ def __init__(self, url, site_url):
"""Initialize the ScrapSerie object with the provided URL and setup the HTTP client."""
self.url = url
- self.link = httpx.URL(url).path
self.session_id, self.csrf_token = get_session_and_csrf()
- self.client = httpx.Client(
+ self.client = create_client(
cookies={"sessionId": self.session_id},
- headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token},
- base_url=full_url,
- verify=False
+ headers={"User-Agent": get_userAgent(), "csrf-token": self.csrf_token}
)
try:
- self.response = self.client.get(self.link, timeout=max_timeout, follow_redirects=True)
+ self.response = self.client.get(self.url, timeout=max_timeout, follow_redirects=True)
self.response.raise_for_status()
- except:
- raise Exception(f"Failed to retrieve anime page.")
+ except Exception as e:
+ raise Exception(f"Failed to retrieve anime page: {str(e)}")
def get_name(self):
"""Extract and return the name of the anime series."""
diff --git a/StreamingCommunity/Api/Site/cb01new/film.py b/StreamingCommunity/Api/Site/cb01new/film.py
index 38779702..5b4a0635 100644
--- a/StreamingCommunity/Api/Site/cb01new/film.py
+++ b/StreamingCommunity/Api/Site/cb01new/film.py
@@ -15,6 +15,7 @@
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -36,6 +37,10 @@ def download_film(select_title: MediaItem) -> str:
Return:
- str: output path
"""
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None
+
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
@@ -50,13 +55,15 @@ def download_film(select_title: MediaItem) -> str:
master_playlist = video_source.get_playlist()
# Download the film using the m3u8 playlist, and output filename
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, title_name)
).start()
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
-
- return r_proc['path']
\ No newline at end of file
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
+
+ return hls_process['path']
diff --git a/StreamingCommunity/Api/Site/crunchyroll/__init__.py b/StreamingCommunity/Api/Site/crunchyroll/__init__.py
index 1a53355a..8dd47efd 100644
--- a/StreamingCommunity/Api/Site/crunchyroll/__init__.py
+++ b/StreamingCommunity/Api/Site/crunchyroll/__init__.py
@@ -1,7 +1,5 @@
# 16.03.25
-import sys
-import subprocess
from urllib.parse import quote_plus
@@ -25,7 +23,7 @@
indice = 8
_useFor = "Anime"
_priority = 0
-_engineDownload = "hls"
+_engineDownload = "dash"
_deprecate = False
msg = Prompt()
diff --git a/StreamingCommunity/Api/Site/crunchyroll/film.py b/StreamingCommunity/Api/Site/crunchyroll/film.py
index 414316b6..3a8f7c36 100644
--- a/StreamingCommunity/Api/Site/crunchyroll/film.py
+++ b/StreamingCommunity/Api/Site/crunchyroll/film.py
@@ -17,6 +17,7 @@
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -39,6 +40,10 @@ def download_film(select_title: MediaItem) -> str:
Return:
- str: output path if successful, otherwise None
"""
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None
+
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
@@ -54,13 +59,13 @@ def download_film(select_title: MediaItem) -> str:
query_params = parse_qs(parsed_url.query)
# Download the episode
- r_proc = DASH_Downloader(
+ dash_process = DASH_Downloader(
cdm_device=get_wvd_path(),
license_url='https://www.crunchyroll.com/license/v1/license/widevine',
mpd_url=mpd_url,
output_path=os.path.join(mp4_path, mp4_name),
)
- r_proc.parse_manifest(custom_headers=mpd_headers)
+ dash_process.parse_manifest(custom_headers=mpd_headers)
# Create headers for license request
license_headers = mpd_headers.copy()
@@ -69,14 +74,16 @@ def download_film(select_title: MediaItem) -> str:
"x-cr-video-token": query_params['playbackGuid'][0],
})
- if r_proc.download_and_decrypt(custom_headers=license_headers):
- r_proc.finalize_output()
+ if dash_process.download_and_decrypt(custom_headers=license_headers):
+ dash_process.finalize_output()
# Get final output path and status
- status = r_proc.get_status()
+ status = dash_process.get_status()
if status['error'] is not None and status['path']:
- try: os.remove(status['path'])
- except Exception: pass
+ try:
+ os.remove(status['path'])
+ except Exception:
+ pass
- return status['path'], status['stopped']
\ No newline at end of file
+ return status['path'], status['stopped']
diff --git a/StreamingCommunity/Api/Site/crunchyroll/series.py b/StreamingCommunity/Api/Site/crunchyroll/series.py
index bc5d6f95..9ca8bd68 100644
--- a/StreamingCommunity/Api/Site/crunchyroll/series.py
+++ b/StreamingCommunity/Api/Site/crunchyroll/series.py
@@ -19,11 +19,12 @@
from .util.ScrapeSerie import GetSerieInfo, delete_stream_episode
from StreamingCommunity.Api.Template.Util import (
manage_selection,
- map_episode_title,
+ map_episode_title,
validate_selection,
validate_episode_selection,
display_episodes_list
)
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
@@ -71,13 +72,13 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
query_params = parse_qs(parsed_url.query)
# Download the episode
- r_proc = DASH_Downloader(
+ dash_process = DASH_Downloader(
cdm_device=get_wvd_path(),
license_url='https://www.crunchyroll.com/license/v1/license/widevine',
mpd_url=mpd_url,
output_path=os.path.join(mp4_path, mp4_name),
)
- r_proc.parse_manifest(custom_headers=mpd_headers)
+ dash_process.parse_manifest(custom_headers=mpd_headers)
# Create headers for license request
license_headers = mpd_headers.copy()
@@ -86,15 +87,17 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
"x-cr-video-token": query_params['playbackGuid'][0],
})
- if r_proc.download_and_decrypt(custom_headers=license_headers):
- r_proc.finalize_output()
+ if dash_process.download_and_decrypt(custom_headers=license_headers):
+ dash_process.finalize_output()
# Get final output path and status
- status = r_proc.get_status()
+ status = dash_process.get_status()
if status['error'] is not None and status['path']:
- try: os.remove(status['path'])
- except Exception: pass
+ try:
+ os.remove(status['path'])
+ except Exception:
+ pass
# Delete episode stream
delete_stream_episode(url_id, query_params['playbackGuid'][0], mpd_headers)
@@ -130,6 +133,8 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
else:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = display_episodes_list(episodes)
# Prompt user for episode selection
@@ -162,6 +167,8 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# If season_selection is provided, use it instead of asking for input
if season_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No season selection provided and cannot prompt in non-interactive mode')
index_season_selected = msg.ask(
"\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
"[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
@@ -183,4 +190,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
else:
# Otherwise, let the user select specific episodes for the single season
- download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
\ No newline at end of file
+ download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
diff --git a/StreamingCommunity/Api/Site/crunchyroll/site.py b/StreamingCommunity/Api/Site/crunchyroll/site.py
index e47c1d8e..ed77bcc6 100644
--- a/StreamingCommunity/Api/Site/crunchyroll/site.py
+++ b/StreamingCommunity/Api/Site/crunchyroll/site.py
@@ -1,5 +1,7 @@
# 16.03.25
+import os
+
# External libraries
from curl_cffi import requests
@@ -8,6 +10,7 @@
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.os import get_wvd_path
from StreamingCommunity.Util.headers import get_headers
from StreamingCommunity.Util.table import TVShowManager
@@ -38,8 +41,14 @@ def title_search(query: str) -> int:
media_search_manager.clear()
table_show_manager.clear()
+ # Check CDM file before usage
+ cdm_device_path = get_wvd_path()
+ if not cdm_device_path or not isinstance(cdm_device_path, (str, bytes, os.PathLike)) or not os.path.isfile(cdm_device_path):
+ console.print(f"[bold red] CDM file not found or invalid path: {cdm_device_path}[/bold red]")
+ return None
+
# Build new Crunchyroll API search URL
- api_url = f"https://www.crunchyroll.com/content/v2/discover/search"
+ api_url = "https://www.crunchyroll.com/content/v2/discover/search"
params = {
"q": query,
diff --git a/StreamingCommunity/Api/Site/guardaserie/series.py b/StreamingCommunity/Api/Site/guardaserie/series.py
index e4b71233..0c797b16 100644
--- a/StreamingCommunity/Api/Site/guardaserie/series.py
+++ b/StreamingCommunity/Api/Site/guardaserie/series.py
@@ -24,6 +24,7 @@
)
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.http_api import JOB_MANAGER
# Player
@@ -55,7 +56,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scap
# Get episode information
obj_episode = scape_info_serie.selectEpisode(index_season_selected, index_episode_selected-1)
index_season_selected = dynamic_format_number(str(index_season_selected))
- console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.get('name')}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
+ console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{scape_info_serie.tv_name}[/cyan] \\ [bold magenta]{obj_episode.get('name')}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scape_info_serie.tv_name, index_season_selected, index_episode_selected, obj_episode.get('name'))}.mp4"
@@ -68,16 +69,18 @@ def download_video(index_season_selected: int, index_episode_selected: int, scap
master_playlist = video_source.get_playlist()
# Download the film using the m3u8 playlist, and output filename
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, mp4_name)
).start()
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
- return r_proc['path'], r_proc['stopped']
+ return hls_process['path'], hls_process['stopped']
def download_episode(scape_info_serie: GetSerieInfo, index_season_selected: int, download_all: bool = False, episode_selection: str = None) -> None:
@@ -109,6 +112,8 @@ def download_episode(scape_info_serie: GetSerieInfo, index_season_selected: int,
# Display episodes list and manage user selection
if episode_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = display_episodes_list(scape_info_serie.list_episodes)
else:
last_command = episode_selection
@@ -146,8 +151,16 @@ def download_series(dict_serie: MediaItem, season_selection: str = None, episode
# Prompt user for season selection and download episodes
console.print(f"\n[green]Seasons found: [red]{seasons_count}")
+ if seasons_count == 0:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No seasons found for this title (non-interactive mode)')
+ console.print('[red]No seasons found for this title')
+ return
+
# If season_selection is provided, use it instead of asking for input
if season_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No season selection provided and cannot prompt in non-interactive mode')
index_season_selected = msg.ask(
"\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
"[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
@@ -169,4 +182,4 @@ def download_series(dict_serie: MediaItem, season_selection: str = None, episode
else:
# Otherwise, let the user select specific episodes for the single season
- download_episode(scrape_serie, i_season, download_all=False, episode_selection=episode_selection)
\ No newline at end of file
+ download_episode(scrape_serie, i_season, download_all=False, episode_selection=episode_selection)
diff --git a/StreamingCommunity/Api/Site/guardaserie/site.py b/StreamingCommunity/Api/Site/guardaserie/site.py
index 0d9737cb..1bbc7bf3 100644
--- a/StreamingCommunity/Api/Site/guardaserie/site.py
+++ b/StreamingCommunity/Api/Site/guardaserie/site.py
@@ -1,8 +1,5 @@
# 09.06.24
-import sys
-
-
# External libraries
import httpx
from bs4 import BeautifulSoup
diff --git a/StreamingCommunity/Api/Site/mediasetinfinity/__init__.py b/StreamingCommunity/Api/Site/mediasetinfinity/__init__.py
index ba80a81e..7931495b 100644
--- a/StreamingCommunity/Api/Site/mediasetinfinity/__init__.py
+++ b/StreamingCommunity/Api/Site/mediasetinfinity/__init__.py
@@ -22,7 +22,7 @@
indice = 3
_useFor = "Film_&_Serie"
_priority = 0
-_engineDownload = "hls"
+_engineDownload = "dash"
_deprecate = False
msg = Prompt()
diff --git a/StreamingCommunity/Api/Site/mediasetinfinity/film.py b/StreamingCommunity/Api/Site/mediasetinfinity/film.py
index f458166f..8267fbaa 100644
--- a/StreamingCommunity/Api/Site/mediasetinfinity/film.py
+++ b/StreamingCommunity/Api/Site/mediasetinfinity/film.py
@@ -17,6 +17,7 @@
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -39,6 +40,10 @@ def download_film(select_title: MediaItem) -> Tuple[str, bool]:
Return:
- str: output path if successful, otherwise None
"""
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None, False
+
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
@@ -49,37 +54,31 @@ def download_film(select_title: MediaItem) -> Tuple[str, bool]:
# Generate mpd and license URLs
bearer = get_bearer_token()
- # Extract ID from the episode URL
- episode_id = select_title.url.split('_')[-1]
- if "http" in episode_id:
- try: episode_id = select_title.url.split('/')[-1]
- except Exception:
- console.print(f"[red]Error:[/red] Failed to parse episode ID from URL: {select_title.url}")
- return None, True
-
- playback_json = get_playback_url(bearer, episode_id)
+ playback_json = get_playback_url(bearer, select_title.id)
tracking_info = get_tracking_info(bearer, playback_json)[0]
license_url = generate_license_url(bearer, tracking_info)
mpd_url = get_manifest(tracking_info['video_src'])
# Download the episode
- r_proc = DASH_Downloader(
+ dash_process = DASH_Downloader(
cdm_device=get_wvd_path(),
license_url=license_url,
mpd_url=mpd_url,
output_path=mp4_path,
)
- r_proc.parse_manifest(custom_headers=get_headers())
+ dash_process.parse_manifest(custom_headers=get_headers())
- if r_proc.download_and_decrypt():
- r_proc.finalize_output()
+ if dash_process.download_and_decrypt():
+ dash_process.finalize_output()
# Get final output path and status
- status = r_proc.get_status()
+ status = dash_process.get_status()
if status['error'] is not None and status['path']:
- try: os.remove(status['path'])
- except Exception: pass
+ try:
+ os.remove(status['path'])
+ except Exception:
+ pass
- return status['path'], status['stopped']
\ No newline at end of file
+ return status['path'], status['stopped']
diff --git a/StreamingCommunity/Api/Site/mediasetinfinity/series.py b/StreamingCommunity/Api/Site/mediasetinfinity/series.py
index 601b4c1b..16b7dc4b 100644
--- a/StreamingCommunity/Api/Site/mediasetinfinity/series.py
+++ b/StreamingCommunity/Api/Site/mediasetinfinity/series.py
@@ -24,6 +24,7 @@
validate_episode_selection,
display_episodes_list
)
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
@@ -56,7 +57,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
+ console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{scrape_serie.series_name}[/cyan] \\ [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.mp4"
@@ -64,39 +65,33 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Generate mpd and license URLs
bearer = get_bearer_token()
-
- # Extract ID from the episode URL
- episode_id = obj_episode.url.split('_')[-1]
- if "http" in episode_id:
- try: episode_id = obj_episode.url.split('/')[-1]
- except Exception:
- console.print(f"[red]Error:[/red] Failed to parse episode ID from URL: {obj_episode.url}")
- return None, True
-
- playback_json = get_playback_url(bearer, episode_id)
+
+ playback_json = get_playback_url(bearer, obj_episode.id)
tracking_info = get_tracking_info(bearer, playback_json)[0]
license_url = generate_license_url(bearer, tracking_info)
mpd_url = get_manifest(tracking_info['video_src'])
# Download the episode
- r_proc = DASH_Downloader(
+ dash_process = DASH_Downloader(
cdm_device=get_wvd_path(),
license_url=license_url,
mpd_url=mpd_url,
output_path=os.path.join(mp4_path, mp4_name),
)
- r_proc.parse_manifest(custom_headers=get_headers())
+ dash_process.parse_manifest(custom_headers=get_headers())
- if r_proc.download_and_decrypt():
- r_proc.finalize_output()
+ if dash_process.download_and_decrypt():
+ dash_process.finalize_output()
# Get final output path and status
- status = r_proc.get_status()
+ status = dash_process.get_status()
if status['error'] is not None and status['path']:
- try: os.remove(status['path'])
- except Exception: pass
+ try:
+ os.remove(status['path'])
+ except Exception:
+ pass
return status['path'], status['stopped']
@@ -130,6 +125,8 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
else:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = display_episodes_list(episodes)
# Prompt user for episode selection
@@ -162,6 +159,8 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# If season_selection is provided, use it instead of asking for input
if season_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No season selection provided and cannot prompt in non-interactive mode')
index_season_selected = msg.ask(
"\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
"[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
@@ -182,4 +181,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
download_episode(i_season, scrape_serie, download_all=True)
else:
# Otherwise, let the user select specific episodes for the single season
- download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
\ No newline at end of file
+ download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
diff --git a/StreamingCommunity/Api/Site/mediasetinfinity/site.py b/StreamingCommunity/Api/Site/mediasetinfinity/site.py
index 7b6f0bcf..18353553 100644
--- a/StreamingCommunity/Api/Site/mediasetinfinity/site.py
+++ b/StreamingCommunity/Api/Site/mediasetinfinity/site.py
@@ -1,5 +1,7 @@
# 25.07.25
+import os
+
# External libraries
import httpx
from rich.console import Console
@@ -7,6 +9,7 @@
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.os import get_wvd_path
from StreamingCommunity.Util.headers import get_headers
from StreamingCommunity.Util.table import TVShowManager
from StreamingCommunity.Api.Template.config_loader import site_constant
@@ -37,7 +40,13 @@ def title_search(query: str) -> int:
media_search_manager.clear()
table_show_manager.clear()
- search_url = f'https://api-ott-prod-fe.mediaset.net/PROD/play/reco/anonymous/v2.0'
+ # Check CDM file before usage
+ cdm_device_path = get_wvd_path()
+ if not cdm_device_path or not isinstance(cdm_device_path, (str, bytes, os.PathLike)) or not os.path.isfile(cdm_device_path):
+ console.print(f"[bold red] CDM file not found or invalid path: {cdm_device_path}[/bold red]")
+ return None
+
+ search_url = 'https://api-ott-prod-fe.mediaset.net/PROD/play/reco/account/v2.0'
console.print(f"[cyan]Search url: [yellow]{search_url}")
params = {
@@ -48,11 +57,10 @@ def title_search(query: str) -> int:
'contentId': '',
'property': 'search',
'tenant': 'play-prod-v2',
- 'userContext': 'iwiAeyJwbGF0Zm9ybSI6IndlYiJ9Aw==',
'aresContext': '',
+ 'clientId': 'client_id',
'page': '1',
'hitsPerPage': '8',
- 'clientId': 'client_id'
}
headers = get_headers()
diff --git a/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py
index 100d7ca4..12df568f 100644
--- a/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py
@@ -1,11 +1,12 @@
# 16.03.25
-import re
+
import logging
+from urllib.parse import urlparse
# External libraries
-import httpx
+from curl_cffi import requests
from bs4 import BeautifulSoup
@@ -16,7 +17,7 @@
# Logic class
-from .get_license import get_bearer_token, get_playback_url
+from .get_license import get_bearer_token
# Variable
@@ -34,197 +35,230 @@ def __init__(self, url):
self.headers = get_headers()
self.url = url
self.seasons_manager = SeasonManager()
- self.subBrandId = None
- self.id_media = None
- self.current_url = None
-
- def _extract_subbrand_id(self, soup):
- """
- Extract subBrandId from the chapter link in the main page.
- Searches all tags to see if one has 'capitoli_' in the href.
- """
- for a_tag in soup.find_all("a", href=True):
- href = a_tag["href"]
-
- if "capitoli_" in href:
- match = re.search(r"sb(\d+)", href)
- if match:
- return match.group(1)
- match = re.search(r",sb(\d+)", href)
- if match:
- return match.group(1)
-
- return None
-
- def _find_video_href_and_id(self, soup):
- """
- Search for the first with href containing '/video/' and return (current_url, id_media).
- Always builds the absolute URL.
- """
- for a_tag in soup.find_all("a", href=True):
- href = a_tag["href"]
- if "/video/" in href:
- if href.startswith("http"):
- current_url = href
- else:
- current_url = "https://mediasetinfinity.mediaset.it" + href
-
- bearer = get_bearer_token()
- playback_json = get_playback_url(bearer, current_url.split('_')[-1])
- id_media = str(playback_json['url']).split("/s/")[1].split("/")[0]
-
- return current_url, id_media
- return None, None
-
- def _parse_entries(self, entries, single_season=False):
- """
- Populate seasons and episodes from the JSON entries.
- If single_season=True, creates only one season and adds all episodes there.
- """
- if not entries:
- self.series_name = ""
- return
-
- self.series_name = entries[0].get("mediasetprogram$auditelBrandName", "")
-
- if single_season:
- logging.info("Single season mode enabled.")
- season_num = 1
- season_name = "Stagione 1"
- current_season = self.seasons_manager.add_season({
- 'number': season_num,
- 'name': season_name
- })
+ self.serie_id = None
+ self.public_id = None
+ self.series_name = ""
+ self.stagioni_disponibili = []
+
+ def _extract_serie_id(self):
+ """Estrae l'ID della serie dall'URL di partenza"""
+ self.serie_id = f"SE{self.url.split('SE')[1]}"
+ print(f"Serie ID: {self.serie_id}")
+ return self.serie_id
+
+ def _get_public_id(self):
+ """Ottiene il public ID tramite l'API watchlist"""
+ bearer_token = get_bearer_token()
+ headers = {
+ 'authorization': f'Bearer {bearer_token}',
+ 'user-agent': get_userAgent(),
+ }
- for idx, entry in enumerate(entries, 1):
- title = entry.get("title", "")
- video_page_url = entry.get("mediasetprogram$videoPageUrl", "")
-
- if video_page_url.startswith("//"):
- episode_url = "https:" + video_page_url
- else:
- episode_url = video_page_url
-
- if current_season:
- current_season.episodes.add({
- 'number': idx,
- 'name': title,
- 'url': episode_url,
- 'duration': int(entry.get("mediasetprogram$duration", 0) / 60)
- })
+ response = requests.get(
+ 'https://api-ott-prod-fe.mediaset.net/PROD/play/userlist/watchlist/v2.0',
+ headers=headers,
+ impersonate="chrome",
+ allow_redirects=True
+ )
+
+ if response.status_code == 401:
+ print("Token scaduto, rinnovare il token")
+
+ if response.status_code == 200:
+ data = response.json()
+ self.public_id = data['response']['entries'][0]['media'][0]['publicUrl'].split("/")[4]
+ print(f"Public id: {self.public_id}")
+ return self.public_id
+
else:
- seasons_dict = {}
-
- logging.info("Multi season mode")
- for entry in entries:
-
- # Use JSON fields directly instead of regex
- season_num = entry.get("tvSeasonNumber")
- ep_num = entry.get("tvSeasonEpisodeNumber")
-
- # Extract numbers from title if season_num or ep_num are None
- if season_num is None or ep_num is None:
- title = entry.get("title", "")
-
- # Find all numbers in the title
- numbers = [int(n) for n in re.findall(r"\d+", title)]
- if len(numbers) == 2:
- season_num, ep_num = numbers
+ logging.error(f"Failed to get public ID: {response.status_code}")
+ return None
- elif len(numbers) == 1:
- # If only one, use it as episode
- ep_num = numbers[0]
+ def _get_series_data(self):
+ """Ottiene i dati della serie tramite l'API"""
+ headers = {
+ 'User-Agent': get_userAgent(),
+ }
+ params = {'byGuid': self.serie_id}
+
+ response = requests.get(
+ f'https://feed.entertainment.tv.theplatform.eu/f/{self.public_id}/mediaset-prod-all-series-v2',
+ params=params,
+ headers=headers,
+ impersonate="chrome",
+ allow_redirects=True
+ )
+ print("Risposta per _get_series_data:", response.status_code)
+
+ if response.status_code == 200:
+ return response.json()
+ else:
+ logging.error(f"Failed to get series data: {response.status_code}")
+ return None
- if season_num is None or ep_num is None:
- continue
+ def _process_available_seasons(self, data):
+ """Processa le stagioni disponibili dai dati della serie"""
+ if not data or not data.get('entries'):
+ logging.error("No series data found")
+ return []
- season_name = entry.get("mediasetprogram$brandTitle") or f"Stagione {season_num}"
+ entry = data['entries'][0]
+ self.series_name = entry.get('title', '')
+
+ seriesTvSeasons = entry.get('seriesTvSeasons', [])
+ availableTvSeasonIds = entry.get('availableTvSeasonIds', [])
+
+ stagioni_disponibili = []
+
+ for url in availableTvSeasonIds:
+ season = next((s for s in seriesTvSeasons if s['id'] == url), None)
+ if season:
+ stagioni_disponibili.append({
+ 'tvSeasonNumber': season['tvSeasonNumber'],
+ 'url': url,
+ 'id': str(url).split("/")[-1],
+ 'guid': season['guid']
+ })
+ else:
+ logging.warning(f"Season URL not found: {url}")
- if season_num not in seasons_dict:
- current_season = self.seasons_manager.add_season({
- 'number': season_num,
- 'name': season_name
- })
- seasons_dict[season_num] = current_season
-
- else:
- current_season = seasons_dict[season_num]
-
- video_page_url = entry.get("mediasetprogram$videoPageUrl", "")
- if video_page_url.startswith("//"):
- episode_url = "https:" + video_page_url
- else:
- episode_url = video_page_url
-
- if current_season:
- current_season.episodes.add({
- 'number': ep_num,
- 'name': entry.get("title", ""),
- 'url': episode_url,
- 'duration': entry.get("mediasetprogram$duration")
- })
+ # Ordina le stagioni dalla più vecchia alla più nuova
+ stagioni_disponibili.sort(key=lambda s: s['tvSeasonNumber'])
+
+ return stagioni_disponibili
+
+ def _build_season_page_urls(self, stagioni_disponibili):
+ """Costruisce gli URL delle pagine delle stagioni"""
+ parsed_url = urlparse(self.url)
+ base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
+ series_slug = parsed_url.path.strip('/').split('/')[-1].split('_')[0]
+
+ for season in stagioni_disponibili:
+ page_url = f"{base_url}/fiction/{series_slug}/{series_slug}{season['tvSeasonNumber']}_{self.serie_id},{season['guid']}"
+ season['page_url'] = page_url
+
+ def _extract_season_sb_ids(self, stagioni_disponibili):
+ """Estrae gli ID sb dalle pagine delle stagioni"""
+ for season in stagioni_disponibili:
+ response_page = requests.get(
+ season['page_url'],
+ headers={'User-Agent': get_userAgent()},
+ impersonate="chrome",
+ allow_redirects=True
+ )
+ print("Risposta per _extract_season_sb_ids:", response_page.status_code)
+
+ soup = BeautifulSoup(response_page.text, 'html.parser')
+
+ # Prova prima con 'Episodi', poi con 'Puntate intere'
+ link = soup.find('a', string='Episodi')
+ if not link:
+ print("Using word: Puntate intere")
+ link = soup.find('a', string='Puntate intere')
+
+ if link and link.has_attr('href'):
+ if not link.string == 'Puntate intere':
+ print("Using word: Episodi")
+ season['sb'] = link['href'].split(',')[-1]
+ else:
+ logging.warning(f"Link 'Episodi' o 'Puntate intere' non trovato per stagione {season['tvSeasonNumber']}")
- def collect_season(self) -> None:
- """
- Retrieve all episodes for all seasons using the Mediaset Infinity API.
- """
- response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
- soup = BeautifulSoup(response.text, "html.parser")
-
- # Find current_url and id_media from the first with /video/
- self.current_url, found_id_media = self._find_video_href_and_id(soup)
- if found_id_media:
- self.id_media = found_id_media
-
- self.subBrandId = self._extract_subbrand_id(soup)
- single_season = False
- if self.subBrandId is None:
- episodi_link = None
- for h2_tag in soup.find_all("h2", class_=True):
- a_tag = h2_tag.find("a", href=True)
- if a_tag and "/episodi_" in a_tag["href"]:
- episodi_link = a_tag["href"]
- break
-
- if episodi_link:
- match = re.search(r"sb(\d+)", episodi_link)
- if match:
- self.subBrandId = match.group(1)
-
- single_season = True
+ def _get_season_episodes(self, season):
+ """Ottiene gli episodi per una stagione specifica"""
+ if not season.get('sb'):
+ return
- else:
- puntate_link = None
- for a_tag in soup.find_all("a", href=True):
- href = a_tag["href"]
- if "puntateintere" in href and "sb" in href:
- puntate_link = href
- break
-
- if puntate_link:
- match = re.search(r"sb(\d+)", puntate_link)
- if match:
- self.subBrandId = match.group(1)
-
- single_season = True
- else:
- print("No /episodi_ or puntateintere link found.")
-
- # Step 2: JSON request
+ episode_headers = {
+ 'origin': 'https://mediasetinfinity.mediaset.it',
+ 'referer': 'https://mediasetinfinity.mediaset.it/',
+ 'user-agent': get_userAgent(),
+ }
params = {
- 'byCustomValue': "{subBrandId}{" + str(self.subBrandId) + "}",
+ 'byCustomValue': "{subBrandId}{" + str(season["sb"].replace('sb', '')) + "}",
'sort': ':publishInfo_lastPublished|asc,tvSeasonEpisodeNumber|asc',
'range': '0-100',
}
+ episode_url = f"https://feed.entertainment.tv.theplatform.eu/f/{self.public_id}/mediaset-prod-all-programs-v2"
- json_url = f'https://feed.entertainment.tv.theplatform.eu/f/{self.id_media}/mediaset-prod-all-programs-v2'
- json_resp = httpx.get(json_url, headers={'user-agent': get_userAgent()}, params=params, timeout=max_timeout, follow_redirects=True)
-
- data = json_resp.json()
- entries = data.get("entries", [])
+ episode_response = requests.get(episode_url, headers=episode_headers, params=params, impersonate="chrome"
+ , allow_redirects=True)
+ print("Risposta per _get_season_episodes:", episode_response.status_code)
+
+ if episode_response.status_code == 200:
+ episode_data = episode_response.json()
+ season['episodes'] = []
+
+ for entry in episode_data.get('entries', []):
+ episode_info = {
+ 'id': entry.get('guid'),
+ 'title': entry.get('title'),
+ 'duration': int(entry.get('mediasetprogram$duration', 0) / 60) if entry.get('mediasetprogram$duration') else 0,
+ 'url': entry.get('media', [{}])[0].get('publicUrl') if entry.get('media') else None
+ }
+ season['episodes'].append(episode_info)
+
+ print(f"Found {len(season['episodes'])} episodes for season {season['tvSeasonNumber']}")
+ else:
+ logging.error(f"Failed to get episodes for season {season['tvSeasonNumber']}: {episode_response.status_code}")
- # Use the unified parsing function
- self._parse_entries(entries, single_season=single_season)
+ def collect_season(self) -> None:
+ """
+ Retrieve all episodes for all seasons using the new Mediaset Infinity API.
+ """
+ try:
+ # Step 1: Extract serie ID from URL
+ self._extract_serie_id()
+
+ # Step 2: Get public ID
+ if not self._get_public_id():
+ logging.error("Failed to get public ID")
+ return
+
+ # Step 3: Get series data
+ data = self._get_series_data()
+ if not data:
+ logging.error("Failed to get series data")
+ return
+
+ # Step 4: Process available seasons
+ self.stagioni_disponibili = self._process_available_seasons(data)
+ if not self.stagioni_disponibili:
+ logging.error("No seasons found")
+ return
+
+ # Step 5: Build season page URLs
+ self._build_season_page_urls(self.stagioni_disponibili)
+
+ # Step 6: Extract sb IDs from season pages
+ self._extract_season_sb_ids(self.stagioni_disponibili)
+
+ # Step 7: Get episodes for each season
+ for season in self.stagioni_disponibili:
+ self._get_season_episodes(season)
+
+ # Step 8: Populate seasons manager
+ self._populate_seasons_manager()
+
+ except Exception as e:
+ logging.error(f"Error in collect_season: {str(e)}")
+
+ def _populate_seasons_manager(self):
+ """Popola il seasons_manager con i dati raccolti"""
+ for season_data in self.stagioni_disponibili:
+ season_obj = self.seasons_manager.add_season({
+ 'number': season_data['tvSeasonNumber'],
+ 'name': f"Stagione {season_data['tvSeasonNumber']}"
+ })
+
+ if season_obj and season_data.get('episodes'):
+ for idx, episode in enumerate(season_data['episodes'], 1):
+ season_obj.episodes.add({
+ 'id': episode['id'],
+ 'number': idx,
+ 'name': episode['title'],
+ 'url': episode['url'],
+ 'duration': episode['duration']
+ })
# ------------- FOR GUI -------------
def getNumberSeason(self) -> int:
diff --git a/StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py b/StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py
index 6e1f6729..ea7b081d 100644
--- a/StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py
+++ b/StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py
@@ -15,8 +15,6 @@
# Variable
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
-bearer_token = None
-playback_json = None
def get_bearer_token():
@@ -26,29 +24,7 @@ def get_bearer_token():
Returns:
str: The bearer token string.
"""
- global bearer_token
-
- if bearer_token:
- return bearer_token
-
- LOGIN_URL = "https://api-ott-prod-fe.mediaset.net/PROD/play/idm/anonymous/login/v2.0"
-
- try:
- response = httpx.post(
- LOGIN_URL,
- json={'client_id': 'client_id', 'appName': 'embed//mediasetplay-embed'},
- follow_redirects=True,
- timeout=MAX_TIMEOUT
- )
- response.raise_for_status()
-
- # Extract the bearer token from the response
- data = response.json()
- bearer_token = data["response"]["beToken"]
- return bearer_token
-
- except Exception as e:
- raise RuntimeError(f"Failed to get bearer token: {e}")
+ return config_manager.get_dict("SITE_LOGIN", "mediasetinfinity")["beToken"]
def get_playback_url(BEARER_TOKEN, CONTENT_ID):
"""
@@ -61,11 +37,6 @@ def get_playback_url(BEARER_TOKEN, CONTENT_ID):
Returns:
dict: The playback JSON object.
"""
- global playback_json
-
- if playback_json is not None:
- return playback_json
-
headers = get_headers()
headers['authorization'] = f'Bearer {BEARER_TOKEN}'
@@ -190,7 +161,7 @@ def get_tracking_info(BEARER_TOKEN, PLAYBACK_JSON):
results = parse_smil_for_tracking_and_video(smil_xml)
return results
- except Exception as e:
+ except Exception:
return None
def generate_license_url(BEARER_TOKEN, tracking_info):
diff --git a/StreamingCommunity/Api/Site/raiplay/__init__.py b/StreamingCommunity/Api/Site/raiplay/__init__.py
index 816d753a..034f4596 100644
--- a/StreamingCommunity/Api/Site/raiplay/__init__.py
+++ b/StreamingCommunity/Api/Site/raiplay/__init__.py
@@ -22,7 +22,7 @@
indice = 5
_useFor = "Film_&_Serie"
_priority = 0
-_engineDownload = "hls"
+_engineDownload = "hls_dash"
_deprecate = False
msg = Prompt()
diff --git a/StreamingCommunity/Api/Site/raiplay/film.py b/StreamingCommunity/Api/Site/raiplay/film.py
index 2a09e738..f0f5f621 100644
--- a/StreamingCommunity/Api/Site/raiplay/film.py
+++ b/StreamingCommunity/Api/Site/raiplay/film.py
@@ -12,16 +12,19 @@
# Internal utilities
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Util.headers import get_headers
+from StreamingCommunity.Util.os import get_wvd_path
from StreamingCommunity.Util.message import start_message
# Logic class
+from .util.get_license import generate_license_url
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
-from StreamingCommunity import HLS_Downloader
+from StreamingCommunity import HLS_Downloader, DASH_Downloader
from StreamingCommunity.Api.Player.mediapolisvod import VideoSource
@@ -40,6 +43,11 @@ def download_film(select_title: MediaItem) -> Tuple[str, bool]:
- str: Path to downloaded file
- bool: Whether download was stopped
"""
+ # If a TV item is passed to a film downloader while running as job, fail fast
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None, False
+
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
@@ -49,17 +57,46 @@ def download_film(select_title: MediaItem) -> Tuple[str, bool]:
master_playlist = VideoSource.extract_m3u8_url(first_item_path)
# Define the filename and path for the downloaded film
- title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
- mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
+ mp4_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
+ mp4_path = os.path.join(site_constant.MOVIE_FOLDER, mp4_name.replace(".mp4", ""))
+
+ # HLS
+ if ".mpd" not in master_playlist:
+ r_proc = HLS_Downloader(
+ m3u8_url=master_playlist,
+ output_path=os.path.join(mp4_path, mp4_name)
+ ).start()
+
+ # MPD
+ else:
+
+ # Check CDM file before usage
+ cdm_device_path = get_wvd_path()
+ if not cdm_device_path or not isinstance(cdm_device_path, (str, bytes, os.PathLike)) or not os.path.isfile(cdm_device_path):
+ console.print(f"[bold red] CDM file not found or invalid path: {cdm_device_path}[/bold red]")
+ return None
+
+ license_url = generate_license_url(select_title.mpd_id)
+
+ dash_process = DASH_Downloader(
+ cdm_device=cdm_device_path,
+ license_url=license_url,
+ mpd_url=master_playlist,
+ output_path=os.path.join(mp4_path, mp4_name),
+ )
+ dash_process.parse_manifest(custom_headers=get_headers())
+
+ if dash_process.download_and_decrypt():
+ dash_process.finalize_output()
+
+ # Get final output path and status
+ r_proc = dash_process.get_status()
- # Download the film using the m3u8 playlist, and output filename
- r_proc = HLS_Downloader(
- m3u8_url=master_playlist,
- output_path=os.path.join(mp4_path, title_name)
- ).start()
if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ try:
+ os.remove(r_proc['path'])
+ except Exception:
+ pass
- return r_proc['path'], r_proc['stopped']
\ No newline at end of file
+ return r_proc['path'], r_proc['stopped']
diff --git a/StreamingCommunity/Api/Site/raiplay/series.py b/StreamingCommunity/Api/Site/raiplay/series.py
index d40e8f23..234d21da 100644
--- a/StreamingCommunity/Api/Site/raiplay/series.py
+++ b/StreamingCommunity/Api/Site/raiplay/series.py
@@ -10,11 +10,15 @@
# Internal utilities
+from StreamingCommunity.Util.headers import get_headers
+from StreamingCommunity.Util.os import get_wvd_path
from StreamingCommunity.Util.message import start_message
+
# Logic class
from .util.ScrapeSerie import GetSerieInfo
+from .util.get_license import generate_license_url
from StreamingCommunity.Api.Template.Util import (
manage_selection,
map_episode_title,
@@ -22,12 +26,13 @@
validate_episode_selection,
display_episodes_list
)
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
# Player
-from StreamingCommunity import HLS_Downloader
+from StreamingCommunity import HLS_Downloader, DASH_Downloader
from StreamingCommunity.Api.Player.mediapolisvod import VideoSource
@@ -53,24 +58,52 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
-
- # Get streaming URL
- master_playlist = VideoSource.extract_m3u8_url(obj_episode.url)
+ console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{scrape_serie.series_name}[/cyan] \\ [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
# Define filename and path
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.mp4"
mp4_path = os.path.join(site_constant.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}")
- # Download the episode
- r_proc = HLS_Downloader(
- m3u8_url=master_playlist,
- output_path=os.path.join(mp4_path, mp4_name)
- ).start()
+ # Get streaming URL
+ master_playlist = VideoSource.extract_m3u8_url(obj_episode.url)
+
+ # HLS
+ if ".mpd" not in master_playlist:
+ r_proc = HLS_Downloader(
+ m3u8_url=master_playlist,
+ output_path=os.path.join(mp4_path, mp4_name)
+ ).start()
+
+ # MPD
+ else:
+
+ # Check CDM file before usage
+ cdm_device_path = get_wvd_path()
+ if not cdm_device_path or not isinstance(cdm_device_path, (str, bytes, os.PathLike)) or not os.path.isfile(cdm_device_path):
+ console.print(f"[bold red] CDM file not found or invalid path: {cdm_device_path}[/bold red]")
+ return None
+
+ license_url = generate_license_url(obj_episode.mpd_id)
+
+ dash_process = DASH_Downloader(
+ cdm_device=cdm_device_path,
+ license_url=license_url,
+ mpd_url=master_playlist,
+ output_path=os.path.join(mp4_path, mp4_name),
+ )
+ dash_process.parse_manifest(custom_headers=get_headers())
+
+ if dash_process.download_and_decrypt():
+ dash_process.finalize_output()
+
+ # Get final output path and status
+ r_proc = dash_process.get_status()
if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ try:
+ os.remove(r_proc['path'])
+ except Exception:
+ pass
return r_proc['path'], r_proc['stopped']
@@ -99,6 +132,8 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
else:
# Display episodes list and manage user selection
if episode_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = display_episodes_list(episodes)
else:
last_command = episode_selection
@@ -140,8 +175,16 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
seasons_count = len(scrape_serie.seasons_manager)
console.print(f"\n[green]Seasons found: [red]{seasons_count}")
+ if seasons_count == 0:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError("No seasons found for this title (non-interactive mode)")
+ console.print("[red]No seasons found for this title")
+ return
+
# If season_selection is provided, use it instead of asking for input
if season_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError("No season selection provided and cannot prompt in non-interactive mode")
index_season_selected = msg.ask(
"\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
"[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
@@ -160,4 +203,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
if len(list_season_select) > 1 or index_season_selected == "*":
download_episode(season_number, scrape_serie, download_all=True)
else:
- download_episode(season_number, scrape_serie, download_all=False, episode_selection=episode_selection)
\ No newline at end of file
+ download_episode(season_number, scrape_serie, download_all=False, episode_selection=episode_selection)
diff --git a/StreamingCommunity/Api/Site/raiplay/site.py b/StreamingCommunity/Api/Site/raiplay/site.py
index ef95cbc2..0365558e 100644
--- a/StreamingCommunity/Api/Site/raiplay/site.py
+++ b/StreamingCommunity/Api/Site/raiplay/site.py
@@ -11,6 +11,9 @@
from StreamingCommunity.Util.table import TVShowManager
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
+
+
+# Logic Import
from .util.ScrapeSerie import GetSerieInfo
@@ -59,7 +62,7 @@ def title_search(query: str) -> int:
media_search_manager.clear()
table_show_manager.clear()
- search_url = f"https://www.raiplay.it/atomatic/raiplay-search-service/api/v1/msearch"
+ search_url = "https://www.raiplay.it/atomatic/raiplay-search-service/api/v1/msearch"
console.print(f"[cyan]Search url: [yellow]{search_url}")
json_data = {
diff --git a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
index b7bd8638..085df29e 100644
--- a/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py
@@ -101,7 +101,8 @@ def collect_info_season(self, number_season: int) -> None:
'number': ep.get('episode', ''),
'name': ep.get('episode_title', '') or ep.get('toptitle', ''),
'duration': ep.get('duration', ''),
- 'url': f"{self.base_url}{ep.get('weblink', '')}" if 'weblink' in ep else f"{self.base_url}{ep.get('url', '')}"
+ 'url': f"{self.base_url}{ep.get('weblink', '')}" if 'weblink' in ep else f"{self.base_url}{ep.get('url', '')}",
+ 'mpd_id': ep.get('video_url').split("=")[1].strip()
}
season.episodes.add(episode)
diff --git a/StreamingCommunity/Api/Site/raiplay/util/get_license.py b/StreamingCommunity/Api/Site/raiplay/util/get_license.py
new file mode 100644
index 00000000..4bf7dffe
--- /dev/null
+++ b/StreamingCommunity/Api/Site/raiplay/util/get_license.py
@@ -0,0 +1,40 @@
+# 16.03.25
+
+
+# External library
+import httpx
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.headers import get_headers
+
+
+# Variable
+MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
+
+
+
+def generate_license_url(mpd_id: str):
+ """
+ Generates the URL to obtain the Widevine license.
+
+ Args:
+ mpd_id (str): The ID of the MPD (Media Presentation Description) file.
+
+ Returns:
+ str: The full license URL.
+ """
+ params = {
+ 'cont': mpd_id,
+ 'output': '62',
+ }
+
+ response = httpx.get('https://mediapolisvod.rai.it/relinker/relinkerServlet.htm', params=params, headers=get_headers(), timeout=MAX_TIMEOUT)
+ response.raise_for_status()
+
+ # Extract the license URL from the response in two lines
+ json_data = response.json()
+ license_url = json_data.get('licence_server_map').get('drmLicenseUrlValues')[0].get('licenceUrl')
+
+ return license_url
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/__init__.py b/StreamingCommunity/Api/Site/streamingcommunity/__init__.py
index b40824da..9b24b101 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/__init__.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/__init__.py
@@ -2,7 +2,6 @@
import sys
import subprocess
-from urllib.parse import quote_plus
# External library
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/film.py b/StreamingCommunity/Api/Site/streamingcommunity/film.py
index c47e0785..189b35b7 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/film.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/film.py
@@ -16,6 +16,7 @@
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -38,6 +39,11 @@ def download_film(select_title: MediaItem) -> str:
Return:
- str: output path
"""
+ # If a TV item is passed to a film downloader while running as job, fail fast
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None
+
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
bot.send_message(f"Download in corso:\n{select_title.name}", None)
@@ -71,7 +77,7 @@ def download_film(select_title: MediaItem) -> str:
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
# Download the film using the m3u8 playlist, and output filename
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, title_name)
).start()
@@ -83,8 +89,10 @@ def download_film(select_title: MediaItem) -> str:
if script_id != "unknown":
TelegramSession.deleteScriptId(script_id)
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
- return r_proc['path']
\ No newline at end of file
+ return hls_process['path']
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/series.py b/StreamingCommunity/Api/Site/streamingcommunity/series.py
index 985710de..c7654e94 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/series.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/series.py
@@ -17,12 +17,14 @@
# Logic class
from .util.ScrapeSerie import GetSerieInfo
from StreamingCommunity.Api.Template.Util import (
- manage_selection,
- map_episode_title,
- validate_selection,
- validate_episode_selection,
- display_episodes_list
+ manage_selection,
+ map_episode_title,
+ validate_selection,
+ validate_episode_selection,
+ display_episodes_list,
+ assert_interactive_allowed,
)
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
@@ -55,7 +57,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
+ console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{scrape_serie.series_name}[/cyan] \\ [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
if site_constant.TELEGRAM_BOT:
bot = get_bot_instance()
@@ -81,16 +83,18 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
master_playlist = video_source.get_playlist()
# Download the episode
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, mp4_name)
).start()
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
- return r_proc['path'], r_proc['stopped']
+ return hls_process['path'], hls_process['stopped']
def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, video_source: VideoSource, download_all: bool = False, episode_selection: str = None) -> None:
@@ -109,6 +113,8 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, vid
episodes_count = len(episodes)
if episodes_count == 0:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError(f"No episodes found for season {index_season_selected} (non-interactive mode)")
console.print(f"[red]No episodes found for season {index_season_selected}")
return
@@ -125,6 +131,8 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, vid
else:
# Display episodes list and manage user selection
if episode_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = display_episodes_list(episodes)
else:
last_command = episode_selection
@@ -169,6 +177,9 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# If season_selection is provided, use it instead of asking for input
if season_selection is None:
+ # If we are running as a background job we cannot prompt the user
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError("No season selection provided and cannot prompt in non-interactive mode")
if site_constant.TELEGRAM_BOT:
console.print("\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
"[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end")
@@ -214,7 +225,7 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
download_episode(season_number, scrape_serie, video_source, download_all=False, episode_selection=episode_selection)
if site_constant.TELEGRAM_BOT:
- bot.send_message(f"Finito di scaricare tutte le serie e episodi", None)
+ bot.send_message("Finito di scaricare tutte le serie e episodi", None)
# Get script_id
script_id = TelegramSession.get_session()
diff --git a/StreamingCommunity/Api/Site/streamingcommunity/site.py b/StreamingCommunity/Api/Site/streamingcommunity/site.py
index 050a29ac..6ed6939d 100644
--- a/StreamingCommunity/Api/Site/streamingcommunity/site.py
+++ b/StreamingCommunity/Api/Site/streamingcommunity/site.py
@@ -57,7 +57,9 @@ def title_search(query: str) -> int:
version = json.loads(soup.find('div', {'id': "app"}).get("data-page"))['version']
except Exception as e:
- if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
+ if "WinError" in str(e) or "Errno" in str(e):
+ console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
+
console.print(f"[red]Site: {site_constant.SITE_NAME} version, request error: {e}")
return 0
@@ -116,7 +118,7 @@ def title_search(query: str) -> int:
if site_constant.TELEGRAM_BOT:
if choices:
- bot.send_message(f"Lista dei risultati:", choices)
+ bot.send_message("Lista dei risultati:", choices)
# Return the number of titles found
return media_search_manager.get_length()
diff --git a/StreamingCommunity/Api/Site/streamingwatch/film.py b/StreamingCommunity/Api/Site/streamingwatch/film.py
index 661c1607..4202dd99 100644
--- a/StreamingCommunity/Api/Site/streamingwatch/film.py
+++ b/StreamingCommunity/Api/Site/streamingwatch/film.py
@@ -15,6 +15,7 @@
# Logic class
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+from StreamingCommunity.Api.Template.Util import assert_item_is_movie
# Player
@@ -37,6 +38,10 @@ def download_film(select_title: MediaItem) -> str:
Return:
- str: output path
"""
+ if not assert_item_is_movie(select_title):
+ console.print("[red]Selected item is not a film.")
+ return None
+
start_message()
console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
@@ -49,13 +54,15 @@ def download_film(select_title: MediaItem) -> str:
mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
# Download the film using the m3u8 playlist, and output filename
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, title_name)
).start()
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
- return r_proc['path']
\ No newline at end of file
+ return hls_process['path']
diff --git a/StreamingCommunity/Api/Site/streamingwatch/series.py b/StreamingCommunity/Api/Site/streamingwatch/series.py
index f6a13581..b6015b2e 100644
--- a/StreamingCommunity/Api/Site/streamingwatch/series.py
+++ b/StreamingCommunity/Api/Site/streamingwatch/series.py
@@ -18,11 +18,12 @@
from .util.ScrapeSerie import GetSerieInfo
from StreamingCommunity.Api.Template.Util import (
manage_selection,
- map_episode_title,
+ map_episode_title,
validate_selection,
validate_episode_selection,
display_episodes_list
)
+from StreamingCommunity.Api.http_api import JOB_MANAGER
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
@@ -53,7 +54,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
# Get episode information
obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
- console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
+ console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{scrape_serie.series_name}[/cyan] \\ [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")
# Define filename and path for the downloaded video
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.mp4"
@@ -64,16 +65,18 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
master_playlist = video_source.get_m3u8_url(obj_episode.url)
# Download the episode
- r_proc = HLS_Downloader(
+ hls_process = HLS_Downloader(
m3u8_url=master_playlist,
output_path=os.path.join(mp4_path, mp4_name)
).start()
- if r_proc['error'] is not None:
- try: os.remove(r_proc['path'])
- except: pass
+ if hls_process['error'] is not None:
+ try:
+ os.remove(hls_process['path'])
+ except Exception:
+ pass
- return r_proc['path'], r_proc['stopped']
+ return hls_process['path'], hls_process['stopped']
def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None) -> None:
@@ -105,6 +108,8 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
else:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No episode selection provided and cannot prompt in non-interactive mode')
last_command = display_episodes_list(episodes)
# Prompt user for episode selection
@@ -137,6 +142,8 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# If season_selection is provided, use it instead of asking for input
if season_selection is None:
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('No season selection provided and cannot prompt in non-interactive mode')
index_season_selected = msg.ask(
"\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
"[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
@@ -157,4 +164,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
download_episode(i_season, scrape_serie, download_all=True)
else:
# Otherwise, let the user select specific episodes for the single season
- download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
\ No newline at end of file
+ download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
diff --git a/StreamingCommunity/Api/Site/streamingwatch/site.py b/StreamingCommunity/Api/Site/streamingwatch/site.py
index 2f0b69c3..194c2b81 100644
--- a/StreamingCommunity/Api/Site/streamingwatch/site.py
+++ b/StreamingCommunity/Api/Site/streamingwatch/site.py
@@ -86,7 +86,9 @@ def title_search(query: str) -> int:
soup = BeautifulSoup(response.text, 'html.parser')
except Exception as e:
- if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
+ if "WinError" in str(e) or "Errno" in str(e):
+ console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
return 0
diff --git a/StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py b/StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py
index dcde6e50..da19867d 100644
--- a/StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py
@@ -5,11 +5,12 @@
# External libraries
-import httpx
from bs4 import BeautifulSoup
+
# Internal utilities
from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Api.Player.Helper.Vixcloud.util import SeasonManager, Episode
@@ -24,8 +25,7 @@ def __init__(self, url):
self.url = url
self.seasons_manager = SeasonManager()
self.series_name = None
-
- self.client = httpx.Client(headers=self.headers, timeout=max_timeout)
+ self.client = create_client(headers=self.headers)
def collect_info_season(self) -> None:
"""
diff --git a/StreamingCommunity/Api/Template/Util/__init__.py b/StreamingCommunity/Api/Template/Util/__init__.py
index 937eb369..498076a0 100644
--- a/StreamingCommunity/Api/Template/Util/__init__.py
+++ b/StreamingCommunity/Api/Template/Util/__init__.py
@@ -1,10 +1,11 @@
-# 23.11.24
-from .manage_ep import (
- manage_selection,
- map_episode_title,
- validate_episode_selection,
- validate_selection,
- dynamic_format_number,
- display_episodes_list
-)
\ No newline at end of file
+__all__ = [
+ "manage_selection",
+ "map_episode_title",
+ "validate_episode_selection",
+ "validate_selection",
+ "dynamic_format_number",
+ "assert_interactive_allowed",
+ "assert_item_is_movie",
+ "display_episodes_list"
+]
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Template/Util/manage_ep.py b/StreamingCommunity/Api/Template/Util/manage_ep.py
index c3a60afb..754f3607 100644
--- a/StreamingCommunity/Api/Template/Util/manage_ep.py
+++ b/StreamingCommunity/Api/Template/Util/manage_ep.py
@@ -14,6 +14,26 @@
from StreamingCommunity.Util.os import os_manager
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.table import TVShowManager
+from StreamingCommunity.Api.http_api import JOB_MANAGER
+
+
+def assert_interactive_allowed():
+ """Raise ValueError if current execution is inside a Job (non-interactive)."""
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError("Operation requires interactive input but the process is running as a background job")
+
+
+def assert_item_is_movie(item):
+ """If item represents a TV show and we're running in a job, raise ValueError.
+
+ If interactive, returns False so caller can print/handle as needed.
+ """
+ t = getattr(item, 'type', None) if not isinstance(item, dict) else item.get('type')
+ if t == 'tv' or t == 'series':
+ if JOB_MANAGER.get_current_job_id() is not None:
+ raise ValueError('Requested movie download but item is a TV show (non-interactive mode)')
+ return False
+ return True
# Variable
@@ -79,6 +99,8 @@ def manage_selection(cmd_insert: str, max_count: int) -> List[int]:
Returns:
list_selection (List[int]): List of selected items.
"""
+ # If invoked inside a job (non-interactive), attempt a single pass and raise on invalid input
+ in_job = JOB_MANAGER.get_current_job_id() is not None
while True:
list_selection = []
logging.info(f"Command insert: {cmd_insert}, end index: {max_count + 1}")
@@ -104,6 +126,8 @@ def manage_selection(cmd_insert: str, max_count: int) -> List[int]:
list_selection = list(range(1, max_count + 1))
break
+ if in_job:
+ raise ValueError(f"Invalid selection command: {cmd_insert}")
cmd_insert = msg.ask("[red]Invalid input. Please enter a valid command: ")
logging.info(f"List return: {list_selection}")
@@ -125,20 +149,20 @@ def map_episode_title(tv_name: str, number_season: int, episode_number: int, epi
"""
map_episode_temp = MAP_EPISODE
- if tv_name != None:
+ if tv_name is not None:
map_episode_temp = map_episode_temp.replace("%(tv_name)", os_manager.get_sanitize_file(tv_name))
- if number_season != None:
+ if number_season is not None:
map_episode_temp = map_episode_temp.replace("%(season)", str(number_season))
else:
map_episode_temp = map_episode_temp.replace("%(season)", dynamic_format_number(str(0)))
- if episode_number != None:
+ if episode_number is not None:
map_episode_temp = map_episode_temp.replace("%(episode)", dynamic_format_number(str(episode_number)))
else:
map_episode_temp = map_episode_temp.replace("%(episode)", dynamic_format_number(str(0)))
- if episode_name != None:
+ if episode_name is not None:
map_episode_temp = map_episode_temp.replace("%(episode_name)", os_manager.get_sanitize_file(episode_name))
logging.info(f"Map episode string return: {map_episode_temp}")
@@ -157,6 +181,8 @@ def validate_selection(list_season_select: List[int], seasons_count: int) -> Lis
Returns:
- List[int]: Adjusted list of valid season numbers.
"""
+ # If called inside a job (non-interactive), validate once and raise on invalid input
+ in_job = JOB_MANAGER.get_current_job_id() is not None
while True:
try:
@@ -166,6 +192,8 @@ def validate_selection(list_season_select: List[int], seasons_count: int) -> Lis
# If the list is empty, the input was completely invalid
if not valid_seasons:
logging.error(f"Invalid selection: The selected seasons are outside the available range (1-{seasons_count}). Please try again.")
+ if in_job:
+ raise ValueError(f"Invalid selection: seasons out of range 1-{seasons_count}")
# Re-prompt for valid input
input_seasons = input(f"Enter valid season numbers (1-{seasons_count}): ")
@@ -176,6 +204,8 @@ def validate_selection(list_season_select: List[int], seasons_count: int) -> Lis
except ValueError:
logging.error("Error: Please enter valid integers separated by commas.")
+ if in_job:
+ raise
# Prompt the user for valid input again
input_seasons = input(f"Enter valid season numbers (1-{seasons_count}): ")
@@ -194,6 +224,7 @@ def validate_episode_selection(list_episode_select: List[int], episodes_count: i
Returns:
- List[int]: Adjusted list of valid episode numbers.
"""
+ in_job = JOB_MANAGER.get_current_job_id() is not None
while True:
try:
@@ -203,6 +234,8 @@ def validate_episode_selection(list_episode_select: List[int], episodes_count: i
# If the list is empty, the input was completely invalid
if not valid_episodes:
logging.error(f"Invalid selection: The selected episodes are outside the available range (1-{episodes_count}). Please try again.")
+ if in_job:
+ raise ValueError(f"Invalid selection: episodes out of range 1-{episodes_count}")
# Re-prompt for valid input
input_episodes = input(f"Enter valid episode numbers (1-{episodes_count}): ")
@@ -213,7 +246,9 @@ def validate_episode_selection(list_episode_select: List[int], episodes_count: i
except ValueError:
logging.error("Error: Please enter valid integers separated by commas.")
-
+ if in_job:
+ raise
+
# Prompt the user for valid input again
input_episodes = input(f"Enter valid episode numbers (1-{episodes_count}): ")
list_episode_select = list(map(int, input_episodes.split(',')))
@@ -257,4 +292,4 @@ def display_episodes_list(episodes_manager) -> str:
console.print("\n[red]Quit ...")
sys.exit(0)
- return last_command
\ No newline at end of file
+ return last_command
diff --git a/StreamingCommunity/Api/Template/__init__.py b/StreamingCommunity/Api/Template/__init__.py
index 3574946b..68b80ddd 100644
--- a/StreamingCommunity/Api/Template/__init__.py
+++ b/StreamingCommunity/Api/Template/__init__.py
@@ -1,3 +1,7 @@
# 19.06.24
-from .site import get_select_title
\ No newline at end of file
+from .site import get_select_title
+
+__all__ = [
+ "get_select_title"
+]
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Template/config_loader.py b/StreamingCommunity/Api/Template/config_loader.py
index fe3e16e5..7886e88c 100644
--- a/StreamingCommunity/Api/Template/config_loader.py
+++ b/StreamingCommunity/Api/Template/config_loader.py
@@ -11,14 +11,40 @@
def get_site_name_from_stack():
for frame_info in inspect.stack():
file_path = frame_info.filename
-
- if "__init__" in file_path:
- parts = file_path.split(f"Site{os.sep}")
-
- if len(parts) > 1:
- site_name = parts[1].split(os.sep)[0]
- return site_name
-
+
+    # Common case: path contains Api/Site/<site_name>/__init__.py
+ try:
+ marker = os.path.join('Api', 'Site') + os.sep
+ if marker in file_path and '__init__' in file_path:
+ parts = file_path.split(marker)
+ if len(parts) > 1:
+ site_name = parts[1].split(os.sep)[0]
+ return site_name
+ except Exception:
+ pass
+
+ # Fallback: if path contains 'Site' folder, try a more permissive split
+ try:
+ if 'Site' + os.sep in file_path and '__init__' in file_path:
+ parts = file_path.split('Site' + os.sep)
+ if len(parts) > 1:
+ site_name = parts[1].split(os.sep)[0]
+ return site_name
+ except Exception:
+ pass
+
+ # Last-resort: try to infer module/package name from the frame
+ try:
+ module = inspect.getmodule(frame_info.frame)
+ if module is not None and hasattr(module, '__package__') and module.__package__:
+            # package typically like 'StreamingCommunity.Api.Site.<site_name>'
+ pkg = module.__package__
+ if 'Api.Site.' in pkg:
+ site_name = pkg.split('Api.Site.')[-1].split('.')[0]
+ return site_name
+ except Exception:
+ pass
+
return None
diff --git a/StreamingCommunity/Api/Template/site.py b/StreamingCommunity/Api/Template/site.py
index 8f1d1351..be111216 100644
--- a/StreamingCommunity/Api/Template/site.py
+++ b/StreamingCommunity/Api/Template/site.py
@@ -1,8 +1,5 @@
# 19.06.24
-import sys
-
-
# External library
from rich.console import Console
@@ -83,12 +80,19 @@ def get_select_title(table_show_manager, media_search_manager, num_results_avail
color_index = 1
for key in first_media_item.__dict__.keys():
+
if key.capitalize() in column_to_hide:
continue
+
if key in ('id', 'type', 'name', 'score'):
- if key == 'type': column_info["Type"] = {'color': 'yellow'}
- elif key == 'name': column_info["Name"] = {'color': 'magenta'}
- elif key == 'score': column_info["Score"] = {'color': 'cyan'}
+ if key == 'type':
+ column_info["Type"] = {'color': 'yellow'}
+
+ elif key == 'name':
+ column_info["Name"] = {'color': 'magenta'}
+ elif key == 'score':
+ column_info["Score"] = {'color': 'cyan'}
+
else:
column_info[key.capitalize()] = {'color': available_colors[color_index % len(available_colors)]}
color_index += 1
diff --git a/StreamingCommunity/Api/http_api.py b/StreamingCommunity/Api/http_api.py
new file mode 100644
index 00000000..ff92526e
--- /dev/null
+++ b/StreamingCommunity/Api/http_api.py
@@ -0,0 +1,389 @@
+"""FastAPI HTTP API for StreamingCommunity with job queue.
+
+Exposes endpoints:
+- GET /providers
+- POST /search
+- POST /module_call
+- POST /jobs (create download job)
+- GET /jobs
+- GET /jobs/{job_id}
+
+Auth: optional Basic Auth via config DEFAULT.http_api_username/password
+
+Jobs are processed sequentially (one at a time) to ensure downloads don't run concurrently.
+"""
+from __future__ import annotations
+
+import base64
+import importlib
+import glob
+import os
+import threading
+import time
+from collections import deque
+from typing import Any, Callable, Deque, Dict, List, Optional
+
+from fastapi import Depends, FastAPI, HTTPException, Request, status
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel
+
+from StreamingCommunity.Util.config_json import config_manager
+
+app = FastAPI(title="StreamingCommunity API")
+
+
+# ----------------------- auth dependency -----------------------
+def _check_auth(request: Request) -> None:
+ username = config_manager.get('DEFAULT', 'http_api_username')
+ password = config_manager.get('DEFAULT', 'http_api_password')
+ if not username and not password:
+ return
+
+ auth = request.headers.get('Authorization')
+ if not auth:
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Authentication required', headers={"WWW-Authenticate": "Basic"})
+ try:
+ scheme, data = auth.split(' ', 1)
+ if scheme.lower() != 'basic':
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Bad auth scheme', headers={"WWW-Authenticate": "Basic"})
+ decoded = base64.b64decode(data).decode()
+ u, p = decoded.split(':', 1)
+ if not (u == username and p == password):
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid credentials', headers={"WWW-Authenticate": "Basic"})
+ except HTTPException:
+ raise
+ except Exception:
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid authorization header', headers={"WWW-Authenticate": "Basic"})
+
+
+# ----------------------- registry / helpers -----------------------
+API_REGISTRY: Dict[str, Callable[..., Any]] = {}
+
+
+def expose_api(name: Optional[str] = None):
+ def _decorator(func: Callable[..., Any]):
+ key = f"{func.__module__.split('.')[-1]}.{name or func.__name__}"
+ API_REGISTRY[key] = func
+ return func
+
+ return _decorator
+
+
+def _get_site_modules() -> List[Dict[str, Any]]:
+ api_dir = os.path.join(os.path.dirname(__file__), 'Site')
+ init_files = glob.glob(os.path.join(api_dir, '*', '__init__.py'))
+ modules: List[Dict[str, Any]] = []
+ for init_file in init_files:
+ module_name = os.path.basename(os.path.dirname(init_file))
+ try:
+ mod = importlib.import_module(f'StreamingCommunity.Api.Site.{module_name}')
+ indice = getattr(mod, 'indice', None)
+ use_for = getattr(mod, '_useFor', None)
+ deprecated = getattr(mod, '_deprecate', False)
+ if not deprecated:
+ modules.append({'name': module_name, 'indice': indice, 'use_for': use_for})
+ except Exception:
+ continue
+ modules.sort(key=lambda x: (x['indice'] if x['indice'] is not None else 9999))
+ return modules
+
+
+def _serialize_media_manager(manager: Any) -> List[Dict[str, Any]]:
+ out: List[Dict[str, Any]] = []
+ try:
+ media_list = getattr(manager, 'media_list', [])
+ for item in media_list:
+ try:
+ data = item.__dict__.copy()
+ except Exception:
+ data = {k: getattr(item, k, None) for k in ['id', 'name', 'type', 'url', 'size', 'score', 'date', 'desc']}
+ out.append(data)
+ except Exception:
+ pass
+ return out
+
+
+# ----------------------- pydantic models -----------------------
+class SearchRequest(BaseModel):
+ provider: Optional[str] = None
+ query: str
+
+
+class ModuleCallRequest(BaseModel):
+ module: str
+ function: str
+ kwargs: Optional[Dict[str, Any]] = None
+ background: Optional[bool] = False
+
+
+class JobCreateRequest(BaseModel):
+ module: str
+ action: str # 'download_film' or 'download_series' or custom
+ item: Dict[str, Any]
+ selections: Optional[Dict[str, Any]] = None
+
+
+class JobInfo(BaseModel):
+ id: int
+ status: str
+ created_at: float
+ started_at: Optional[float] = None
+ finished_at: Optional[float] = None
+ result: Optional[Any] = None
+ error: Optional[str] = None
+
+
+# ----------------------- job queue -----------------------
+class JobManager:
+ def __init__(self):
+ self._jobs: Dict[int, Dict[str, Any]] = {}
+ self._queue: Deque[int] = deque()
+ self._lock = threading.Lock()
+ self._local = threading.local()
+ self._next_id = 1
+ self._worker = threading.Thread(target=self._worker_loop, daemon=True)
+ self._worker.start()
+
+ def add_job(self, payload: Dict[str, Any]) -> int:
+ with self._lock:
+ job_id = self._next_id
+ self._next_id += 1
+ job = {
+ 'id': job_id,
+ 'status': 'queued',
+ 'created_at': time.time(),
+ 'progress': 0,
+ 'payload': payload,
+ 'result': None,
+ 'error': None,
+ }
+ self._jobs[job_id] = job
+ self._queue.append(job_id)
+ return job_id
+
+ def get_job(self, job_id: int) -> Optional[Dict[str, Any]]:
+ return self._jobs.get(job_id)
+
+ def list_jobs(self) -> List[Dict[str, Any]]:
+ return list(self._jobs.values())
+
+ def _worker_loop(self) -> None:
+ while True:
+ job_id = None
+ with self._lock:
+ if self._queue:
+ job_id = self._queue.popleft()
+ if job_id is None:
+ time.sleep(0.5)
+ continue
+
+ job = self._jobs.get(job_id)
+ if not job:
+ continue
+ # set thread-local current job id so the running code can update progress
+ try:
+ self._local.current_job = job_id
+ except Exception:
+ self._local = threading.local()
+ self._local.current_job = job_id
+ job['status'] = 'running'
+ job['started_at'] = time.time()
+ try:
+ # ensure progress starts at 0
+ job['progress'] = 0
+ job['result'] = self._run_job(job['payload'])
+ job['status'] = 'finished'
+ job['progress'] = 100
+ except Exception as e:
+ job['status'] = 'failed'
+ job['error'] = f"{type(e).__name__}: {e}"
+ # on failure we set progress to 100 to indicate job completion
+ job['progress'] = 100
+ finally:
+ job['finished_at'] = time.time()
+ # clear thread-local current job
+ try:
+ del self._local.current_job
+ except Exception:
+ pass
+
+ def update_progress(self, percent: float, job_id: Optional[int] = None) -> None:
+ """Update progress for a job. Percent is clamped to 0..100."""
+ try:
+ p = float(percent)
+ except Exception:
+ return
+ p = max(0.0, min(100.0, p))
+ with self._lock:
+ if job_id is None:
+            # fall back to the worker thread's current job id (thread-local)
+ job_id = getattr(self._local, 'current_job', None)
+ if not job_id:
+ return
+ job = self._jobs.get(job_id)
+ if not job:
+ return
+ job['progress'] = p
+
+ def get_current_job_id(self) -> Optional[int]:
+ return getattr(self._local, 'current_job', None)
+
+ def _run_job(self, payload: Dict[str, Any]) -> Any:
+ module_name = payload.get('module')
+ action = payload.get('action')
+ item = payload.get('item')
+ selections = payload.get('selections') or {}
+
+ mod = importlib.import_module(f'StreamingCommunity.Api.Site.{module_name}')
+ # prepare MediaItem
+ from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+
+ media_obj = MediaItem(**item)
+
+ if action == 'download_film':
+ fn = getattr(mod, 'download_film', None)
+ if not callable(fn):
+ raise RuntimeError('download_film not available in module')
+ return fn(media_obj)
+ elif action == 'download_series':
+ fn = getattr(mod, 'download_series', None)
+ if not callable(fn):
+ raise RuntimeError('download_series not available in module')
+ season = selections.get('season')
+ episode = selections.get('episode')
+ return fn(media_obj, season, episode)
+ else:
+ # try registry or attribute
+ key = f"{module_name}.{action}"
+ fn = API_REGISTRY.get(key)
+ if fn is None:
+ fn = getattr(mod, action, None)
+ if not callable(fn):
+ raise RuntimeError('Action not found')
+ return fn(media_obj, **selections)
+
+
+JOB_MANAGER = JobManager()
+
+
+# ----------------------- endpoints -----------------------
+@app.get('/providers', dependencies=[Depends(_check_auth)])
+def providers():
+ return {'providers': _get_site_modules()}
+
+
+@app.post('/search', dependencies=[Depends(_check_auth)])
+def search(req: SearchRequest):
+ modules = _get_site_modules()
+ results: Dict[str, Any] = {}
+ targets = []
+ if req.provider in (None, 'all'):
+ targets = modules
+ else:
+ for m in modules:
+ if str(m.get('indice')) == str(req.provider) or m.get('name') == req.provider:
+ targets = [m]
+ break
+
+ timeout = 20
+ try:
+ timeout = int(config_manager.get('DEFAULT', 'http_api_provider_timeout') or 20)
+ except Exception:
+ timeout = 20
+
+ import concurrent.futures
+ futures = {}
+ with concurrent.futures.ThreadPoolExecutor(max_workers=min(8, max(1, len(targets)))) as executor:
+ for m in targets:
+ name = m['name']
+ try:
+ mod = importlib.import_module(f'StreamingCommunity.Api.Site.{name}')
+ except Exception as e:
+ results[name] = {'error': {'type': type(e).__name__, 'message': str(e)}}
+ continue
+ futures[name] = executor.submit(mod.search, req.query, True)
+
+ for name, fut in futures.items():
+ try:
+ manager = fut.result(timeout=timeout)
+ results[name] = _serialize_media_manager(manager) if manager is not None else []
+ except concurrent.futures.TimeoutError:
+ results[name] = {'error': {'type': 'TimeoutError', 'message': f'Provider timed out after {timeout}s'}}
+ except Exception as e:
+ results[name] = {'error': {'type': type(e).__name__, 'message': str(e)}}
+
+ return {'query': req.query, 'results': results}
+
+
+@app.post('/module_call', dependencies=[Depends(_check_auth)])
+def module_call(req: ModuleCallRequest):
+ # first try registry
+ key = f"{req.module}.{req.function}"
+ fn = API_REGISTRY.get(key)
+ if fn is None:
+ try:
+ mod = importlib.import_module(f'StreamingCommunity.Api.Site.{req.module}')
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+ fn = getattr(mod, req.function, None)
+ if not callable(fn):
+ raise HTTPException(status_code=400, detail='function not found')
+
+ if req.background:
+ # schedule as job
+ job_id = JOB_MANAGER.add_job({'module': req.module, 'action': req.function, 'item': req.kwargs or {}, 'selections': {}})
+ return {'status': 'scheduled', 'job_id': job_id}
+
+ # call synchronously but protect with timeout via executor
+ import concurrent.futures
+ try:
+ timeout = int(config_manager.get('DEFAULT', 'http_api_provider_timeout') or 20)
+ except Exception:
+ timeout = 20
+
+ def _call():
+ return fn(**(req.kwargs or {}))
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
+ fut = executor.submit(_call)
+ try:
+ res = fut.result(timeout=timeout)
+ return {'result': res}
+ except concurrent.futures.TimeoutError:
+ raise HTTPException(status_code=500, detail=f'Function timed out after {timeout}s')
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post('/jobs', dependencies=[Depends(_check_auth)])
+def create_job(req: JobCreateRequest):
+ # validate module exists
+ try:
+ importlib.import_module(f'StreamingCommunity.Api.Site.{req.module}')
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+ job_id = JOB_MANAGER.add_job(req.dict())
+ return {'job_id': job_id}
+
+
+@app.get('/jobs', dependencies=[Depends(_check_auth)])
+def list_jobs():
+ return {'jobs': JOB_MANAGER.list_jobs()}
+
+
+@app.get('/jobs/{job_id}', dependencies=[Depends(_check_auth)])
+def get_job(job_id: int):
+ job = JOB_MANAGER.get_job(job_id)
+ if not job:
+ raise HTTPException(status_code=404, detail='Job not found')
+ return job
+
+
+def start_api_server():
+ port = int(config_manager.get('DEFAULT', 'http_api_port') or 8080)
+ # import here to avoid requiring uvicorn at module import time
+ try:
+ import uvicorn as _uvicorn
+ except Exception as e:
+ raise RuntimeError('uvicorn is required to run the HTTP server') from e
+ _uvicorn.run(app, host='0.0.0.0', port=port, log_level="error")
diff --git a/StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py b/StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py
index 27832bba..03860f6d 100644
--- a/StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py
+++ b/StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py
@@ -2,7 +2,6 @@
import base64
import logging
-import os
# External libraries
@@ -31,16 +30,11 @@ def get_widevine_keys(pssh, license_url, cdm_device_path, headers=None, payload=
list: List of dicts {'kid': ..., 'key': ...} (only CONTENT keys) or None if error.
"""
- # Check if CDM file exists
- if not os.path.isfile(cdm_device_path):
- console.print(f"[bold red] CDM file not found: {cdm_device_path}[/bold red]")
- return None
-
# Check if PSSH is a valid base64 string
try:
base64.b64decode(pssh)
except Exception:
- console.print(f"[bold red] Invalid PSSH base64 string.[/bold red]")
+ console.print("[bold red] Invalid PSSH base64 string.[/bold red]")
return None
try:
@@ -67,7 +61,7 @@ def get_widevine_keys(pssh, license_url, cdm_device_path, headers=None, payload=
response = httpx.post(license_url, data=challenge, headers=req_headers, content=payload)
if response.status_code != 200:
- console.print(f"[bold red]License error:[/bold red] {response.status_code}")
+ console.print(f"[bold red]License error:[/bold red] {response.status_code}, {response.text}")
return None
# Handle (JSON) or classic (binary) license response
@@ -77,18 +71,17 @@ def get_widevine_keys(pssh, license_url, cdm_device_path, headers=None, payload=
# Check if license_data is empty
if not license_data:
- console.print(f"[bold red]License response is empty.[/bold red]")
+ console.print("[bold red]License response is empty.[/bold red]")
return None
if "application/json" in content_type:
try:
# Try to decode as JSON only if plausible
- text = response.text
data = None
try:
data = response.json()
- except Exception as e:
+ except Exception:
data = None
if data and "license" in data:
@@ -118,7 +111,7 @@ def get_widevine_keys(pssh, license_url, cdm_device_path, headers=None, payload=
# Check if content_keys list is empty
if not content_keys:
- console.print(f"[bold yellow]⚠️ No CONTENT keys found in license.[/bold yellow]")
+ console.print("[bold yellow]⚠️ No CONTENT keys found in license.[/bold yellow]")
return None
return content_keys
diff --git a/StreamingCommunity/Lib/Downloader/DASH/decrypt.py b/StreamingCommunity/Lib/Downloader/DASH/decrypt.py
index 36ffaceb..ffa0c34e 100644
--- a/StreamingCommunity/Lib/Downloader/DASH/decrypt.py
+++ b/StreamingCommunity/Lib/Downloader/DASH/decrypt.py
@@ -38,7 +38,7 @@ def decrypt_with_mp4decrypt(encrypted_path, kid, key, output_path=None, cleanup=
bytes.fromhex(kid)
bytes.fromhex(key)
except Exception:
- console.print(f"[bold red] Invalid KID or KEY (not hex).[/bold red]")
+ console.print("[bold red] Invalid KID or KEY (not hex).[/bold red]")
return None
if not output_path:
diff --git a/StreamingCommunity/Lib/Downloader/DASH/downloader.py b/StreamingCommunity/Lib/Downloader/DASH/downloader.py
index 3b39ddee..f8563b6a 100644
--- a/StreamingCommunity/Lib/Downloader/DASH/downloader.py
+++ b/StreamingCommunity/Lib/Downloader/DASH/downloader.py
@@ -18,11 +18,12 @@
from .segments import MPD_Segments
from .decrypt import decrypt_with_mp4decrypt
from .cdm_helpher import get_widevine_keys
+from StreamingCommunity.Api.http_api import JOB_MANAGER
# Config
DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
-FILTER_CUSTOM_REOLUTION = str(config_manager.get('M3U8_PARSER', 'force_resolution')).strip().lower()
+FILTER_CUSTOM_REOLUTION = str(config_manager.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
@@ -43,6 +44,9 @@ def __init__(self, cdm_device, license_url, mpd_url, output_path):
self.error = None
self.stopped = False
self.output_file = None
+ self.total_segments_all = 0
+ self.segments_downloaded = 0
+ self._progress_lock = None
def _setup_temp_dirs(self):
"""
@@ -99,60 +103,83 @@ def download_and_decrypt(self, custom_headers=None, custom_payload=None):
if rep:
encrypted_path = os.path.join(self.encrypted_dir, f"{rep['id']}_encrypted.m4s")
- downloader = MPD_Segments(
- tmp_folder=self.encrypted_dir,
- representation=rep,
- pssh=self.parser.pssh
- )
-
- try:
- result = downloader.download_streams()
-
- # Check for interruption or failure
- if result.get("stopped"):
- self.stopped = True
- self.error = "Download interrupted"
- return False
-
- if result.get("nFailed", 0) > 0:
- self.error = f"Failed segments: {result['nFailed']}"
- return False
-
- except Exception as ex:
- self.error = str(ex)
+ # compute total segments for progress aggregation
+ try:
+ total = 0
+ v = self.get_representation_by_type('video')
+ a = self.get_representation_by_type('audio')
+ if v and 'segment_urls' in v:
+ total += len(v.get('segment_urls', [])) + 1
+ if a and 'segment_urls' in a:
+ total += len(a.get('segment_urls', [])) + 1
+ if total <= 0:
+ total = 1
+ self.total_segments_all = total
+ self.segments_downloaded = 0
+ import threading as _th
+ self._progress_lock = _th.Lock()
+ except Exception:
+ self.total_segments_all = 1
+ self.segments_downloaded = 0
+ import threading as _th
+ self._progress_lock = _th.Lock()
+
+ downloader = MPD_Segments(
+ tmp_folder=self.encrypted_dir,
+ representation=rep,
+ pssh=self.parser.pssh
+ )
+ # attach progress aggregation reference
+ setattr(downloader, 'progress_parent', self)
+
+ try:
+ result = downloader.download_streams()
+
+ # Check for interruption or failure
+ if result.get("stopped"):
+ self.stopped = True
+ self.error = "Download interrupted"
return False
- if not self.parser.pssh:
- print("No PSSH found: segments are not encrypted, skipping decryption.")
- self.download_segments(clear=True)
- return True
-
- keys = get_widevine_keys(
- pssh=self.parser.pssh,
- license_url=self.license_url,
- cdm_device_path=self.cdm_device,
- headers=custom_headers,
- payload=custom_payload
- )
-
- if not keys:
- self.error = f"No key found, cannot decrypt {typ}"
- print(self.error)
+ if result.get("nFailed", 0) > 0:
+ self.error = f"Failed segments: {result['nFailed']}"
return False
- key = keys[0]
- KID = key['kid']
- KEY = key['key']
+ except Exception as ex:
+ self.error = str(ex)
+ return False
- decrypted_path = os.path.join(self.decrypted_dir, f"{typ}.mp4")
- result_path = decrypt_with_mp4decrypt(
- encrypted_path, KID, KEY, output_path=decrypted_path
- )
+ if not self.parser.pssh:
+ print("No PSSH found: segments are not encrypted, skipping decryption.")
+ self.download_segments(clear=True)
+ return True
+
+ keys = get_widevine_keys(
+ pssh=self.parser.pssh,
+ license_url=self.license_url,
+ cdm_device_path=self.cdm_device,
+ headers=custom_headers,
+ payload=custom_payload
+ )
+
+ if not keys:
+ self.error = f"No key found, cannot decrypt {typ}"
+ print(self.error)
+ return False
- if not result_path:
- self.error = f"Decryption of {typ} failed"
- print(self.error)
- return False
+ key = keys[0]
+ KID = key['kid']
+ KEY = key['key']
+
+ decrypted_path = os.path.join(self.decrypted_dir, f"{typ}.mp4")
+ result_path = decrypt_with_mp4decrypt(
+ encrypted_path, KID, KEY, output_path=decrypted_path
+ )
+
+ if not result_path:
+ self.error = f"Decryption of {typ} failed"
+ print(self.error)
+ return False
else:
self.error = f"No {typ} found"
@@ -215,4 +242,4 @@ def get_status(self):
"path": self.output_file,
"error": self.error,
"stopped": self.stopped
- }
\ No newline at end of file
+ }
diff --git a/StreamingCommunity/Lib/Downloader/DASH/parser.py b/StreamingCommunity/Lib/Downloader/DASH/parser.py
index 8fd1ceb7..6a66a564 100644
--- a/StreamingCommunity/Lib/Downloader/DASH/parser.py
+++ b/StreamingCommunity/Lib/Downloader/DASH/parser.py
@@ -61,7 +61,7 @@ def __init__(self, mpd_url):
self.base_url = mpd_url.rsplit('/', 1)[0] + '/'
def parse(self, custom_headers):
- response = httpx.get(self.mpd_url, headers=custom_headers, timeout=max_timeout)
+ response = httpx.get(self.mpd_url, headers=custom_headers, timeout=max_timeout, follow_redirects=True)
response.raise_for_status()
root = ET.fromstring(response.content)
diff --git a/StreamingCommunity/Lib/Downloader/DASH/segments.py b/StreamingCommunity/Lib/Downloader/DASH/segments.py
index 0a870f28..748e6d61 100644
--- a/StreamingCommunity/Lib/Downloader/DASH/segments.py
+++ b/StreamingCommunity/Lib/Downloader/DASH/segments.py
@@ -14,6 +14,7 @@
from StreamingCommunity.Lib.M3U8.estimator import M3U8_Ts_Estimator
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.color import Colors
+from StreamingCommunity.Api.http_api import JOB_MANAGER
# Config
@@ -134,7 +135,7 @@ async def _download_init_segment(self, client, init_url, concat_path, estimator,
try:
headers = {'User-Agent': get_userAgent()}
- response = await client.get(init_url, headers=headers)
+ response = await client.get(init_url, headers=headers, follow_redirects=True)
with open(concat_path, 'wb') as outfile:
if response.status_code == 200:
@@ -143,6 +144,22 @@ async def _download_init_segment(self, client, init_url, concat_path, estimator,
estimator.add_ts_file(len(response.content))
progress_bar.update(1)
+ try:
+ parent = getattr(self, 'progress_parent', None)
+ if parent is not None and getattr(parent, 'total_segments_all', 0) > 0:
+ with parent._progress_lock:
+ parent.segments_downloaded += 1
+ downloaded = parent.segments_downloaded
+ total = parent.total_segments_all
+ percent = 5.0 + (downloaded / float(total)) * 85.0
+ if percent > 99.0:
+ percent = 99.0
+ try:
+ JOB_MANAGER.update_progress(percent)
+ except Exception:
+ pass
+ except Exception:
+ pass
# Update progress bar with estimated info
estimator.update_progress_bar(len(response.content), progress_bar)
@@ -160,7 +177,8 @@ async def download_single(url, idx):
headers = {'User-Agent': get_userAgent()}
for attempt in range(max_retry):
try:
- resp = await client.get(url, headers=headers)
+ resp = await client.get(url, headers=headers, follow_redirects=True)
+
if resp.status_code == 200:
return idx, resp.content, attempt
else:
@@ -188,6 +206,22 @@ async def download_single(url, idx):
# Update progress bar with estimated info
estimator.update_progress_bar(len(data), progress_bar)
+ try:
+ parent = getattr(self, 'progress_parent', None)
+ if parent is not None and getattr(parent, 'total_segments_all', 0) > 0:
+ with parent._progress_lock:
+ parent.segments_downloaded += 1
+ downloaded = parent.segments_downloaded
+ total = parent.total_segments_all
+ percent = 5.0 + (downloaded / float(total)) * 85.0
+ if percent > 99.0:
+ percent = 99.0
+ try:
+ JOB_MANAGER.update_progress(percent)
+ except Exception:
+ pass
+ except Exception:
+ pass
except KeyboardInterrupt:
self.download_interrupted = True
@@ -214,7 +248,7 @@ async def download_single(url, idx):
for attempt in range(max_retry):
try:
resp = await client.get(url, headers=headers)
-
+
if resp.status_code == 200:
return idx, resp.content, attempt
else:
@@ -329,4 +363,4 @@ def _display_error_summary(self) -> None:
f"[white]Failed segments: [red]{getattr(self, 'info_nFailed', 0)}")
if getattr(self, 'info_nRetry', 0) > len(self.selected_representation['segment_urls']) * 0.3:
- print("[yellow]Warning: High retry count detected. Consider reducing worker count in config.")
\ No newline at end of file
+ print("[yellow]Warning: High retry count detected. Consider reducing worker count in config.")
diff --git a/StreamingCommunity/Lib/Downloader/HLS/downloader.py b/StreamingCommunity/Lib/Downloader/HLS/downloader.py
index 32134b8a..d7f10740 100644
--- a/StreamingCommunity/Lib/Downloader/HLS/downloader.py
+++ b/StreamingCommunity/Lib/Downloader/HLS/downloader.py
@@ -2,6 +2,7 @@
import os
import re
+import threading
import time
import logging
import shutil
@@ -17,6 +18,7 @@
# Internal utilities
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.os import os_manager, internet_manager
from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
@@ -30,6 +32,7 @@
)
from ...M3U8 import M3U8_Parser, M3U8_UrlFix
from .segments import M3U8_Segments
+from StreamingCommunity.Api.http_api import JOB_MANAGER
# Config
@@ -38,7 +41,7 @@
DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
MERGE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'merge_subs')
CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
-FILTER_CUSTOM_REOLUTION = str(config_manager.get('M3U8_PARSER', 'force_resolution')).strip().lower()
+FILTER_CUSTOM_RESOLUTION = str(config_manager.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
RETRY_LIMIT = config_manager.get_int('REQUESTS', 'max_retry')
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
TELEGRAM_BOT = config_manager.get_bool('DEFAULT', 'telegram_bot')
@@ -62,7 +65,8 @@ def request(self, url: str, return_content: bool = False) -> Optional[httpx.Resp
Returns:
Response content/text or None if all retries fail
"""
- client = httpx.Client(headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
+ # Use unified HTTP client (inherits timeout/verify/proxy from config)
+ client = create_client(headers=self.headers)
for attempt in range(RETRY_LIMIT):
try:
@@ -154,12 +158,12 @@ def select_streams(self):
self.sub_streams = []
else:
- if str(FILTER_CUSTOM_REOLUTION) == "best":
+ if str(FILTER_CUSTOM_RESOLUTION) == "best":
self.video_url, self.video_res = self.parser._video.get_best_uri()
- elif str(FILTER_CUSTOM_REOLUTION) == "worst":
+ elif str(FILTER_CUSTOM_RESOLUTION) == "worst":
self.video_url, self.video_res = self.parser._video.get_worst_uri()
- elif str(FILTER_CUSTOM_REOLUTION).replace("p", "").replace("px", "").isdigit():
- resolution_value = int(str(FILTER_CUSTOM_REOLUTION).replace("p", "").replace("px", ""))
+ elif str(FILTER_CUSTOM_RESOLUTION).replace("p", "").replace("px", "").isdigit():
+ resolution_value = int(str(FILTER_CUSTOM_RESOLUTION).replace("p", "").replace("px", ""))
self.video_url, self.video_res = self.parser._video.get_custom_uri(resolution_value)
else:
logging.error("Resolution not recognized.")
@@ -188,7 +192,7 @@ def log_selection(self):
console.print(
f"[cyan bold]Video [/cyan bold] [green]Available:[/green] [purple]{', '.join(list_available_resolution)}[/purple] | "
- f"[red]Set:[/red] [purple]{FILTER_CUSTOM_REOLUTION}[/purple] | "
+ f"[red]Set:[/red] [purple]{FILTER_CUSTOM_RESOLUTION}[/purple] | "
f"[yellow]Downloadable:[/yellow] [purple]{self.video_res[0]}x{self.video_res[1]}[/purple]"
)
@@ -253,6 +257,8 @@ def download_video(self, video_url: str):
video_tmp_dir = os.path.join(self.temp_dir, 'video')
downloader = M3U8_Segments(url=video_full_url, tmp_folder=video_tmp_dir)
+ # attach reference so segments can report aggregated progress
+ setattr(downloader, 'progress_parent', self)
result = downloader.download_streams("Video", "video")
self.missing_segments.append(result)
@@ -270,6 +276,7 @@ def download_audio(self, audio: Dict):
audio_tmp_dir = os.path.join(self.temp_dir, 'audio', audio['language'])
downloader = M3U8_Segments(url=audio_full_url, tmp_folder=audio_tmp_dir)
+ setattr(downloader, 'progress_parent', self)
result = downloader.download_streams(f"Audio {audio['language']}", "audio")
self.missing_segments.append(result)
@@ -300,18 +307,48 @@ def download_all(self, video_url: str, audio_streams: List[Dict], sub_streams: L
"""
Downloads all selected streams (video, audio, subtitles).
"""
+ # Build a summary of total segments to download across video/audio/subtitles
return_stopped = False
+ total_segments = 0
+
+ # video
+ try:
+ tmp_video = M3U8_Segments(url=video_url, tmp_folder=os.path.join(self.temp_dir, 'video'))
+ tmp_video.get_info()
+ total_segments += len(tmp_video.segments)
+ except Exception:
+ # If we cannot retrieve info, fallback to 1 to avoid division by zero
+ total_segments += 1
+
+ # audio
+ for audio in audio_streams:
+ try:
+ tmp_audio = M3U8_Segments(url=audio['uri'], tmp_folder=os.path.join(self.temp_dir, 'audio', audio['language']))
+ tmp_audio.get_info()
+ total_segments += len(tmp_audio.segments)
+ except Exception:
+ total_segments += 0
+
+ # subtitles: count as 1 per subtitle file
+ total_segments += len(sub_streams)
+
+ # ensure at least 1
+ if total_segments <= 0:
+ total_segments = 1
+
+ # attach aggregation info to this manager so segment download can report progress
+ self.total_segments_all = total_segments
+ self.segments_downloaded = 0
+ self._progress_lock = threading.Lock()
+
video_file = os.path.join(self.temp_dir, 'video', '0.ts')
-
+
if not os.path.exists(video_file):
if self.download_video(video_url):
if not return_stopped:
return_stopped = True
for audio in audio_streams:
- #if self.stopped:
- # break
-
audio_file = os.path.join(self.temp_dir, 'audio', audio['language'], '0.ts')
if not os.path.exists(audio_file):
if self.download_audio(audio):
@@ -319,9 +356,6 @@ def download_all(self, video_url: str, audio_streams: List[Dict], sub_streams: L
return_stopped = True
for sub in sub_streams:
- #if self.stopped:
- # break
-
sub_file = os.path.join(self.temp_dir, 'subs', f"{sub['language']}.vtt")
if not os.path.exists(sub_file):
if self.download_subtitle(sub):
@@ -418,7 +452,7 @@ def start(self) -> Dict[str, Any]:
- is_master: Whether the M3U8 was a master playlist
Or raises an exception if there's an error
"""
- console.print(f"[cyan]You can safely stop the download with [bold]Ctrl+c[bold] [cyan] \n")
+ console.print("[cyan]You can safely stop the download with [bold]Ctrl+c[bold] [cyan] \n")
if TELEGRAM_BOT:
bot = get_bot_instance()
@@ -435,7 +469,7 @@ def start(self) -> Dict[str, Any]:
'stopped': False
}
if TELEGRAM_BOT:
- bot.send_message(f"Contenuto già scaricato!", None)
+ bot.send_message("Contenuto già scaricato!", None)
return response
self.path_manager.setup_directories()
@@ -444,6 +478,10 @@ def start(self) -> Dict[str, Any]:
self.m3u8_manager.parse()
self.m3u8_manager.select_streams()
self.m3u8_manager.log_selection()
+ try:
+ JOB_MANAGER.update_progress(5.0)
+ except Exception:
+ pass
self.download_manager = DownloadManager(
temp_dir=self.path_manager.temp_dir,
@@ -466,10 +504,18 @@ def start(self) -> Dict[str, Any]:
)
final_file = self.merge_manager.merge()
+ try:
+ # during merge set near-complete progress
+ JOB_MANAGER.update_progress(95.0)
+ except Exception:
+ pass
self.path_manager.move_final_file(final_file)
self._print_summary()
self.path_manager.cleanup()
-
+ try:
+ JOB_MANAGER.update_progress(100.0)
+ except Exception:
+ pass
return {
'path': self.path_manager.output_path,
'url': self.m3u8_url,
@@ -527,4 +573,4 @@ def _print_summary(self):
panel_content,
title=f"{os.path.basename(self.path_manager.output_path.replace('.mp4', ''))}",
border_style="green"
- ))
\ No newline at end of file
+ ))
diff --git a/StreamingCommunity/Lib/Downloader/HLS/segments.py b/StreamingCommunity/Lib/Downloader/HLS/segments.py
index 0fc77e1d..18b47e97 100644
--- a/StreamingCommunity/Lib/Downloader/HLS/segments.py
+++ b/StreamingCommunity/Lib/Downloader/HLS/segments.py
@@ -23,6 +23,7 @@
# Internal utilities
from StreamingCommunity.Util.color import Colors
from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.config_json import config_manager
@@ -198,14 +199,7 @@ def interrupt_handler(signum, frame):
print("Signal handler must be set in the main thread")
def _get_http_client(self):
- client_params = {
- 'headers': {'User-Agent': get_userAgent()},
- 'timeout': SEGMENT_MAX_TIMEOUT,
- 'follow_redirects': True,
- 'http2': False,
- 'verify': REQUEST_VERIFY
- }
- return httpx.Client(**client_params)
+ return create_client(headers={'User-Agent': get_userAgent()}, follow_redirects=True)
def download_segment(self, ts_url: str, index: int, progress_bar: tqdm, backoff_factor: float = 1.1) -> None:
"""
@@ -243,8 +237,31 @@ def download_segment(self, ts_url: str, index: int, progress_bar: tqdm, backoff_
self.class_ts_estimator.update_progress_bar(content_size, progress_bar)
self.queue.put((index, segment_content))
- self.downloaded_segments.add(index)
+ self.downloaded_segments.add(index)
progress_bar.update(1)
+
+ # If part of an aggregated download (HLS with audio tracks), report overall job progress
+ try:
+ parent = getattr(self, 'progress_parent', None)
+ if parent is not None and getattr(parent, 'total_segments_all', 0) > 0:
+ with parent._progress_lock:
+ parent.segments_downloaded += 1
+ downloaded = parent.segments_downloaded
+ total = parent.total_segments_all
+ # Map segments progress to overall percentage: parsing 5%, download 85%, merge 10%
+ percent = 5.0 + (downloaded / float(total)) * 85.0
+ # Guard bounds
+ if percent < 0:
+ percent = 0.0
+ if percent > 99.0:
+ percent = 99.0
+ try:
+ from StreamingCommunity.Api.http_api import JOB_MANAGER
+ JOB_MANAGER.update_progress(percent)
+ except Exception:
+ pass
+ except Exception:
+ pass
return
except Exception as e:
@@ -264,7 +281,8 @@ def download_segment(self, ts_url: str, index: int, progress_bar: tqdm, backoff_
with self.active_retries_lock:
self.active_retries += 1
- sleep_time = backoff_factor * (2 ** attempt)
+ #sleep_time = backoff_factor * (2 ** attempt)
+ sleep_time = backoff_factor * (attempt + 1)
logging.info(f"Retrying segment {index} in {sleep_time} seconds...")
time.sleep(sleep_time)
diff --git a/StreamingCommunity/Lib/Downloader/MP4/downloader.py b/StreamingCommunity/Lib/Downloader/MP4/downloader.py
index 85377d46..c96a676b 100644
--- a/StreamingCommunity/Lib/Downloader/MP4/downloader.py
+++ b/StreamingCommunity/Lib/Downloader/MP4/downloader.py
@@ -10,7 +10,6 @@
# External libraries
-import httpx
from tqdm import tqdm
from rich.console import Console
from rich.prompt import Prompt
@@ -19,10 +18,12 @@
# Internal utilities
from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
from StreamingCommunity.Util.color import Colors
from StreamingCommunity.Util.config_json import config_manager
from StreamingCommunity.Util.os import internet_manager, os_manager
from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
+from StreamingCommunity.Api.http_api import JOB_MANAGER
# Logic class
@@ -83,7 +84,7 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = No
if os.path.exists(path):
console.log("[red]Output file already exists.")
if TELEGRAM_BOT:
- bot.send_message(f"Contenuto già scaricato!", None)
+ bot.send_message("Contenuto già scaricato!", None)
return None, False
if not (url.lower().startswith('http://') or url.lower().startswith('https://')):
@@ -110,7 +111,8 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = No
os.makedirs(os.path.dirname(path), exist_ok=True)
try:
- with httpx.Client(verify=REQUEST_VERIFY) as client:
+ # Use unified HTTP client (verify/timeout/proxy from config)
+ with create_client() as client:
with client.stream("GET", url, headers=headers) as response:
response.raise_for_status()
total = int(response.headers.get('content-length', 0))
@@ -147,6 +149,12 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = No
size = file.write(chunk)
downloaded += size
bar.update(size)
+ try:
+ # report overall progress for this job (single-phase download)
+ percent = (downloaded / float(total)) * 100.0 if total > 0 else 0.0
+ JOB_MANAGER.update_progress(percent)
+ except Exception:
+ pass
except KeyboardInterrupt:
if not interrupt_handler.force_quit:
diff --git a/StreamingCommunity/Lib/Downloader/TOR/downloader.py b/StreamingCommunity/Lib/Downloader/TOR/downloader.py
index c3ed4cec..ed1c23ef 100644
--- a/StreamingCommunity/Lib/Downloader/TOR/downloader.py
+++ b/StreamingCommunity/Lib/Downloader/TOR/downloader.py
@@ -19,6 +19,7 @@
from StreamingCommunity.Util.color import Colors
from StreamingCommunity.Util.os import internet_manager
from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Api.http_api import JOB_MANAGER
# Configuration
@@ -229,7 +230,7 @@ def _check_torrent_viability(self):
torrent_info.num_seeds == 0 and
torrent_info.state in ('stalledDL', 'missingFiles', 'error')):
- self.console.print(f"[bold red]Torrent not downloadable. No seeds or peers available. Removing...[/bold red]")
+ self.console.print("[bold red]Torrent not downloadable. No seeds or peers available. Removing...[/bold red]")
self._remove_torrent(self.latest_torrent_hash)
self.latest_torrent_hash = None
return False
@@ -250,7 +251,7 @@ def _remove_torrent(self, torrent_hash, delete_files=True):
"""
try:
self.qb.torrents_delete(delete_files=delete_files, torrent_hashes=torrent_hash)
- self.console.print(f"[yellow]Torrent removed from client[/yellow]")
+ self.console.print("[yellow]Torrent removed from client[/yellow]")
except Exception as e:
logging.error(f"Error removing torrent: {str(e)}")
@@ -353,13 +354,15 @@ def start_download(self):
# Update progress
progress = torrent_info.progress * 100
pbar.n = progress
+ try:
+ JOB_MANAGER.update_progress(progress)
+ except Exception:
+ pass
# Get download statistics
download_speed = torrent_info.dlspeed
- upload_speed = torrent_info.upspeed
total_size = torrent_info.size
downloaded_size = torrent_info.downloaded
- eta = torrent_info.eta # eta in seconds
# Format sizes and speeds using the existing functions without modification
downloaded_size_str = internet_manager.format_file_size(downloaded_size)
@@ -465,5 +468,5 @@ def cleanup(self):
try:
self.qb.auth_log_out()
- except:
- pass
\ No newline at end of file
+ except Exception:
+ pass
diff --git a/StreamingCommunity/Lib/Downloader/__init__.py b/StreamingCommunity/Lib/Downloader/__init__.py
index 2cadf740..c851fd6f 100644
--- a/StreamingCommunity/Lib/Downloader/__init__.py
+++ b/StreamingCommunity/Lib/Downloader/__init__.py
@@ -2,4 +2,12 @@
from .HLS.downloader import HLS_Downloader
from .MP4.downloader import MP4_downloader
-from .TOR.downloader import TOR_downloader
\ No newline at end of file
+from .TOR.downloader import TOR_downloader
+from .DASH.downloader import DASH_Downloader
+
+__all__ = [
+ "HLS_Downloader",
+ "MP4_downloader",
+ "TOR_downloader",
+ "DASH_Downloader"
+]
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/FFmpeg/__init__.py b/StreamingCommunity/Lib/FFmpeg/__init__.py
index ec2ad67a..15e13bd7 100644
--- a/StreamingCommunity/Lib/FFmpeg/__init__.py
+++ b/StreamingCommunity/Lib/FFmpeg/__init__.py
@@ -1,4 +1,13 @@
# 18.04.24
from .command import join_video, join_audios, join_subtitle
-from .util import print_duration_table, get_video_duration
\ No newline at end of file
+from .util import print_duration_table, get_video_duration
+
+
+__all__ = [
+ "join_video",
+ "join_audios",
+ "join_subtitle",
+ "print_duration_table",
+ "get_video_duration",
+]
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/FFmpeg/command.py b/StreamingCommunity/Lib/FFmpeg/command.py
index 118ce70b..d469f19f 100644
--- a/StreamingCommunity/Lib/FFmpeg/command.py
+++ b/StreamingCommunity/Lib/FFmpeg/command.py
@@ -1,6 +1,5 @@
# 31.01.24
-import sys
import logging
import subprocess
from typing import List, Dict, Tuple, Optional
@@ -110,13 +109,12 @@ def join_video(video_path: str, out_path: str, codec: M3U8_Codec = None):
if need_to_force_to_ts(video_path):
#console.log("[red]Force input file to 'mpegts'.")
ffmpeg_cmd.extend(['-f', 'mpegts'])
- vcodec = "libx264"
# Insert input video path
ffmpeg_cmd.extend(['-i', video_path])
# Add output Parameters
- if USE_CODEC and codec != None:
+ if USE_CODEC and codec is not None:
if USE_VCODEC:
if codec.video_codec_name:
if not USE_GPU:
@@ -162,7 +160,7 @@ def join_video(video_path: str, out_path: str, codec: M3U8_Codec = None):
print()
else:
- console.log(f"[purple]FFmpeg [white][[cyan]Join video[white]] ...")
+ console.log("[purple]FFmpeg [white][[cyan]Join video[white]] ...")
with suppress_output():
capture_ffmpeg_real_time(ffmpeg_cmd, "[cyan]Join video")
print()
@@ -258,7 +256,7 @@ def join_audios(video_path: str, audio_tracks: List[Dict[str, str]], out_path: s
print()
else:
- console.log(f"[purple]FFmpeg [white][[cyan]Join audio[white]] ...")
+ console.log("[purple]FFmpeg [white][[cyan]Join audio[white]] ...")
with suppress_output():
capture_ffmpeg_real_time(ffmpeg_cmd, "[cyan]Join audio")
print()
@@ -313,7 +311,7 @@ def join_subtitle(video_path: str, subtitles_list: List[Dict[str, str]], out_pat
print()
else:
- console.log(f"[purple]FFmpeg [white][[cyan]Join subtitle[white]] ...")
+ console.log("[purple]FFmpeg [white][[cyan]Join subtitle[white]] ...")
with suppress_output():
capture_ffmpeg_real_time(ffmpeg_cmd, "[cyan]Join subtitle")
print()
diff --git a/StreamingCommunity/Lib/FFmpeg/util.py b/StreamingCommunity/Lib/FFmpeg/util.py
index 1fb08516..b49bb712 100644
--- a/StreamingCommunity/Lib/FFmpeg/util.py
+++ b/StreamingCommunity/Lib/FFmpeg/util.py
@@ -76,7 +76,7 @@ def get_video_duration(file_path: str) -> float:
try:
return float(probe_result['format']['duration'])
- except:
+ except Exception:
return 1
except Exception as e:
diff --git a/StreamingCommunity/Lib/M3U8/__init__.py b/StreamingCommunity/Lib/M3U8/__init__.py
index 98889721..31867e17 100644
--- a/StreamingCommunity/Lib/M3U8/__init__.py
+++ b/StreamingCommunity/Lib/M3U8/__init__.py
@@ -3,4 +3,12 @@
from .decryptor import M3U8_Decryption
from .estimator import M3U8_Ts_Estimator
from .parser import M3U8_Parser, M3U8_Codec
-from .url_fixer import M3U8_UrlFix
\ No newline at end of file
+from .url_fixer import M3U8_UrlFix
+
+__all__ = [
+ "M3U8_Decryption",
+ "M3U8_Ts_Estimator",
+ "M3U8_Parser",
+ "M3U8_Codec",
+ "M3U8_UrlFix"
+]
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/M3U8/decryptor.py b/StreamingCommunity/Lib/M3U8/decryptor.py
index 69819101..bd83787a 100644
--- a/StreamingCommunity/Lib/M3U8/decryptor.py
+++ b/StreamingCommunity/Lib/M3U8/decryptor.py
@@ -1,7 +1,6 @@
# 03.04.24
import sys
-import time
import logging
import importlib.util
@@ -10,18 +9,23 @@
from rich.console import Console
+# Cryptodome imports
+from Cryptodome.Cipher import AES
+from Cryptodome.Util.Padding import unpad
+
+
# Check if Cryptodome module is installed
console = Console()
crypto_spec = importlib.util.find_spec("Cryptodome")
crypto_installed = crypto_spec is not None
+
if not crypto_installed:
console.log("[red]pycryptodomex non è installato. Per favore installalo. Leggi readme.md [Requirement].")
sys.exit(0)
-logging.info("[cyan]Decrypy use: Cryptodomex")
-from Cryptodome.Cipher import AES
-from Cryptodome.Util.Padding import unpad
+
+logging.info("[cyan]Decryption use: Cryptodomex")
diff --git a/StreamingCommunity/Lib/M3U8/estimator.py b/StreamingCommunity/Lib/M3U8/estimator.py
index cd5d2781..993f4f56 100644
--- a/StreamingCommunity/Lib/M3U8/estimator.py
+++ b/StreamingCommunity/Lib/M3U8/estimator.py
@@ -121,12 +121,6 @@ def update_progress_bar(self, total_downloaded: int, progress_counter: tqdm) ->
number_file_total_size = file_total_size.split(' ')[0]
units_file_total_size = file_total_size.split(' ')[1]
- # Reduce lock contention by acquiring data with minimal synchronization
- retry_count = 0
- if self.segments_instance:
- with self.segments_instance.active_retries_lock:
- retry_count = self.segments_instance.active_retries
-
# Get speed data outside of any locks
speed_data = ["N/A", ""]
with self.lock:
diff --git a/StreamingCommunity/Lib/M3U8/parser.py b/StreamingCommunity/Lib/M3U8/parser.py
index 0a083e84..6f2ff319 100644
--- a/StreamingCommunity/Lib/M3U8/parser.py
+++ b/StreamingCommunity/Lib/M3U8/parser.py
@@ -485,7 +485,7 @@ def __parse_video_info__(self, m3u8_obj) -> None:
try:
for playlist in m3u8_obj.playlists:
- there_is_codec = not playlist.stream_info.codecs is None
+ there_is_codec = playlist.stream_info.codecs is not None
logging.info(f"There is coded: {there_is_codec}")
if there_is_codec:
diff --git a/StreamingCommunity/Lib/M3U8/url_fixer.py b/StreamingCommunity/Lib/M3U8/url_fixer.py
index 2297c5e9..2b48ace7 100644
--- a/StreamingCommunity/Lib/M3U8/url_fixer.py
+++ b/StreamingCommunity/Lib/M3U8/url_fixer.py
@@ -33,7 +33,7 @@ def generate_full_url(self, url_resource: str) -> str:
Returns:
str: The full URL for the specified resource.
"""
- if self.url_playlist == None:
+ if self.url_playlist is None:
logging.error("[M3U8_UrlFix] Cant generate full url, playlist not present")
raise
diff --git a/StreamingCommunity/Lib/TMBD/__init__.py b/StreamingCommunity/Lib/TMBD/__init__.py
index c9c65687..f778fff1 100644
--- a/StreamingCommunity/Lib/TMBD/__init__.py
+++ b/StreamingCommunity/Lib/TMBD/__init__.py
@@ -1,4 +1,9 @@
# 17.09.24
from .tmdb import tmdb
-from .obj_tmbd import Json_film
\ No newline at end of file
+from .obj_tmbd import Json_film
+
+__all__ = [
+ "tmdb",
+ "Json_film"
+]
\ No newline at end of file
diff --git a/StreamingCommunity/TelegramHelp/config.json b/StreamingCommunity/TelegramHelp/config.json
index f23f8b66..641a932a 100644
--- a/StreamingCommunity/TelegramHelp/config.json
+++ b/StreamingCommunity/TelegramHelp/config.json
@@ -2,10 +2,8 @@
"DEFAULT": {
"debug": false,
"show_message": true,
- "clean_console": true,
"show_trending": true,
"use_api": true,
- "not_close": false,
"telegram_bot": true,
"download_site_data": true,
"validate_github_config": true
@@ -45,9 +43,7 @@
"use_acodec": true,
"use_bitrate": true,
"use_gpu": false,
- "default_preset": "ultrafast"
- },
- "M3U8_PARSER": {
+ "default_preset": "ultrafast",
"force_resolution": "Best"
},
"REQUESTS": {
diff --git a/StreamingCommunity/TelegramHelp/telegram_bot.py b/StreamingCommunity/TelegramHelp/telegram_bot.py
index 85eb4c91..49aded52 100644
--- a/StreamingCommunity/TelegramHelp/telegram_bot.py
+++ b/StreamingCommunity/TelegramHelp/telegram_bot.py
@@ -9,7 +9,6 @@
import json
import threading
import subprocess
-import threading
from typing import Optional
# External libraries
@@ -305,7 +304,7 @@ def is_authorized(self, user_id):
def handle_get_id(self, message):
if not self.is_authorized(message.from_user.id):
- print(f" Non sei autorizzato.")
+ print(" Non sei autorizzato.")
self.bot.send_message(message.chat.id, " Non sei autorizzato.")
return
@@ -384,7 +383,7 @@ def handle_start_script(self, message):
def handle_list_scripts(self, message):
if not self.is_authorized(message.from_user.id):
- print(f" Non sei autorizzato.")
+ print(" Non sei autorizzato.")
self.bot.send_message(message.chat.id, " Non sei autorizzato.")
return
@@ -395,7 +394,7 @@ def handle_list_scripts(self, message):
scripts_data = []
if not scripts_data:
- print(f" Nessuno script registrato.")
+ print(" Nessuno script registrato.")
self.bot.send_message(message.chat.id, " Nessuno script registrato.")
return
@@ -437,7 +436,7 @@ def handle_list_scripts(self, message):
def handle_stop_script(self, message):
if not self.is_authorized(message.from_user.id):
- print(f" Non sei autorizzato.")
+ print(" Non sei autorizzato.")
self.bot.send_message(message.chat.id, " Non sei autorizzato.")
return
@@ -452,7 +451,7 @@ def handle_stop_script(self, message):
running_scripts = [s for s in scripts_data if s["status"] == "running"]
if not running_scripts:
- print(f" Nessuno script attivo da fermare.")
+ print(" Nessuno script attivo da fermare.")
self.bot.send_message(
message.chat.id, " Nessuno script attivo da fermare."
)
@@ -526,7 +525,7 @@ def handle_response(self, message):
def handle_screen_status(self, message):
command_parts = message.text.split()
if len(command_parts) < 2:
- print(f" ID mancante nel comando. Usa: /screen ")
+ print(" ID mancante nel comando. Usa: /screen ")
self.bot.send_message(
message.chat.id, " ID mancante nel comando. Usa: /screen "
)
@@ -557,9 +556,9 @@ def handle_screen_status(self, message):
return
if not os.path.exists(temp_file):
- print(f" Impossibile catturare l'output della screen.")
+ print(" Impossibile catturare l'output della screen.")
self.bot.send_message(
- message.chat.id, f" Impossibile catturare l'output della screen."
+ message.chat.id, " Impossibile catturare l'output della screen."
)
return
@@ -669,7 +668,7 @@ def ask(self, type, prompt_message, choices, timeout=60):
return response
time.sleep(1)
- print(f" Timeout: nessuna risposta ricevuta.")
+ print(" Timeout: nessuna risposta ricevuta.")
for chat_id in self.authorized_users: # Manda a tutti gli ID autorizzati
self.bot.send_message(chat_id, " Timeout: nessuna risposta ricevuta.")
self.request_manager.clear_file()
diff --git a/StreamingCommunity/Upload/version.py b/StreamingCommunity/Upload/version.py
index 24911c5d..8b896956 100644
--- a/StreamingCommunity/Upload/version.py
+++ b/StreamingCommunity/Upload/version.py
@@ -1,5 +1,5 @@
__title__ = 'StreamingCommunity'
-__version__ = '3.2.7'
+__version__ = '3.2.8'
__author__ = 'Arrowar'
__description__ = 'A command-line program to download film'
__copyright__ = 'Copyright 2025'
diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py
index 23958455..d1a8fb41 100644
--- a/StreamingCommunity/Util/config_json.py
+++ b/StreamingCommunity/Util/config_json.py
@@ -18,8 +18,6 @@
# Variable
console = Console()
-download_site_data = True
-validate_github_config = True
class ConfigManager:
@@ -54,8 +52,7 @@ def __init__(self, file_name: str = 'config.json') -> None:
self.configSite = {}
self.cache = {}
- self.use_api = False
- self.download_site_data = False
+ self.fetch_domain_online = True
self.validate_github_config = False
console.print(f"[bold cyan]Initializing ConfigManager:[/bold cyan] [green]{self.file_path}[/green]")
@@ -67,7 +64,7 @@ def load_config(self) -> None:
"""Load the configuration and initialize all settings."""
if not os.path.exists(self.file_path):
console.print(f"[bold red]WARNING: Configuration file not found:[/bold red] {self.file_path}")
- console.print(f"[bold yellow]Attempting to download from reference repository...[/bold yellow]")
+ console.print("[bold yellow]Attempting to download from reference repository...[/bold yellow]")
self._download_reference_config()
# Load the configuration file
@@ -85,11 +82,8 @@ def load_config(self) -> None:
else:
console.print("[bold yellow]GitHub validation disabled[/bold yellow]")
- # Load site data if requested
- if self.download_site_data:
- self._load_site_data()
- else:
- console.print("[bold yellow]Site data download disabled[/bold yellow]")
+ # Load site data based on fetch_domain_online setting
+ self._load_site_data()
except json.JSONDecodeError as e:
console.print(f"[bold red]Error parsing JSON:[/bold red] {str(e)}")
@@ -119,18 +113,11 @@ def _update_settings_from_config(self) -> None:
"""Update internal settings from loaded configurations."""
default_section = self.config.get('DEFAULT', {})
- # Save local values in temporary variables
- temp_use_api = default_section.get('use_api', False)
- temp_download_site_data = default_section.get('download_site_data', False)
- temp_validate_github_config = default_section.get('validate_github_config', False)
-
- # Update settings with found values (False by default)
- self.use_api = temp_use_api
- self.download_site_data = temp_download_site_data
- self.validate_github_config = temp_validate_github_config
+ # Get fetch_domain_online setting (True by default)
+ self.fetch_domain_online = default_section.get('fetch_domain_online', True)
+ self.validate_github_config = default_section.get('validate_github_config', False)
- console.print(f"[bold cyan]API Usage:[/bold cyan] [{'green' if self.use_api else 'yellow'}]{self.use_api}[/{'green' if self.use_api else 'yellow'}]")
- console.print(f"[bold cyan]Site data download:[/bold cyan] [{'green' if self.download_site_data else 'yellow'}]{self.download_site_data}[/{'green' if self.download_site_data else 'yellow'}]")
+ console.print(f"[bold cyan]Fetch domains online:[/bold cyan] [{'green' if self.fetch_domain_online else 'yellow'}]{self.fetch_domain_online}[/{'green' if self.fetch_domain_online else 'yellow'}]")
console.print(f"[bold cyan]GitHub configuration validation:[/bold cyan] [{'green' if self.validate_github_config else 'yellow'}]{self.validate_github_config}[/{'green' if self.validate_github_config else 'yellow'}]")
def _download_reference_config(self) -> None:
@@ -159,7 +146,7 @@ def _validate_and_update_config(self) -> None:
"""Validate the local configuration against the reference one and update missing keys."""
try:
# Download the reference configuration
- console.print(f"[bold cyan]Validating configuration with GitHub...[/bold cyan]")
+ console.print("[bold cyan]Validating configuration with GitHub...[/bold cyan]")
response = requests.get(self.reference_config_url, timeout=8, headers={'User-Agent': get_userAgent()})
if not response.ok:
@@ -242,11 +229,9 @@ def _deep_merge_configs(self, local_config: dict, reference_config: dict) -> dic
# Make sure control keys maintain local values
merged_section = self._deep_merge_configs(merged[key], value)
- # Preserve local values for the three critical settings
- if 'use_api' in merged[key]:
- merged_section['use_api'] = merged[key]['use_api']
- if 'download_site_data' in merged[key]:
- merged_section['download_site_data'] = merged[key]['download_site_data']
+ # Preserve local values for critical settings
+ if 'fetch_domain_online' in merged[key]:
+ merged_section['fetch_domain_online'] = merged[key]['fetch_domain_online']
if 'validate_github_config' in merged[key]:
merged_section['validate_github_config'] = merged[key]['validate_github_config']
@@ -259,28 +244,31 @@ def _deep_merge_configs(self, local_config: dict, reference_config: dict) -> dic
return merged
def _load_site_data(self) -> None:
- """Load site data from API or local file."""
- if self.use_api:
- self._load_site_data_from_api()
+ """Load site data based on fetch_domain_online setting."""
+ if self.fetch_domain_online:
+ self._load_site_data_online()
else:
self._load_site_data_from_file()
- def _load_site_data_from_api(self) -> None:
- """Load site data from GitHub."""
+ def _load_site_data_online(self) -> None:
+ """Load site data from GitHub and update local domains.json file."""
domains_github_url = "https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/.github/.domain/domains.json"
headers = {
"User-Agent": get_userAgent()
}
try:
- console.print("[bold cyan]Retrieving site data from GitHub:[/bold cyan]")
+ console.print("[bold cyan]Fetching domains from GitHub:[/bold cyan]")
response = requests.get(domains_github_url, timeout=8, headers=headers)
if response.ok:
self.configSite = response.json()
+ # Determine which file to save to
+ self._save_domains_to_appropriate_location()
+
site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
- console.print(f"[bold green]Site data loaded from GitHub:[/bold green] {site_count} streaming services found.")
+ console.print(f"[bold green]Domains loaded from GitHub:[/bold green] {site_count} streaming services found.")
else:
console.print(f"[bold red]GitHub request failed:[/bold red] HTTP {response.status_code}, {response.text[:100]}")
@@ -294,42 +282,129 @@ def _load_site_data_from_api(self) -> None:
console.print(f"[bold red]GitHub connection error:[/bold red] {str(e)}")
self._handle_site_data_fallback()
+ def _save_domains_to_appropriate_location(self) -> None:
+ """Save domains to the appropriate location based on existing files."""
+ if getattr(sys, 'frozen', False):
+ # If the application is frozen (e.g., PyInstaller)
+ base_path = os.path.dirname(sys.executable)
+ else:
+ # Use the current working directory where the script is executed
+ base_path = os.getcwd()
+
+ # Check for GitHub structure first
+ github_domains_path = os.path.join(base_path, '.github', '.domain', 'domains.json')
+
+ try:
+ if os.path.exists(github_domains_path):
+
+ # Update existing GitHub structure file
+ with open(github_domains_path, 'w', encoding='utf-8') as f:
+ json.dump(self.configSite, f, indent=4, ensure_ascii=False)
+ console.print(f"[bold green]Domains updated in GitHub structure:[/bold green] {github_domains_path}")
+
+ elif not os.path.exists(self.domains_path):
+
+ # Save to root only if it doesn't exist and GitHub structure doesn't exist
+ with open(self.domains_path, 'w', encoding='utf-8') as f:
+ json.dump(self.configSite, f, indent=4, ensure_ascii=False)
+ console.print(f"[bold green]Domains saved to:[/bold green] {self.domains_path}")
+
+ else:
+
+ # Root file exists, don't overwrite it
+ console.print(f"[bold yellow]Local domains.json already exists, not overwriting:[/bold yellow] {self.domains_path}")
+ console.print("[bold yellow]Tip: Delete the file if you want to recreate it from GitHub[/bold yellow]")
+
+ except Exception as save_error:
+ console.print(f"[bold yellow]Warning: Could not save domains to file:[/bold yellow] {str(save_error)}")
+
+ # Try to save to root as fallback only if it doesn't exist
+ if not os.path.exists(self.domains_path):
+ try:
+ with open(self.domains_path, 'w', encoding='utf-8') as f:
+ json.dump(self.configSite, f, indent=4, ensure_ascii=False)
+ console.print(f"[bold green]Domains saved to fallback location:[/bold green] {self.domains_path}")
+ except Exception as fallback_error:
+ console.print(f"[bold red]Failed to save to fallback location:[/bold red] {str(fallback_error)}")
+
def _load_site_data_from_file(self) -> None:
- """Load site data from local file."""
+ """Load site data from local domains.json file."""
try:
- if os.path.exists(self.domains_path):
- console.print(f"[bold cyan]Reading domains from:[/bold cyan] {self.domains_path}")
- with open(self.domains_path, 'r') as f:
+ # Determine the base path
+ if getattr(sys, 'frozen', False):
+
+ # If the application is frozen (e.g., PyInstaller)
+ base_path = os.path.dirname(sys.executable)
+ else:
+
+ # Use the current working directory where the script is executed
+ base_path = os.getcwd()
+
+ # Check for GitHub structure first
+ github_domains_path = os.path.join(base_path, '.github', '.domain', 'domains.json')
+
+ if os.path.exists(github_domains_path):
+ console.print(f"[bold cyan]Reading domains from GitHub structure:[/bold cyan] {github_domains_path}")
+ with open(github_domains_path, 'r', encoding='utf-8') as f:
+ self.configSite = json.load(f)
+
+ site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
+ console.print(f"[bold green]Domains loaded from GitHub structure:[/bold green] {site_count} streaming services")
+
+ elif os.path.exists(self.domains_path):
+ console.print(f"[bold cyan]Reading domains from root:[/bold cyan] {self.domains_path}")
+ with open(self.domains_path, 'r', encoding='utf-8') as f:
self.configSite = json.load(f)
site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
- console.print(f"[bold green]Site data loaded from file:[/bold green] {site_count} streaming services")
+ console.print(f"[bold green]Domains loaded from root file:[/bold green] {site_count} streaming services")
else:
- error_msg = f"domains.json not found at {self.domains_path} and API usage is disabled"
+ error_msg = f"domains.json not found in GitHub structure ({github_domains_path}) or root ({self.domains_path}) and fetch_domain_online is disabled"
console.print(f"[bold red]Configuration error:[/bold red] {error_msg}")
- self._handle_site_data_fallback()
+ console.print("[bold yellow]Tip: Set 'fetch_domain_online' to true to download domains from GitHub[/bold yellow]")
+ self.configSite = {}
except Exception as e:
- console.print(f"[bold red]Domain file error:[/bold red] {str(e)}")
- self._handle_site_data_fallback()
+ console.print(f"[bold red]Local domain file error:[/bold red] {str(e)}")
+ self.configSite = {}
def _handle_site_data_fallback(self) -> None:
"""Handle site data fallback in case of error."""
- if self.use_api and os.path.exists(self.domains_path):
- console.print("[bold yellow]Attempting fallback to local domains.json file...[/bold yellow]")
-
+ # Determine the base path
+ if getattr(sys, 'frozen', False):
+
+ # If the application is frozen (e.g., PyInstaller)
+ base_path = os.path.dirname(sys.executable)
+ else:
+ # Use the current working directory where the script is executed
+ base_path = os.getcwd()
+
+ # Check for GitHub structure first
+ github_domains_path = os.path.join(base_path, '.github', '.domain', 'domains.json')
+
+ if os.path.exists(github_domains_path):
+ console.print("[bold yellow]Attempting fallback to GitHub structure domains.json file...[/bold yellow]")
try:
- with open(self.domains_path, 'r') as f:
+ with open(github_domains_path, 'r', encoding='utf-8') as f:
self.configSite = json.load(f)
- console.print("[bold green]Fallback to local data successful[/bold green]")
+ console.print("[bold green]Fallback to GitHub structure successful[/bold green]")
+ return
except Exception as fallback_error:
- console.print(f"[bold red]Fallback also failed:[/bold red] {str(fallback_error)}")
- self.configSite = {}
- else:
-
- # Initialize with an empty dictionary if there are no alternatives
- self.configSite = {}
+ console.print(f"[bold red]GitHub structure fallback failed:[/bold red] {str(fallback_error)}")
+
+ if os.path.exists(self.domains_path):
+ console.print("[bold yellow]Attempting fallback to root domains.json file...[/bold yellow]")
+ try:
+ with open(self.domains_path, 'r', encoding='utf-8') as f:
+ self.configSite = json.load(f)
+ console.print("[bold green]Fallback to root domains successful[/bold green]")
+ return
+ except Exception as fallback_error:
+ console.print(f"[bold red]Root domains fallback failed:[/bold red] {str(fallback_error)}")
+
+ console.print("[bold red]No local domains.json file available for fallback[/bold red]")
+ self.configSite = {}
def download_file(self, url: str, filename: str) -> None:
"""
@@ -412,23 +487,28 @@ def _convert_to_data_type(self, value: Any, data_type: type) -> Any:
Any: Converted value
"""
try:
- if data_type == int:
+ if data_type is int:
return int(value)
- elif data_type == float:
+
+ elif data_type is float:
return float(value)
- elif data_type == bool:
+
+ elif data_type is bool:
if isinstance(value, str):
return value.lower() in ("yes", "true", "t", "1")
return bool(value)
- elif data_type == list:
+
+ elif data_type is list:
if isinstance(value, list):
return value
if isinstance(value, str):
return [item.strip() for item in value.split(',')]
return [value]
- elif data_type == dict:
+
+ elif data_type is dict:
if isinstance(value, dict):
return value
+
raise ValueError(f"Cannot convert {type(value).__name__} to dict")
else:
return value
diff --git a/StreamingCommunity/Util/http_client.py b/StreamingCommunity/Util/http_client.py
new file mode 100644
index 00000000..3aa31b5b
--- /dev/null
+++ b/StreamingCommunity/Util/http_client.py
@@ -0,0 +1,201 @@
+# 09.08.25
+from __future__ import annotations
+
+import time
+import random
+from typing import Any, Dict, Optional, Union
+
+
+# External library
+import httpx
+
+
+# Logic class
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.headers import get_userAgent
+
+
+# Defaults from config
def _get_timeout() -> int:
    """Return the request timeout in seconds from config; 20 on any failure."""
    try:
        return int(config_manager.get_int("REQUESTS", "timeout"))
    except Exception:
        return 20
+
+
def _get_max_retry() -> int:
    """Return the maximum number of request attempts from config; 3 on any failure."""
    try:
        return int(config_manager.get_int("REQUESTS", "max_retry"))
    except Exception:
        return 3
+
+
def _get_verify() -> bool:
    """Return the TLS-verification flag from config; True (verify) on any failure."""
    try:
        return bool(config_manager.get_bool("REQUESTS", "verify"))
    except Exception:
        return True
+
+
def _get_proxies() -> Optional[Dict[str, str]]:
    """Return proxies dict if present in config and non-empty, else None."""
    try:
        raw = config_manager.get_dict("REQUESTS", "proxy")
        if not isinstance(raw, dict):
            return None
        # Keep only non-blank string URLs, stripped of surrounding whitespace.
        cleaned: Dict[str, str] = {
            scheme: url.strip()
            for scheme, url in raw.items()
            if isinstance(url, str) and url.strip()
        }
        return cleaned or None
    except Exception:
        return None
+
+
def _default_headers(extra: Optional[Dict[str, str]] = None) -> Dict[str, str]:
    """Build base headers (random User-Agent), merged with any caller extras."""
    merged: Dict[str, str] = {"User-Agent": get_userAgent()}
    merged.update(extra or {})
    return merged
+
+
def create_client(
    *,
    headers: Optional[Dict[str, str]] = None,
    cookies: Optional[Dict[str, str]] = None,
    timeout: Optional[Union[int, float]] = None,
    verify: Optional[bool] = None,
    proxies: Optional[Dict[str, str]] = None,
    http2: bool = False,
    follow_redirects: bool = True,
) -> httpx.Client:
    """Factory for a configured httpx.Client.

    Unset options fall back to the config-derived defaults.

    Fix: the per-scheme proxy mapping is passed via the ``proxies`` keyword
    (a dict), matching create_async_client. httpx's ``proxy`` keyword accepts
    only a single proxy URL, so handing it the dict from _get_proxies() was
    incorrect.
    """
    return httpx.Client(
        headers=_default_headers(headers),
        cookies=cookies,
        timeout=timeout if timeout is not None else _get_timeout(),
        verify=_get_verify() if verify is None else verify,
        follow_redirects=follow_redirects,
        http2=http2,
        proxies=proxies if proxies is not None else _get_proxies(),
    )
+
+
def create_async_client(
    *,
    headers: Optional[Dict[str, str]] = None,
    cookies: Optional[Dict[str, str]] = None,
    timeout: Optional[Union[int, float]] = None,
    verify: Optional[bool] = None,
    proxies: Optional[Dict[str, str]] = None,
    http2: bool = False,
    follow_redirects: bool = True,
) -> httpx.AsyncClient:
    """Factory for a configured httpx.AsyncClient; unset options use config defaults."""
    effective_timeout = _get_timeout() if timeout is None else timeout
    effective_verify = verify if verify is not None else _get_verify()
    effective_proxies = _get_proxies() if proxies is None else proxies

    return httpx.AsyncClient(
        headers=_default_headers(headers),
        cookies=cookies,
        timeout=effective_timeout,
        verify=effective_verify,
        follow_redirects=follow_redirects,
        http2=http2,
        proxies=effective_proxies,
    )
+
+
+def _sleep_with_backoff(attempt: int, base: float = 1.1, cap: float = 10.0) -> None:
+ """Exponential backoff with jitter."""
+ delay = min(base * (2 ** attempt), cap)
+ # Add small jitter to avoid thundering herd
+ delay += random.uniform(0.0, 0.25)
+ time.sleep(delay)
+
+
def fetch(
    url: str,
    *,
    method: str = "GET",
    params: Optional[Dict[str, Any]] = None,
    data: Optional[Any] = None,
    json: Optional[Any] = None,
    headers: Optional[Dict[str, str]] = None,
    cookies: Optional[Dict[str, str]] = None,
    timeout: Optional[Union[int, float]] = None,
    verify: Optional[bool] = None,
    proxies: Optional[Dict[str, str]] = None,
    follow_redirects: bool = True,
    http2: bool = False,
    max_retry: Optional[int] = None,
    return_content: bool = False,
) -> Optional[Union[str, bytes]]:
    """
    Perform an HTTP request with retry. Returns text or bytes according to return_content.
    Returns None if all retries fail.
    """
    total_attempts = _get_max_retry() if max_retry is None else max_retry

    with create_client(
        headers=headers,
        cookies=cookies,
        timeout=timeout,
        verify=verify,
        proxies=proxies,
        http2=http2,
        follow_redirects=follow_redirects,
    ) as client:
        for current in range(total_attempts):
            try:
                response = client.request(method, url, params=params, data=data, json=json)
                response.raise_for_status()
            except Exception:
                # Out of attempts: fall through and report failure as None.
                if current + 1 >= total_attempts:
                    break
                _sleep_with_backoff(current)
            else:
                return response.content if return_content else response.text
    return None
+
+
async def async_fetch(
    url: str,
    *,
    method: str = "GET",
    params: Optional[Dict[str, Any]] = None,
    data: Optional[Any] = None,
    json: Optional[Any] = None,
    headers: Optional[Dict[str, str]] = None,
    cookies: Optional[Dict[str, str]] = None,
    timeout: Optional[Union[int, float]] = None,
    verify: Optional[bool] = None,
    proxies: Optional[Dict[str, str]] = None,
    follow_redirects: bool = True,
    http2: bool = False,
    max_retry: Optional[int] = None,
    return_content: bool = False,
) -> Optional[Union[str, bytes]]:
    """
    Async HTTP request with retry. Returns text or bytes according to return_content.
    Returns None if all retries fail.

    Fix: backoff now awaits asyncio.sleep instead of calling the blocking
    _sleep_with_backoff (time.sleep), which stalled the whole event loop.
    """
    import asyncio  # local import: keeps the module's import surface unchanged

    attempts = max_retry if max_retry is not None else _get_max_retry()

    async with create_async_client(
        headers=headers,
        cookies=cookies,
        timeout=timeout,
        verify=verify,
        proxies=proxies,
        http2=http2,
        follow_redirects=follow_redirects,
    ) as client:
        for attempt in range(attempts):
            try:
                resp = await client.request(method, url, params=params, data=data, json=json)
                resp.raise_for_status()
                return resp.content if return_content else resp.text
            except Exception:
                if attempt + 1 >= attempts:
                    break
                # Same schedule as _sleep_with_backoff, but non-blocking.
                delay = min(1.1 * (2 ** attempt), 10.0) + random.uniform(0.0, 0.25)
                await asyncio.sleep(delay)
    return None
\ No newline at end of file
diff --git a/StreamingCommunity/Util/message.py b/StreamingCommunity/Util/message.py
index 75a32819..838bbdb9 100644
--- a/StreamingCommunity/Util/message.py
+++ b/StreamingCommunity/Util/message.py
@@ -14,7 +14,7 @@
# Variable
console = Console()
-CLEAN = config_manager.get_bool('DEFAULT', 'clean_console')
+CLEAN = config_manager.get_bool('DEFAULT', 'show_message')
SHOW = config_manager.get_bool('DEFAULT', 'show_message')
diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py
index 6eb2d144..da6b7550 100644
--- a/StreamingCommunity/Util/os.py
+++ b/StreamingCommunity/Util/os.py
@@ -5,7 +5,6 @@
import glob
import sys
import shutil
-import hashlib
import logging
import platform
import inspect
@@ -432,10 +431,11 @@ def get_system_summary(self):
if not self.mp4decrypt_path:
console.log("[yellow]Warning: mp4decrypt not found")
-
- console.print(f"[cyan]Path: [red]ffmpeg [bold yellow]'{self.ffmpeg_path}'[/bold yellow][white], [red]ffprobe '[bold yellow]{self.ffprobe_path}'[/bold yellow]")
- if self.mp4decrypt_path:
- console.print(f"[cyan]Path: [red]mp4decrypt [bold yellow]'{self.mp4decrypt_path}'[/bold yellow]")
+
+ ffmpeg_str = f"'{self.ffmpeg_path}'" if self.ffmpeg_path else "None"
+ ffprobe_str = f"'{self.ffprobe_path}'" if self.ffprobe_path else "None"
+ mp4decrypt_str = f"'{self.mp4decrypt_path}'" if self.mp4decrypt_path else "None"
+ console.print(f"[cyan]Path: [red]ffmpeg [bold yellow]{ffmpeg_str}[/bold yellow][white], [red]ffprobe [bold yellow]{ffprobe_str}[/bold yellow][white], [red]mp4decrypt [bold yellow]{mp4decrypt_str}[/bold yellow]")
os_manager = OsManager()
diff --git a/StreamingCommunity/Util/table.py b/StreamingCommunity/Util/table.py
index 7051e7bd..f4522cc3 100644
--- a/StreamingCommunity/Util/table.py
+++ b/StreamingCommunity/Util/table.py
@@ -107,7 +107,7 @@ def run_back_command(research_func: dict) -> None:
search_func = getattr(module, 'search')
search_func(None)
- except Exception as e:
+ except Exception:
logging.error("Error during search execution")
finally:
@@ -142,7 +142,7 @@ def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
# Handle pagination and user input
if self.slice_end < total_items:
- self.console.print(f"\n[green]Press [red]Enter [green]for next page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
+ self.console.print("\n[green]Press [red]Enter [green]for next page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
if not force_int_input:
prompt_msg = ("\n[cyan]Insert media index [yellow](e.g., 1), [red]* [cyan]to download all media, "
@@ -184,7 +184,7 @@ def run(self, force_int_input: bool = False, max_int_input: int = 0) -> str:
else:
# Last page handling
- self.console.print(f"\n[green]You've reached the end. [red]Enter [green]for first page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
+ self.console.print("\n[green]You've reached the end. [red]Enter [green]for first page, [red]'q' [green]to quit, or [red]'back' [green]to search.")
if not force_int_input:
prompt_msg = ("\n[cyan]Insert media index [yellow](e.g., 1), [red]* [cyan]to download all media, "
diff --git a/StreamingCommunity/__init__.py b/StreamingCommunity/__init__.py
index 97ce44b8..69fe86ec 100644
--- a/StreamingCommunity/__init__.py
+++ b/StreamingCommunity/__init__.py
@@ -4,4 +4,12 @@
from .Lib.Downloader.HLS.downloader import HLS_Downloader
from .Lib.Downloader.MP4.downloader import MP4_downloader
from .Lib.Downloader.TOR.downloader import TOR_downloader
-from .Lib.Downloader.DASH.downloader import DASH_Downloader
\ No newline at end of file
+from .Lib.Downloader.DASH.downloader import DASH_Downloader
+
+__all__ = [
+ "main",
+ "HLS_Downloader",
+ "MP4_downloader",
+ "TOR_downloader",
+ "DASH_Downloader"
+]
\ No newline at end of file
diff --git a/StreamingCommunity/run.py b/StreamingCommunity/run.py
index aa83dfd6..1a87600f 100644
--- a/StreamingCommunity/run.py
+++ b/StreamingCommunity/run.py
@@ -8,9 +8,11 @@
import platform
import argparse
import importlib
-import threading, asyncio
+import threading
+import asyncio
+import subprocess
from urllib.parse import urlparse
-from typing import Callable
+from typing import Callable, Dict, Tuple
# External library
@@ -22,18 +24,25 @@
from .global_search import global_search
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.config_json import config_manager
-from StreamingCommunity.Util.os import os_summary, internet_manager
+from StreamingCommunity.Util.os import os_summary, internet_manager, os_manager
from StreamingCommunity.Util.logger import Logger
-from StreamingCommunity.Upload.update import update as git_update
from StreamingCommunity.Lib.TMBD import tmdb
+from StreamingCommunity.Upload.update import update as git_update
from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance, TelegramSession
+from StreamingCommunity.Api import http_api
# Config
SHOW_TRENDING = config_manager.get_bool('DEFAULT', 'show_trending')
-NOT_CLOSE_CONSOLE = config_manager.get_bool('DEFAULT', 'not_close')
TELEGRAM_BOT = config_manager.get_bool('DEFAULT', 'telegram_bot')
BYPASS_DNS = config_manager.get_bool('DEFAULT', 'bypass_dns')
+COLOR_MAP = {
+ "anime": "red",
+ "film_&_serie": "yellow",
+ "serie": "blue",
+ "torrent": "white"
+}
+CATEGORY_MAP = {1: "anime", 2: "film_&_serie", 3: "serie", 4: "torrent"}
# Variable
@@ -42,14 +51,7 @@
def run_function(func: Callable[..., None], close_console: bool = False, search_terms: str = None) -> None:
- """
- Run a given function indefinitely or once, depending on the value of close_console.
-
- Parameters:
- func (Callable[..., None]): The function to run.
- close_console (bool, optional): Whether to close the console after running the function once. Defaults to False.
- search_terms (str, optional): Search terms to use for the function. Defaults to None.
- """
+ """Run function once or indefinitely based on close_console flag."""
if close_console:
while 1:
func(search_terms)
@@ -57,124 +59,246 @@ def run_function(func: Callable[..., None], close_console: bool = False, search_
func(search_terms)
-# !!! DA METTERE IN COMUNE CON QUELLA DI GLOBAL
-def load_search_functions():
- modules = []
def load_search_functions() -> Dict[str, Tuple]:
    """Load and return all available search functions from site modules.

    Returns:
        Dict[str, Tuple]: mapping '<module>_search' -> (search callable, use_for tag),
        in ascending order of each module's 'indice' attribute.
    """
    loaded_functions = {}
    excluded_sites = {"cb01new", "guardaserie", "ilcorsaronero", "mostraguarda"} if TELEGRAM_BOT else set()

    # Determine base path (PyInstaller bundle vs source checkout)
    base_path = os.path.join(sys._MEIPASS, "StreamingCommunity") if getattr(sys, 'frozen', False) else os.path.dirname(__file__)
    api_dir = os.path.join(base_path, 'Api', 'Site')

    # Import each site module once, remembering (name, index, use_for, module)
    modules = []
    for init_file in glob.glob(os.path.join(api_dir, '*', '__init__.py')):
        module_name = os.path.basename(os.path.dirname(init_file))

        if module_name in excluded_sites:
            continue

        try:
            mod = importlib.import_module(f'StreamingCommunity.Api.Site.{module_name}')
            if not getattr(mod, '_deprecate', False):
                modules.append((module_name, getattr(mod, 'indice'), getattr(mod, '_useFor'), mod))
                logging.info(f"Load module name: {module_name}")
        except Exception as e:
            console.print(f"[red]Failed to import module {module_name}: {str(e)}")

    # Sort by index and pick up each module's 'search' entry point.
    # Fix: reuse the module object captured above instead of re-importing it.
    for module_name, _, use_for, mod in sorted(modules, key=lambda x: x[1]):
        try:
            loaded_functions[f'{module_name}_search'] = (getattr(mod, 'search'), use_for)
        except Exception as e:
            console.print(f"[red]Failed to load search function from module {module_name}: {str(e)}")

    return loaded_functions
def initialize():
    """Initialize the application with system checks and setup.

    Order matters: banner first, then system summary, platform fixes,
    version gate, optional trending display, and finally a best-effort
    GitHub update check.
    """
    start_message()
    os_summary.get_system_summary()

    # Windows 7 terminal size fix
    if platform.system() == "Windows" and "7" in platform.version():
        os.system('mode 120, 40')

    # Python version check — refuse to run on interpreters that are too old
    if sys.version_info < (3, 7):
        console.log("[red]Install python version > 3.7.16")
        sys.exit(0)

    # Show trending content
    if SHOW_TRENDING:
        print()
        tmdb.display_trending_films()
        tmdb.display_trending_tv_shows()

    # Attempt GitHub update; failure is logged, never fatal
    try:
        git_update()
    except Exception as e:
        console.log(f"[red]Error with loading github: {str(e)}")
+
+
+def _expand_user_path(path: str) -> str:
+ """Expand '~' and environment variables and normalize the path."""
+ if not path:
+ return path
+ return os.path.normpath(os.path.expandvars(os.path.expanduser(path)))
+
+
+def _should_run_on_current_os(hook: dict) -> bool:
+ """Check if a hook is allowed on current OS."""
+ allowed_systems = hook.get('os')
+ if not allowed_systems:
+ return True
+ try:
+ normalized = [str(s).strip().lower() for s in allowed_systems]
+ except Exception:
+ return True
+ return os_manager.system in normalized
+
+
def _build_command_for_hook(hook: dict) -> Tuple[list, dict]:
    """Build the subprocess command and environment for a hook definition."""
    hook_type = str(hook.get('type', '')).strip().lower()
    script_path = hook.get('path')
    inline_command = hook.get('command')
    raw_args = hook.get('args', [])
    extra_env = hook.get('env') or {}
    workdir = hook.get('cwd')

    # Normalize args: accept a space-separated string or a list; else ignore.
    if isinstance(raw_args, str):
        args = [piece for piece in raw_args.split(' ') if piece]
    elif isinstance(raw_args, list):
        args = raw_args
    else:
        args = []

    # Normalize the script path to an absolute, user/env-expanded location.
    if script_path:
        script_path = _expand_user_path(script_path)
        if not os.path.isabs(script_path):
            script_path = os.path.abspath(script_path)

    if workdir:
        workdir = _expand_user_path(workdir)

    # Hook env entries are layered over the current process environment.
    base_env = os.environ.copy()
    base_env.update({str(key): str(value) for key, value in extra_env.items()})

    run_kwargs = {'env': base_env, 'cwd': workdir}

    if hook_type == 'python':
        if not script_path:
            raise ValueError("Missing 'path' for python hook")
        command = [sys.executable, script_path] + args
        return ([part for part in command if part], run_kwargs)

    if os_manager.system in ('linux', 'darwin') and hook_type in ('bash', 'sh', 'shell'):
        if inline_command:
            return (['/bin/bash', '-lc', inline_command], run_kwargs)
        if not script_path:
            raise ValueError("Missing 'path' for bash/sh hook")
        return (['/bin/bash', script_path] + args, run_kwargs)

    if os_manager.system == 'windows' and hook_type in ('bat', 'cmd', 'shell'):
        if inline_command:
            return (['cmd', '/c', inline_command], run_kwargs)
        if not script_path:
            raise ValueError("Missing 'path' for bat/cmd hook")
        return (['cmd', '/c', script_path] + args, run_kwargs)

    raise ValueError(f"Unsupported hook type '{hook_type}' on OS '{os_manager.system}'")
+
+
def _iter_hooks(stage: str):
    """Yield hook dicts for a given stage ('pre_run' | 'post_run')."""
    try:
        configured = config_manager.config.get('HOOKS', {}).get(stage, []) or []
    except Exception:
        # Missing/malformed HOOKS section: yield nothing.
        return
    if not isinstance(configured, list):
        return
    for entry in configured:
        if isinstance(entry, dict):
            yield entry
+
+
def _report_hook_output(name: str, result) -> None:
    """Log and best-effort echo a completed hook's stdout/stderr."""
    stdout = (result.stdout or '').strip()
    stderr = (result.stderr or '').strip()
    if stdout:
        logging.info(f"Hook '{name}' stdout: {stdout}")
        try:
            console.print(f"[cyan][hook:{name} stdout][/cyan]\n{stdout}")
        except Exception:
            pass
    if stderr:
        logging.warning(f"Hook '{name}' stderr: {stderr}")
        try:
            console.print(f"[yellow][hook:{name} stderr][/yellow]\n{stderr}")
        except Exception:
            pass


def execute_hooks(stage: str) -> None:
    """Execute configured hooks for the given stage. Stage can be 'pre_run' or 'post_run'.

    Each hook dict may carry: name, enabled, continue_on_error, timeout, plus the
    fields consumed by _build_command_for_hook. On failure/timeout the hook either
    logs and continues or aborts the program, per continue_on_error.
    """
    stage = str(stage).strip().lower()
    if stage not in ('pre_run', 'post_run'):
        return

    for hook in _iter_hooks(stage):
        name = hook.get('name') or f"{stage}_hook"
        continue_on_error = hook.get('continue_on_error', True)
        timeout = hook.get('timeout')

        if not hook.get('enabled', True):
            logging.info(f"Skip hook (disabled): {name}")
            continue

        if not _should_run_on_current_os(hook):
            logging.info(f"Skip hook (OS filter): {name}")
            continue

        try:
            command, popen_kwargs = _build_command_for_hook(hook)
            logging.info(f"Running hook: {name} -> {' '.join(command)}")

            # Single call site: subprocess.run natively accepts timeout=None,
            # replacing the duplicated with/without-timeout branches.
            result = subprocess.run(
                command, check=False, capture_output=True, text=True,
                timeout=int(timeout) if timeout is not None else None,
                **popen_kwargs,
            )

            _report_hook_output(name, result)

            if result.returncode != 0:
                message = f"Hook '{name}' exited with code {result.returncode}"
                if continue_on_error:
                    logging.error(message + " (continuing)")
                    continue
                logging.error(message + " (stopping)")
                # SystemExit is a BaseException, so the broad handler below won't eat it.
                raise SystemExit(result.returncode)

        except subprocess.TimeoutExpired:
            message = f"Hook '{name}' timed out"
            if continue_on_error:
                logging.error(message + " (continuing)")
                continue
            logging.error(message + " (stopping)")
            raise SystemExit(124)
        except Exception as e:
            message = f"Hook '{name}' failed: {str(e)}"
            if continue_on_error:
                logging.error(message + " (continuing)")
                continue
            logging.error(message + " (stopping)")
            raise
def restart_script():
    """Restart script with same command line arguments.

    Replaces the current process image via os.execv; this call does not return.
    """
    print("\nRiavvio dello script...\n")
    os.execv(sys.executable, [sys.executable] + sys.argv)
def force_exit():
- """Forza la chiusura dello script in qualsiasi contesto."""
-
+ """Force script termination in any context."""
print("\nChiusura dello script in corso...")
-
- # 1 Chiudi tutti i thread tranne il principale
+
+ # Close all threads except main
for t in threading.enumerate():
if t is not threading.main_thread():
print(f"Chiusura thread: {t.name}")
t.join(timeout=1)
-
- # 2 Ferma asyncio, se attivo
+
+ # Stop asyncio if active
try:
loop = asyncio.get_event_loop()
if loop.is_running():
@@ -182,14 +306,14 @@ def force_exit():
loop.stop()
except RuntimeError:
pass
-
- # 3 Esce con sys.exit(), se fallisce usa os._exit()
+
+ # Exit gracefully or force
try:
print("Uscita con sys.exit(0)")
sys.exit(0)
except SystemExit:
pass
-
+
print("Uscita forzata con os._exit(0)")
os._exit(0)
@@ -199,8 +323,51 @@ def _extract_hostname(url_string: str) -> str:
return urlparse(url_string).hostname
-def main(script_id = 0):
def check_dns_and_exit_if_needed():
    """Check DNS configuration and exit if required."""
    if BYPASS_DNS:
        return

    # Gather resolvable hostnames from every configured site URL.
    hostname_list = []
    for site_info in config_manager.configSite.values():
        resolved = _extract_hostname(site_info.get('full_url'))
        if resolved:
            hostname_list.append(resolved)

    if not internet_manager.check_dns_resolve(hostname_list):
        console.print("[red] ERROR: DNS configuration is required!")
        console.print("[red]The program cannot function correctly without proper DNS settings.")
        console.print("[yellow]Please configure one of these DNS servers:")
        console.print("[red]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
        console.print("[red]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
        console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
        os._exit(0)
+
+
def setup_argument_parser(search_functions):
    """Setup and return configured argument parser."""
    # Map each site module to its numeric index; skip modules that fail to load.
    module_info = {}
    for alias in search_functions:
        site_name = alias.split("_")[0].lower()
        try:
            site_module = importlib.import_module(f'StreamingCommunity.Api.Site.{site_name}')
        except Exception:
            continue
        try:
            module_info[site_name] = int(getattr(site_module, 'indice'))
        except Exception:
            continue

    available_names = ", ".join(sorted(module_info))
    ordered = sorted(module_info.items(), key=lambda pair: pair[1])
    available_indices = ", ".join(f"{idx}={name.capitalize()}" for name, idx in ordered)

    parser = argparse.ArgumentParser(
        description=f"StreamingCommunity (available sites: {available_names})"
    )
    parser.add_argument('--site', help=f"Site name ({available_names})")
    parser.add_argument('--index', type=int, help=f"Site index ({available_indices})")
    return parser
+
+
+def main(script_id = 0):
color_map = {
"anime": "red",
"film_&_serie": "yellow",
@@ -225,103 +392,92 @@ def main(script_id = 0):
log_not = Logger()
initialize()
- # Get all site hostname
- hostname_list = [hostname for site_info in config_manager.configSite.values() if (hostname := _extract_hostname(site_info.get('full_url')))]
-
- if not BYPASS_DNS:
- if not internet_manager.check_dns_resolve(hostname_list):
- console.print("[red] ERROR: DNS configuration is required!")
- console.print("[red]The program cannot function correctly without proper DNS settings.")
- console.print("[yellow]Please configure one of these DNS servers:")
- console.print("[red]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
- console.print("[red]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
- console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
+ # Optionally expose HTTP API
+ try:
+ if config_manager.get_bool('DEFAULT', 'expose_http_api'):
+ t = threading.Thread(target=http_api.start_api_server, daemon=True)
+ t.start()
+ console.print(f"[green]HTTP API exposed on port {config_manager.get('DEFAULT','http_api_port')}[/green]")
+ console.print(
+ "[yellow]Non-interactive mode: HTTP API is enabled. Console input is disabled; use the HTTP API only. Press Ctrl+C to exit.[/yellow]"
+ )
+ try:
+ while True:
+ time.sleep(1)
+ except KeyboardInterrupt:
+ force_exit()
+ except Exception as e:
+ console.print(f"[yellow]Unable to start HTTP API server: {e}")
- os._exit(0)
+ # DNS check (moved to helper)
+ check_dns_and_exit_if_needed()
# Load search functions
search_functions = load_search_functions()
logging.info(f"Load module in: {time.time() - start} s")
- # Create argument parser
+ # Create / configure argument parser
+ parser = setup_argument_parser(search_functions)
+ args = parser.parse_args()
+
parser = argparse.ArgumentParser(
- description='Script to download movies and series from the internet. Use these commands to configure the script and control its behavior.'
+ description='Script to download movies and series from the internet.',
+ formatter_class=argparse.RawTextHelpFormatter,
+ epilog=f"Available sites by name: {available_names}\nAvailable sites by index: {available_indices}"
)
-
+
+ # Add arguments
parser.add_argument("script_id", nargs="?", default="unknown", help="ID dello script")
-
- # Add arguments for the main configuration parameters
- parser.add_argument(
- '--add_siteName', type=bool, help='Enable or disable adding the site name to the file name (e.g., true/false).'
- )
- parser.add_argument(
- '--not_close', type=bool, help='If set to true, the script will not close the console after execution (e.g., true/false).'
- )
-
- # Add arguments for M3U8 configuration
- parser.add_argument(
- '--default_video_worker', type=int, help='Number of workers for video during M3U8 download (default: 12).'
- )
- parser.add_argument(
- '--default_audio_worker', type=int, help='Number of workers for audio during M3U8 download (default: 12).'
- )
-
- # Add options for audio and subtitles
- parser.add_argument(
- '--specific_list_audio', type=str, help='Comma-separated list of specific audio languages to download (e.g., ita,eng).'
- )
- parser.add_argument(
- '--specific_list_subtitles', type=str, help='Comma-separated list of specific subtitle languages to download (e.g., eng,spa).'
- )
-
- # Add global search option
- parser.add_argument(
- '--global', action='store_true', help='Perform a global search across multiple sites.'
- )
-
- # Add category selection argument
- parser.add_argument(
- '--category', type=int, help='Select category directly (1: anime, 2: film_&_serie, 3: serie, 4: torrent).'
- )
-
- # Add arguments for search functions
+ parser.add_argument('--add_siteName', type=bool, help='Enable/disable adding site name to file name')
+ parser.add_argument('--not_close', type=bool, help='Keep console open after execution')
+ parser.add_argument('--default_video_worker', type=int, help='Video workers for M3U8 download (default: 12)')
+ parser.add_argument('--default_audio_worker', type=int, help='Audio workers for M3U8 download (default: 12)')
+ parser.add_argument('--specific_list_audio', type=str, help='Audio languages (e.g., ita,eng)')
+ parser.add_argument('--specific_list_subtitles', type=str, help='Subtitle languages (e.g., eng,spa)')
+ parser.add_argument('--global', action='store_true', help='Global search across sites')
+ parser.add_argument('--category', type=int, help='Category (1: anime, 2: film_&_serie, 3: serie, 4: torrent)')
parser.add_argument('-s', '--search', default=None, help='Search terms')
+ parser.add_argument('--auto-first', action='store_true', help='Auto-download first result (use with --site and --search)')
+ parser.add_argument('--site', type=str, help='Site by name or index')
- # Parse command-line arguments
- args = parser.parse_args()
+ return parser
- search_terms = args.search
- # Map command-line arguments to the config values
- config_updates = {}
- if args.add_siteName is not None:
- config_updates['DEFAULT.add_siteName'] = args.add_siteName
- if args.not_close is not None:
- config_updates['DEFAULT.not_close'] = args.not_close
- if args.default_video_worker is not None:
- config_updates['M3U8_DOWNLOAD.default_video_worker'] = args.default_video_worker
- if args.default_audio_worker is not None:
- config_updates['M3U8_DOWNLOAD.default_audio_worker'] = args.default_audio_worker
def apply_config_updates(args):
    """Apply command line arguments to configuration."""
    pending = {}

    # Scalar flags map 1:1 onto 'SECTION.option' keys.
    scalar_map = {
        'add_siteName': 'DEFAULT.add_siteName',
        'not_close': 'DEFAULT.not_close',
        'default_video_worker': 'M3U8_DOWNLOAD.default_video_worker',
        'default_audio_worker': 'M3U8_DOWNLOAD.default_audio_worker'
    }
    for attr, target in scalar_map.items():
        supplied = getattr(args, attr)
        if supplied is not None:
            pending[target] = supplied

    # Comma-separated language lists are split before storing.
    if args.specific_list_audio is not None:
        pending['M3U8_DOWNLOAD.specific_list_audio'] = args.specific_list_audio.split(',')
    if args.specific_list_subtitles is not None:
        pending['M3U8_DOWNLOAD.specific_list_subtitles'] = args.specific_list_subtitles.split(',')

    # Persist only when something actually changed.
    for dotted_key, new_value in pending.items():
        section, option = dotted_key.split('.')
        config_manager.set_key(section, option, new_value)

    if pending:
        config_manager.save_config()
- config_manager.save_config()
-
- # Check if global search is requested
- if getattr(args, 'global'):
- global_search(search_terms)
- return
- # Create mappings using module indice
+def build_function_mappings(search_functions):
+ """Build mappings between indices/names and functions."""
input_to_function = {}
choice_labels = {}
+ module_name_to_function = {}
for alias, (func, use_for) in search_functions.items():
module_name = alias.split("_")[0]
@@ -330,84 +486,131 @@ def main(script_id = 0):
site_index = str(getattr(mod, 'indice'))
input_to_function[site_index] = func
choice_labels[site_index] = (module_name.capitalize(), use_for.lower())
+ module_name_to_function[module_name.lower()] = func
except Exception as e:
console.print(f"[red]Error mapping module {module_name}: {str(e)}")
+
+ return input_to_function, choice_labels, module_name_to_function
- if args.category:
- selected_category = category_map.get(args.category)
- category_sites = []
- for key, label in choice_labels.items():
- if label[1] == selected_category:
- category_sites.append((key, label[0]))
+def handle_direct_site_selection(args, input_to_function, module_name_to_function, search_terms):
+ """Handle direct site selection via command line."""
+ if not args.site:
+ return False
+
+ site_key = str(args.site).strip().lower()
+ func_to_run = input_to_function.get(site_key) or module_name_to_function.get(site_key)
+
+ if func_to_run is None:
+ available_sites = ", ".join(sorted(module_name_to_function.keys()))
+ console.print(f"[red]Unknown site:[/red] '{args.site}'. Available: [yellow]{available_sites}[/yellow]")
+ return False
+
+ # Handle auto-first option
+ if args.auto_first and search_terms:
+ try:
+ database = func_to_run(search_terms, get_onlyDatabase=True)
+ if database and hasattr(database, 'media_list') and database.media_list:
+ first_item = database.media_list[0]
+ item_dict = first_item.__dict__.copy() if hasattr(first_item, '__dict__') else {}
+ func_to_run(direct_item=item_dict)
+ return True
+ else:
+ console.print("[yellow]No results found. Falling back to interactive mode.[/yellow]")
+ except Exception as e:
+ console.print(f"[red]Auto-first failed:[/red] {str(e)}")
+
+ run_function(func_to_run, search_terms=search_terms)
+ return True
+
+
+def get_user_site_selection(args, choice_labels):
+ """Get site selection from user (interactive or category-based)."""
+ bot = get_bot_instance() if TELEGRAM_BOT else None
+
+ if args.category:
+ selected_category = CATEGORY_MAP.get(args.category)
+ category_sites = [(key, label[0]) for key, label in choice_labels.items() if label[1] == selected_category]
+
if len(category_sites) == 1:
- category = category_sites[0][0]
console.print(f"[green]Selezionato automaticamente: {category_sites[0][1]}[/green]")
-
+ return category_sites[0][0]
+
+ # Multiple sites in category
+ color = COLOR_MAP.get(selected_category, 'white')
+ prompt_items = [f"[{color}]({k}) {v}[/{color}]" for k, v in category_sites]
+ prompt_line = ", ".join(prompt_items)
+
+ if TELEGRAM_BOT:
+ console.print(f"\nInsert site: {prompt_line}")
+ return bot.ask("select_site", f"Insert site: {prompt_line}", None)
else:
- sito_prompt_items = [f"[{color_map.get(selected_category, 'white')}]({k}) {v}[/{color_map.get(selected_category, 'white')}]"
- for k, v in category_sites]
- sito_prompt_line = ", ".join(sito_prompt_items)
-
- if TELEGRAM_BOT:
- console.print(f"\nInsert site: {sito_prompt_line}")
- category = bot.ask(
- "select_site",
- f"Insert site: {sito_prompt_line}",
- None
- )
- else:
- category = msg.ask(f"\n[cyan]Insert site: {sito_prompt_line}", choices=[k for k, _ in category_sites], show_choices=False)
-
+ return msg.ask(f"\n[cyan]Insert site: {prompt_line}", choices=[k for k, _ in category_sites], show_choices=False)
+
else:
- legend_text = " | ".join([f"[{color}]{category.capitalize()}[/{color}]" for category, color in color_map.items()])
+ # Show all sites
+ legend_text = " | ".join([f"[{color}]{cat.capitalize()}[/{color}]" for cat, color in COLOR_MAP.items()])
console.print(f"\n[bold cyan]Category Legend:[/bold cyan] {legend_text}")
-
- prompt_message = "[cyan]Insert site: " + ", ".join(
- [f"[{color_map.get(label[1], 'white')}]({key}) {label[0]}[/{color_map.get(label[1], 'white')}]"
- for key, label in choice_labels.items()]
- )
-
+
if TELEGRAM_BOT:
- category_legend_str = "Categorie: \n" + " | ".join([
- f"{category.capitalize()}" for category in color_map.keys()
+ category_legend = "Categorie: \n" + " | ".join([cat.capitalize() for cat in COLOR_MAP.keys()])
+ prompt_message = "Inserisci il sito:\n" + "\n".join([f"{key}: {label[0]}" for key, label in choice_labels.items()])
+ console.print(f"\n{prompt_message}")
+ return bot.ask("select_provider", f"{category_legend}\n\n{prompt_message}", None)
+ else:
+ prompt_message = "[cyan]Insert site: " + ", ".join([
+ f"[{COLOR_MAP.get(label[1], 'white')}]({key}) {label[0]}[/{COLOR_MAP.get(label[1], 'white')}]"
+ for key, label in choice_labels.items()
])
+ return msg.ask(prompt_message, choices=list(choice_labels.keys()), default="0", show_choices=False, show_default=False)
- prompt_message_telegram = "Inserisci il sito:\n" + "\n".join(
- [f"{key}: {label[0]}" for key, label in choice_labels.items()]
- )
- console.print(f"\n{prompt_message_telegram}")
+def main(script_id=0):
+ if TELEGRAM_BOT:
+ get_bot_instance().send_message(f"Avviato script {script_id}", None)
- category = bot.ask(
- "select_provider",
- f"{category_legend_str}\n\n{prompt_message_telegram}",
- None
- )
+ start = time.time()
+ Logger()
+ execute_hooks('pre_run')
+ initialize()
- else:
- category = msg.ask(prompt_message, choices=list(choice_labels.keys()), default="0", show_choices=False, show_default=False)
+ try:
+ check_dns_and_exit_if_needed()
- # Run the corresponding function based on user input
- if category in input_to_function:
- run_function(input_to_function[category], search_terms=search_terms)
-
- else:
- if TELEGRAM_BOT:
- bot.send_message(f"Categoria non valida", None)
+ search_functions = load_search_functions()
+ logging.info(f"Load module in: {time.time() - start} s")
- console.print("[red]Invalid category.")
+ parser = setup_argument_parser(search_functions)
+ args = parser.parse_args()
- if NOT_CLOSE_CONSOLE:
- restart_script()
+ apply_config_updates(args)
- else:
- force_exit()
+ if getattr(args, 'global'):
+ global_search(args.search)
+ return
- if TELEGRAM_BOT:
- bot.send_message(f"Chiusura in corso", None)
+ input_to_function, choice_labels, module_name_to_function = build_function_mappings(search_functions)
+
+ if handle_direct_site_selection(args, input_to_function, module_name_to_function, args.search):
+ return
- # Delete script_id
- script_id = TelegramSession.get_session()
- if script_id != "unknown":
- TelegramSession.deleteScriptId(script_id)
\ No newline at end of file
+ category = get_user_site_selection(args, choice_labels)
+
+ if category in input_to_function:
+ run_function(input_to_function[category], search_terms=args.search)
+ else:
+ if TELEGRAM_BOT:
+ get_bot_instance().send_message("Categoria non valida", None)
+ console.print("[red]Invalid category.")
+
+ if getattr(args, 'not_close'):
+ restart_script()
+ else:
+ force_exit()
+ if TELEGRAM_BOT:
+ get_bot_instance().send_message("Chiusura in corso", None)
+ script_id = TelegramSession.get_session()
+ if script_id != "unknown":
+ TelegramSession.deleteScriptId(script_id)
+ finally:
+ execute_hooks('post_run')
\ No newline at end of file
diff --git a/Test/Api/ApiExample.py b/Test/Api/ApiExample.py
new file mode 100644
index 00000000..c016be2b
--- /dev/null
+++ b/Test/Api/ApiExample.py
@@ -0,0 +1,139 @@
+"""Example script that calls the local HTTP API (if enabled).
+
+Run this after enabling `expose_http_api` in `config.json`.
+"""
+import json
+from typing import Any, Dict
+import httpx
+
+BASE = 'http://127.0.0.1:8080'
+# Configure a client with a generous timeout to accommodate provider work
+CLIENT_TIMEOUT = 60.0
+client = httpx.Client(timeout=CLIENT_TIMEOUT)
+
+
+def list_providers() -> Dict[str, Any]:
+ try:
+ r = client.get(f"{BASE}/providers")
+ r.raise_for_status()
+ data = r.json()
+ except httpx.ReadTimeout:
+ print(f'Error: request to /providers timed out after {CLIENT_TIMEOUT}s')
+ return {}
+ except httpx.RequestError as e:
+ print(f'Error contacting API: {e}')
+ return {}
+ print('Providers:')
+ print(json.dumps(data, indent=2, ensure_ascii=False))
+ return data
+
+
+def search_all(query: str) -> Dict[str, Any]:
+ payload = {'provider': 'all', 'query': query}
+ try:
+ r = client.post(f"{BASE}/search", json=payload)
+ print(f"Search all for '{query}': status={r.status_code}")
+ try:
+ data = r.json()
+ print(json.dumps(data, indent=2, ensure_ascii=False))
+ return data
+ except Exception:
+ print(r.text)
+ return {}
+ except httpx.ReadTimeout:
+ print(f"Error: search request timed out after {CLIENT_TIMEOUT}s")
+ return {}
+ except httpx.RequestError as e:
+ print(f"Error contacting API: {e}")
+ return {}
+
+
+def search_provider(provider: str, query: str) -> Dict[str, Any]:
+ try:
+ payload = {'provider': provider, 'query': query}
+ r = client.post(f"{BASE}/search", json=payload)
+ print(f"Search provider {provider} for '{query}': status={r.status_code}")
+ data = r.json()
+ print(json.dumps(data, indent=2, ensure_ascii=False))
+ return data
+ except httpx.ReadTimeout:
+ print(f"Error: search provider request timed out after {CLIENT_TIMEOUT}s")
+ return {}
+ except httpx.RequestError as e:
+ print(f"Error contacting API: {e}")
+ return {}
+
+
+def module_call(module: str, function: str, kwargs: Dict[str, Any], background: bool = False) -> Dict[str, Any]:
+ payload = {'module': module, 'function': function, 'kwargs': kwargs, 'background': background}
+ try:
+ r = client.post(f"{BASE}/module_call", json=payload)
+ print(f"Module call {module}.{function} (background={background}): status={r.status_code}")
+ try:
+ data = r.json()
+ print(json.dumps(data, indent=2, ensure_ascii=False))
+ return data
+ except Exception:
+ print(r.text)
+ return {}
+ except httpx.ReadTimeout:
+ print(f"Error: module_call timed out after {CLIENT_TIMEOUT}s")
+ return {}
+ except httpx.RequestError as e:
+ print(f"Error contacting API: {e}")
+ return {}
+
+
+def run_examples():
+ try:
+ providers = list_providers()
+ except Exception as e:
+ print('Failed to list providers:', e)
+ return
+
+ # Example: search across all providers
+ search_all('Matrix')
+
+ # Example: search a specific provider (use one provider from the list if present)
+ prov_list = providers.get('providers', [])
+ if prov_list:
+ first = prov_list[0]
+ search_provider(first.get('name'), 'Matrix')
+
+ # Example: call a module function synchronously (get_onlyDatabase=True)
+ # Note: most provider search functions expect parameter name 'string_to_search'
+ module_call('streamingcommunity', 'search', {'string_to_search': 'Matrix', 'get_onlyDatabase': True})
+
+ # Example: start a module call in background
+ module_call('streamingcommunity', 'search', {'string_to_search': 'Matrix', 'get_onlyDatabase': True}, background=True)
+
+ # Example: create a download job (if you have a valid item from a provider)
+ # We'll try to take the first movie from streamingcommunity results if present
+ sc_results = search_provider('streamingcommunity', 'Matrix')
+ try:
+ items = sc_results.get('results', {}).get('streamingcommunity', [])
+ if items:
+ first_item = items[0]
+ # create job to download film
+ payload = {'module': 'streamingcommunity', 'action': 'download_film', 'item': first_item}
+ r = client.post(f"{BASE}/jobs", json=payload)
+ print('Create job response:', r.status_code, r.text)
+ job = r.json()
+ job_id = job.get('job_id')
+ if job_id:
+ # poll job until finished
+ import time
+ while True:
+ rr = client.get(f"{BASE}/jobs/{job_id}")
+ data = rr.json()
+ print('Job status:', data.get('status'))
+ if data.get('status') in ('finished', 'failed'):
+ print('Final job data:', json.dumps(data, indent=2, ensure_ascii=False))
+ break
+ time.sleep(1)
+ except Exception as e:
+ print('Job example failed:', e)
+
+
+if __name__ == '__main__':
+ run_examples()
diff --git a/Test/Download/HLS.py b/Test/Download/HLS.py
index f57bb3d6..dccd7b22 100644
--- a/Test/Download/HLS.py
+++ b/Test/Download/HLS.py
@@ -1,16 +1,15 @@
# 23.06.24
+import os
+import sys
import unittest
+
# Fix import
-import sys
-import os
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
from StreamingCommunity.Util.os import os_summary
os_summary.get_system_summary()
from StreamingCommunity.Util.logger import Logger
diff --git a/Test/Download/MP4.py b/Test/Download/MP4.py
index 58a1105d..94185a32 100644
--- a/Test/Download/MP4.py
+++ b/Test/Download/MP4.py
@@ -1,16 +1,15 @@
# 23.06.24
+import os
+import sys
import unittest
+
# Fix import
-import sys
-import os
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
from StreamingCommunity.Util.os import os_summary
os_summary.get_system_summary()
from StreamingCommunity.Util.logger import Logger
diff --git a/Test/Download/TOR.py b/Test/Download/TOR.py
index c2fb5bc5..58cb50f5 100644
--- a/Test/Download/TOR.py
+++ b/Test/Download/TOR.py
@@ -1,14 +1,14 @@
# 23.06.24
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix import
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.logger import Logger
from StreamingCommunity import TOR_downloader
@@ -16,7 +16,7 @@
# Test
start_message()
-logger = Logger()
+Logger()
manager = TOR_downloader()
magnet_link = """magnet:?xt=urn:btih:0E0CDB5387B4C71C740BD21E8144F3735C3F899E&dn=Krapopolis.S02E14.720p.x265-TiPEX&tr=udp%3A%2F%2Ftracker.torrent.eu.org%3A451%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.dler.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.darkness.services%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.internetwarriors.net%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fcoppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.zer0day.to%3A1337%2Fannounce"""
diff --git a/Test/EasyDownload/DASH.py b/Test/EasyDownload/DASH.py
index 84242ede..fb0fb1d8 100644
--- a/Test/EasyDownload/DASH.py
+++ b/Test/EasyDownload/DASH.py
@@ -1,14 +1,14 @@
# 29.07.25
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix import
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.os import os_summary, get_wvd_path
os_summary.get_system_summary()
@@ -22,17 +22,16 @@
license_url = ""
mpd_url = ""
-
-r_proc = DASH_Downloader(
+dash_process = DASH_Downloader(
cdm_device=get_wvd_path(),
license_url=license_url,
mpd_url=mpd_url,
output_path="out.mp4",
)
-r_proc.parse_manifest()
+dash_process.parse_manifest()
-if r_proc.download_and_decrypt():
- r_proc.finalize_output()
+if dash_process.download_and_decrypt():
+ dash_process.finalize_output()
-status = r_proc.get_status()
+status = dash_process.get_status()
print(status)
\ No newline at end of file
diff --git a/Test/EasyDownload/HLS.py b/Test/EasyDownload/HLS.py
index c239714f..2c14b6f8 100644
--- a/Test/EasyDownload/HLS.py
+++ b/Test/EasyDownload/HLS.py
@@ -1,14 +1,14 @@
# 23.06.24
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix import
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.os import os_summary
os_summary.get_system_summary()
@@ -17,11 +17,11 @@
start_message()
-logger = Logger()
-result = HLS_Downloader(
+Logger()
+hls_process = HLS_Downloader(
output_path=".\\Video\\test.mp4",
m3u8_url="https://acdn.ak-stream-videoplatform.sky.it/hls/2024/11/21/968275/master.m3u8"
).start()
-thereIsError = result['error'] is not None
+thereIsError = hls_process['error'] is not None
print(thereIsError)
\ No newline at end of file
diff --git a/Test/EasyDownload/MP4.py b/Test/EasyDownload/MP4.py
index 03dabc56..14092159 100644
--- a/Test/EasyDownload/MP4.py
+++ b/Test/EasyDownload/MP4.py
@@ -1,14 +1,14 @@
# 23.06.24
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix import
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.os import os_summary
os_summary.get_system_summary()
@@ -17,7 +17,7 @@
start_message()
-logger = Logger()
+Logger()
path, kill_handler = MP4_downloader(
url="https://148-251-75-109.top/Getintopc.com/IDA_Pro_2020.mp4",
path=r".\\Video\\undefined.mp4"
diff --git a/Test/Player/helper/vixcloud.py b/Test/Player/helper/vixcloud.py
index 3c1cf78d..2e665092 100644
--- a/Test/Player/helper/vixcloud.py
+++ b/Test/Player/helper/vixcloud.py
@@ -1,12 +1,14 @@
-# Fix import
-import sys
+
import os
+import sys
+import json
+
+
+# Fix import
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(src_path)
-# Import
-import json
from StreamingCommunity.Api.Player.Helper.Vixcloud.js_parser import JavaScriptParser
from StreamingCommunity.Api.Player.Helper.Vixcloud.util import WindowVideo, WindowParameter, StreamsCollection
diff --git a/Test/Player/mixdrop.py b/Test/Player/mixdrop.py
index b1326ccb..4ed1d694 100644
--- a/Test/Player/mixdrop.py
+++ b/Test/Player/mixdrop.py
@@ -1,22 +1,19 @@
-# 23.11.24
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix path
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
-from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.logger import Logger
from StreamingCommunity.Api.Player.mixdrop import VideoSource
# Test
-start_message()
-logger = Logger()
+Logger()
video_source = VideoSource("https://cb01net.uno/pino-daniele-nero-a-meta-hd-2024/")
master_playlist = video_source.get_playlist()
print(master_playlist)
\ No newline at end of file
diff --git a/Test/Player/supervideo.py b/Test/Player/supervideo.py
index b485ab17..9ec1e4ad 100644
--- a/Test/Player/supervideo.py
+++ b/Test/Player/supervideo.py
@@ -1,22 +1,19 @@
-# 23.11.24
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix path
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
-from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.logger import Logger
from StreamingCommunity.Api.Player.supervideo import VideoSource
# Test
-start_message()
-logger = Logger()
+Logger()
video_source = VideoSource("https://supervideo.tv/78np7kfiyklu")
master_playlist = video_source.get_playlist()
print(master_playlist)
\ No newline at end of file
diff --git a/Test/Player/vixcloud.py b/Test/Player/vixcloud.py
index 65e026c6..54598dd0 100644
--- a/Test/Player/vixcloud.py
+++ b/Test/Player/vixcloud.py
@@ -1,22 +1,19 @@
-# 23.11.24
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix path
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
-from StreamingCommunity.Util.message import start_message
from StreamingCommunity.Util.logger import Logger
from StreamingCommunity.Api.Player.vixcloud import VideoSource
# Test
-start_message()
-logger = Logger()
+Logger()
video_source = VideoSource("streamingcommunity")
video_source.setup("1171b9202c71489193f5fed2bc7b43bb", "computer", 778)
video_source.get_iframe()
diff --git a/Test/Util/ffmpegVersion.py b/Test/Util/ffmpegVersion.py
index 1fce9346..296e37bc 100644
--- a/Test/Util/ffmpegVersion.py
+++ b/Test/Util/ffmpegVersion.py
@@ -1,13 +1,12 @@
-# 05.02.25
-
-# Fix import
-import sys
import os
+import sys
+
+
+# Fix path
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
from StreamingCommunity.Util.ffmpeg_installer import FFMPEGDownloader
diff --git a/Test/Util/hooks.py b/Test/Util/hooks.py
new file mode 100644
index 00000000..f2dbc800
--- /dev/null
+++ b/Test/Util/hooks.py
@@ -0,0 +1,75 @@
+# Simple manual test for pre/post hooks execution
+
+import os
+import sys
+import tempfile
+
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.run import execute_hooks
+
+
+def main():
+ # Prepare temp folder and python script
+ with tempfile.TemporaryDirectory() as tmp:
+ out_file = os.path.join(tmp, "hook_out.txt")
+ script_path = os.path.join(tmp, "hook_script.py")
+
+ with open(script_path, "w", encoding="utf-8") as f:
+ f.write(
+ "import os\n"
+ "with open(os.environ.get('HOOK_OUT'), 'a', encoding='utf-8') as fp:\n"
+ " fp.write('ran\\n')\n"
+ )
+
+ original_hooks = (
+ config_manager.config.get("HOOKS", {}).copy()
+ if config_manager.config.get("HOOKS")
+ else {}
+ )
+
+ try:
+ # Configure hooks: run the python script pre and post
+ config_manager.config.setdefault("HOOKS", {})
+ config_manager.config["HOOKS"]["pre_run"] = [
+ {
+ "name": "test-pre",
+ "type": "python",
+ "path": script_path,
+ "env": {"HOOK_OUT": out_file},
+ "enabled": True,
+ "continue_on_error": False,
+ }
+ ]
+ config_manager.config["HOOKS"]["post_run"] = [
+ {
+ "name": "test-post",
+ "type": "python",
+ "path": script_path,
+ "env": {"HOOK_OUT": out_file},
+ "enabled": True,
+ "continue_on_error": False,
+ }
+ ]
+
+ # Execute and assert
+ execute_hooks("pre_run")
+ execute_hooks("post_run")
+
+ with open(out_file, "r", encoding="utf-8") as fp:
+ content = fp.read().strip()
+ assert content.splitlines() == ["ran", "ran"], (
+ f"Unexpected content: {content!r}"
+ )
+
+ print("OK: hooks executed (pre + post)")
+
+ finally:
+ # Restore original hooks configuration
+ if original_hooks:
+ config_manager.config["HOOKS"] = original_hooks
+ else:
+ config_manager.config.pop("HOOKS", None)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/Test/Util/loadSearchApi.py b/Test/Util/loadSearchApi.py
index a1d5ce01..3ceae6a0 100644
--- a/Test/Util/loadSearchApi.py
+++ b/Test/Util/loadSearchApi.py
@@ -1,15 +1,15 @@
-# 22.03.25
-# Fix import
-import sys
import os
+import sys
+import unittest
+import logging
+
+
+# Fix path
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-# Import
-import unittest
-import logging
from StreamingCommunity.global_search import load_search_functions
class TestLoadSearchFunctions(unittest.TestCase):
diff --git a/Test/Util/osPath.py b/Test/Util/osPath.py
index df5318ea..2dadfc94 100644
--- a/Test/Util/osPath.py
+++ b/Test/Util/osPath.py
@@ -1,16 +1,15 @@
-# 22.01.25
-# Fix import
-import sys
import os
+import sys
+import unittest
+from unittest.mock import patch
+
+
+# Fix path
src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(src_path)
-
-# Import
-import unittest
-from unittest.mock import patch
from StreamingCommunity.Util.os import OsManager
class TestOsManager(unittest.TestCase):
diff --git a/config.json b/config.json
index 74c6298c..80919df6 100644
--- a/config.json
+++ b/config.json
@@ -2,13 +2,16 @@
"DEFAULT": {
"debug": false,
"show_message": true,
+ "expose_http_api": false,
+ "http_api_port": 8080,
+ "http_api_username": "anyusername",
+ "http_api_password": "pleaseusepassword",
+ "http_api_provider_timeout": 20,
"clean_console": true,
"show_trending": true,
- "use_api": true,
- "not_close": false,
+ "fetch_domain_online": true,
+ "validate_github_config": false,
"telegram_bot": false,
- "download_site_data": true,
- "validate_github_config": true,
"bypass_dns": false
},
"OUT_FOLDER": {
@@ -46,9 +49,7 @@
"use_acodec": true,
"use_bitrate": true,
"use_gpu": false,
- "default_preset": "ultrafast"
- },
- "M3U8_PARSER": {
+ "default_preset": "ultrafast",
"force_resolution": "Best"
},
"REQUESTS": {
@@ -56,10 +57,17 @@
"timeout": 20,
"max_retry": 8
},
+ "HOOKS": {
+ "pre_run": [],
+ "post_run": []
+ },
"SITE_LOGIN": {
"crunchyroll": {
"etp_rt": "",
"x_cr_tab_id": ""
+ },
+ "mediasetinfinity": {
+ "beToken": ""
}
}
}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index e5b20ec2..672c517e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-httpx
+httpx
bs4
rich
tqdm
@@ -13,4 +13,7 @@ pycryptodomex
ua-generator
qbittorrent-api
pyTelegramBotAPI
-pywidevine
\ No newline at end of file
+pywidevine
+fastapi
+uvicorn[standard]
+ruff
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 309554f1..6739c2fe 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,8 @@ def get_version():
try:
import pkg_resources
return pkg_resources.get_distribution('StreamingCommunity').version
- except:
+
+ except Exception:
version_file_path = os.path.join(os.path.dirname(__file__), "StreamingCommunity", "Upload", "version.py")
with open(version_file_path, "r", encoding="utf-8") as f:
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M)
diff --git a/update.py b/update.py
index 9f9a1bd9..a27252f2 100644
--- a/update.py
+++ b/update.py
@@ -14,12 +14,12 @@
from rich.panel import Panel
from rich.table import Table
+from StreamingCommunity.Upload.version import __author__, __title__
# Variable
max_timeout = 15
console = Console()
local_path = os.path.join(".")
-from StreamingCommunity.Upload.version import __author__, __title__
def move_content(source: str, destination: str):