Compare commits
140 Commits
db7700a6b9
...
v0.2.4
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
12509cd4e2 | ||
|
|
2595bfe4f4 | ||
|
|
3df11be5e9 | ||
|
|
fc9d94aeee | ||
|
|
111df11199 | ||
|
|
f0d4c36bc3 | ||
|
|
ef0d6e9274 | ||
|
|
d05b01cfb2 | ||
|
|
9dc08d48e9 | ||
|
|
20bdfc0dd7 | ||
|
|
4365e083dc | ||
|
|
528915a235 | ||
|
|
9a980b5766 | ||
|
|
5eee7e1161 | ||
|
|
0a41998e29 | ||
|
|
ebdc23c3ce | ||
|
|
9611930949 | ||
|
|
609f93b783 | ||
|
|
52c6462fa8 | ||
|
|
358ef18f77 | ||
|
|
fc729a2414 | ||
|
|
0939a0c6c2 | ||
|
|
c384d54c12 | ||
|
|
71553aad32 | ||
|
|
d19e69990a | ||
|
|
be0f4b4c4e | ||
|
|
01b5fdb289 | ||
|
|
60ae58500a | ||
|
|
f9c8b8ac5e | ||
|
|
72c735c3ee | ||
|
|
5871ae30ad | ||
|
|
381a62046b | ||
|
|
52724ecc5b | ||
|
|
f288d445e4 | ||
|
|
d9db6da191 | ||
|
|
5443881ea1 | ||
|
|
8946b57456 | ||
|
|
686239491b | ||
|
|
126ba4487c | ||
|
|
447cda19ef | ||
|
|
f1ba913a98 | ||
|
|
59336aafb7 | ||
|
|
fd5ad3ed56 | ||
|
|
2d03a3bb10 | ||
|
|
4dc02d52a2 | ||
|
|
ed0cea9c26 | ||
|
|
15bfbdbe88 | ||
|
|
c354ba09ba | ||
| 2eeea08be0 | |||
| fbfc8ea965 | |||
|
|
6ec5db2ea2 | ||
|
|
8feced6f1c | ||
|
|
285649c30a | ||
|
|
558da817f1 | ||
|
|
2a84327f69 | ||
| 535b11dca5 | |||
| 8edc715795 | |||
| cd203703e8 | |||
| 8f2367b71e | |||
| 101c7605d2 | |||
| a5b58e34e4 | |||
| a32e86550c | |||
| 5de3778ae5 | |||
| 81aab0657e | |||
| 8514a0c152 | |||
| c846147c64 | |||
| e52297b2ba | |||
| 655833f13e | |||
| 03dd02ed87 | |||
| b6ee197536 | |||
| d8374ae9f2 | |||
| f262eaa120 | |||
| d940a6e92a | |||
| e1395aeca0 | |||
| 48841c5750 | |||
| d558bbf6bd | |||
| b05d989581 | |||
| bc8af53525 | |||
| 6bd1587947 | |||
| 7d6531b40e | |||
| ab435a4c76 | |||
| 0a88e366b1 | |||
| 1c80cd7d7d | |||
| a45c180aaa | |||
| 0b204ff19c | |||
| d7ec5f7620 | |||
| 3f64304374 | |||
| b459272149 | |||
| 4b05fc194b | |||
| 9d088819ab | |||
| e20f7a1f67 | |||
| 9d683dfa84 | |||
| 867756c661 | |||
| f81a6edb07 | |||
| ec4bce473c | |||
| bf882b741f | |||
| a4e25b5ec8 | |||
| ff6bacb0d5 | |||
| f32b7a06c0 | |||
| 7ceed58e7b | |||
| 153f401dd3 | |||
| 7f1f34fb9f | |||
| 21fe7cb1eb | |||
| 9e63184524 | |||
| 3742221189 | |||
| 478ac15ab8 | |||
| ef0a01bc9b | |||
| 802c11be44 | |||
| 4cbb135772 | |||
| 3d52442471 | |||
| 81640192ab | |||
| 81d760aabe | |||
| c0eff679f7 | |||
| 07097058d7 | |||
| cd7a338541 | |||
| be652f8efb | |||
| dd51b14d49 | |||
| a471808392 | |||
| b3da8ce738 | |||
| fe0c078c3f | |||
| 962522b974 | |||
| 24367ea08a | |||
| f0eebd0bea | |||
| c8e21b9260 | |||
| cdc1664779 | |||
|
|
2849eda05a | ||
|
|
cfb2df8d66 | ||
|
|
12c8ad3782 | ||
|
|
74a39a8f9a | ||
|
|
5eacb0d0cb | ||
|
|
e8c0c3d646 | ||
|
|
6b2671a1f5 | ||
|
|
2d8622506e | ||
|
|
86cc7dfc6f | ||
|
|
d84bee74c4 | ||
|
|
488caa7a08 | ||
|
|
62877dfed6 | ||
|
|
87ff94e204 | ||
|
|
0c78ed7cf7 | ||
|
|
4db9bfd103 |
21
.gitignore
vendored
21
.gitignore
vendored
@@ -1,10 +1,23 @@
|
|||||||
__pycache__
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
junk/
|
junk/
|
||||||
.vscode
|
.vscode
|
||||||
.ipynb_checkpoints/
|
.ipynb_checkpoints/
|
||||||
ansible/inventory/hawaii.yml
|
tools/ansible/inventory/hawaii.yml
|
||||||
ansible/inventory/peppermint.yml
|
tools/ansible/inventory/peppermint.yml
|
||||||
|
tools/ansible/inventory/cappuccino.yml
|
||||||
|
tools/ansible/inventory/group_vars/all.yml
|
||||||
ffx_test_report.log
|
ffx_test_report.log
|
||||||
bin/conversiontest.py
|
bin/conversiontest.py
|
||||||
*.egg-info/
|
|
||||||
|
|
||||||
|
build/
|
||||||
|
dist/
|
||||||
|
*.egg-info/
|
||||||
|
.venv/
|
||||||
|
venv/
|
||||||
|
.codex
|
||||||
|
|
||||||
|
|
||||||
|
*.mkv
|
||||||
|
*.webm
|
||||||
|
ffmpeg2pass-0.log
|
||||||
|
|||||||
141
README.md
141
README.md
@@ -1,48 +1,147 @@
|
|||||||
# FFX
|
# FFX
|
||||||
|
|
||||||
|
FFX is a local CLI and Textual TUI for inspecting TV episode files, storing normalization rules in SQLite, and converting outputs into a predictable stream, metadata, and filename layout.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- Linux-like environment
|
||||||
|
- `python3`
|
||||||
|
- `ffmpeg`
|
||||||
|
- `ffprobe`
|
||||||
|
- `cpulimit`
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
per https:
|
FFX uses a two-step local setup flow.
|
||||||
|
|
||||||
|
### 1. Install The Bundle
|
||||||
|
|
||||||
|
This step creates or reuses the persistent bundle virtualenv in `~/.local/share/ffx.venv`, installs FFX into it, and ensures `ffx` is exposed through a shell alias.
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
pip install https://<URL>/<Releaser>/ffx.git@<Branch>
|
bash tools/setup.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
per git:
|
If you also want the Python packages needed for the modern test suite:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
pip install git+ssh://<Username>@<URL>/<Releaser>/ffx.git@<Branch>
|
bash tools/setup.sh --with-tests
|
||||||
```
|
```
|
||||||
|
|
||||||
## Version history
|
You can verify the bundle state without changing anything:
|
||||||
|
|
||||||
### 0.1.1
|
```sh
|
||||||
|
bash tools/setup.sh --check
|
||||||
|
```
|
||||||
|
|
||||||
Bugfixes, TMBD identify shows
|
### 2. Prepare System Dependencies And Local User Files
|
||||||
|
|
||||||
### 0.1.2
|
This step installs or verifies workstation dependencies and seeds local config and data directories. It is the step wrapped by the CLI command `ffx configure_workstation`.
|
||||||
|
|
||||||
Bugfixes
|
Run it directly:
|
||||||
|
|
||||||
### 0.1.3
|
```sh
|
||||||
|
bash tools/configure_workstation.sh
|
||||||
|
```
|
||||||
|
|
||||||
Subtitle file imports
|
Or through the installed CLI:
|
||||||
|
|
||||||
### 0.2.0
|
```sh
|
||||||
|
ffx configure_workstation
|
||||||
|
```
|
||||||
|
|
||||||
Tests, Config-File
|
Check-only mode is available in both forms:
|
||||||
|
|
||||||
### 0.2.1
|
```sh
|
||||||
|
bash tools/configure_workstation.sh --check
|
||||||
|
ffx configure_workstation --check
|
||||||
|
```
|
||||||
|
|
||||||
Signature, Tags cleaning, Bugfixes, Refactoring
|
`tools/configure_workstation.sh` does not manage the bundle virtualenv. Python-side test packages belong to `tools/setup.sh --with-tests`.
|
||||||
|
|
||||||
### 0.2.2
|
## Basic Usage
|
||||||
|
|
||||||
CLI-Overrides
|
Examples:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
ffx version
|
||||||
|
ffx inspect /path/to/episode.mkv
|
||||||
|
ffx convert /path/to/episode.mkv
|
||||||
|
ffx shows
|
||||||
|
```
|
||||||
|
|
||||||
|
## Modern Tests
|
||||||
|
|
||||||
|
Install Python test packages first:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
bash tools/setup.sh --with-tests
|
||||||
|
```
|
||||||
|
|
||||||
|
Then run the modern automatically discovered test suite:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./tools/test.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
This runner uses `pytest` and intentionally excludes the legacy harness under `tests/legacy/`.
|
||||||
|
|
||||||
|
## Default Local Paths
|
||||||
|
|
||||||
|
- Config: `~/.local/etc/ffx.json`
|
||||||
|
- Database: `~/.local/var/ffx/ffx.db`
|
||||||
|
- Log file: `~/.local/var/log/ffx.log`
|
||||||
|
- Bundle venv: `~/.local/share/ffx.venv`
|
||||||
|
|
||||||
|
## TMDB
|
||||||
|
|
||||||
|
TMDB-backed metadata enrichment requires `TMDB_API_KEY` to be set in the environment.
|
||||||
|
|
||||||
|
## Version History
|
||||||
|
|
||||||
|
### 0.2.4
|
||||||
|
|
||||||
|
- lightweight CLI commands now stay import-light via lazy runtime loading
|
||||||
|
- setup/config templating moved to `assets/ffx.json.j2`
|
||||||
|
- aligned two-step local setup wrappers: `ffx setup` and `ffx configure_workstation`
|
||||||
|
- combined `ffprobe` payload reuse in `FileProperties`
|
||||||
|
- configurable crop-detect sampling plus per-process crop result caching
|
||||||
|
- single-query controller accessors and conditional DB schema bootstrap
|
||||||
|
- shared screen bootstrap/controller wiring for large detail screens
|
||||||
|
- configurable default season/episode digit lengths
|
||||||
|
- digit-aware `rename` and padded `unmux` filename markers
|
||||||
|
|
||||||
### 0.2.3
|
### 0.2.3
|
||||||
|
|
||||||
PyPi packaging
|
- PyPI packaging
|
||||||
Templating output filename
|
- output filename templating
|
||||||
Season shiftung
|
- season shifting
|
||||||
DB-Versionierung
|
- DB versioning
|
||||||
|
|
||||||
|
### 0.2.2
|
||||||
|
|
||||||
|
- CLI overrides
|
||||||
|
|
||||||
|
### 0.2.1
|
||||||
|
|
||||||
|
- signature handling
|
||||||
|
- tag cleanup
|
||||||
|
- bugfixes and refactoring
|
||||||
|
|
||||||
|
### 0.2.0
|
||||||
|
|
||||||
|
- tests
|
||||||
|
- config file
|
||||||
|
|
||||||
|
### 0.1.3
|
||||||
|
|
||||||
|
- subtitle file imports
|
||||||
|
|
||||||
|
### 0.1.2
|
||||||
|
|
||||||
|
- bugfixes
|
||||||
|
|
||||||
|
### 0.1.1
|
||||||
|
|
||||||
|
- bugfixes
|
||||||
|
- TMDB show identification
|
||||||
|
|||||||
36
assets/ffx.json.j2
Normal file
36
assets/ffx.json.j2
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
{
|
||||||
|
"databasePath": {{ database_path_json }},
|
||||||
|
"logDirectory": {{ log_directory_json }},
|
||||||
|
"subtitlesDirectory": {{ subtitles_directory_json }},
|
||||||
|
"defaultIndexSeasonDigits": {{ default_index_season_digits }},
|
||||||
|
"defaultIndexEpisodeDigits": {{ default_index_episode_digits }},
|
||||||
|
"defaultIndicatorSeasonDigits": {{ default_indicator_season_digits }},
|
||||||
|
"defaultIndicatorEpisodeDigits": {{ default_indicator_episode_digits }},
|
||||||
|
"metadata": {
|
||||||
|
"signature": {
|
||||||
|
"RECODED_WITH": "FFX"
|
||||||
|
},
|
||||||
|
"remove": [
|
||||||
|
"VERSION-eng",
|
||||||
|
"creation_time",
|
||||||
|
"NAME"
|
||||||
|
],
|
||||||
|
"streams": {
|
||||||
|
"remove": [
|
||||||
|
"BPS",
|
||||||
|
"NUMBER_OF_FRAMES",
|
||||||
|
"NUMBER_OF_BYTES",
|
||||||
|
"_STATISTICS_WRITING_APP",
|
||||||
|
"_STATISTICS_WRITING_DATE_UTC",
|
||||||
|
"_STATISTICS_TAGS",
|
||||||
|
"BPS-eng",
|
||||||
|
"DURATION-eng",
|
||||||
|
"NUMBER_OF_FRAMES-eng",
|
||||||
|
"NUMBER_OF_BYTES-eng",
|
||||||
|
"_STATISTICS_WRITING_APP-eng",
|
||||||
|
"_STATISTICS_WRITING_DATE_UTC-eng",
|
||||||
|
"_STATISTICS_TAGS-eng"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "ffx"
|
name = "ffx"
|
||||||
description = "FFX recoding and metadata managing tool"
|
description = "FFX recoding and metadata managing tool"
|
||||||
version = "0.2.3"
|
version = "0.2.4"
|
||||||
license = {file = "LICENSE.md"}
|
license = {file = "LICENSE.md"}
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"requests",
|
"requests",
|
||||||
@@ -27,6 +27,11 @@ Homepage = "https://gitea.maveno.de/Javanaut/ffx"
|
|||||||
Repository = "https://gitea.maveno.de/Javanaut/ffx.git"
|
Repository = "https://gitea.maveno.de/Javanaut/ffx.git"
|
||||||
Issues = "https://gitea.maveno.de/Javanaut/ffx/issues"
|
Issues = "https://gitea.maveno.de/Javanaut/ffx/issues"
|
||||||
|
|
||||||
|
[project.optional-dependencies]
|
||||||
|
test = [
|
||||||
|
"pytest",
|
||||||
|
]
|
||||||
|
|
||||||
[build-system]
|
[build-system]
|
||||||
requires = [
|
requires = [
|
||||||
"setuptools",
|
"setuptools",
|
||||||
@@ -35,4 +40,15 @@ requires = [
|
|||||||
build-backend = "setuptools.build_meta"
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
[project.scripts]
|
[project.scripts]
|
||||||
ffx = "ffx.ffx:ffx"
|
ffx = "ffx.cli:ffx"
|
||||||
|
|
||||||
|
[tool.pytest.ini_options]
|
||||||
|
testpaths = ["tests"]
|
||||||
|
python_files = ["test_*.py"]
|
||||||
|
norecursedirs = ["tests/legacy", "tests/support"]
|
||||||
|
addopts = "-ra"
|
||||||
|
markers = [
|
||||||
|
"integration: exercises the FFX bundle with real ffmpeg/ffprobe processes",
|
||||||
|
"pattern_management: covers requirements/pattern_management.md",
|
||||||
|
"subtrack_mapping: covers requirements/subtrack_mapping.md",
|
||||||
|
]
|
||||||
|
|||||||
9
src/ffx/__main__.py
Normal file
9
src/ffx/__main__.py
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
from .cli import ffx
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
ffx()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -9,6 +9,7 @@ class AudioLayout(Enum):
|
|||||||
LAYOUT_7_1 = {"label": "7.1", "index": 4} #TODO: Does this exist?
|
LAYOUT_7_1 = {"label": "7.1", "index": 4} #TODO: Does this exist?
|
||||||
|
|
||||||
LAYOUT_6CH = {"label": "6ch", "index": 5}
|
LAYOUT_6CH = {"label": "6ch", "index": 5}
|
||||||
|
LAYOUT_5_0 = {"label": "5.0(side)", "index": 6}
|
||||||
|
|
||||||
LAYOUT_UNDEFINED = {"label": "undefined", "index": 0}
|
LAYOUT_UNDEFINED = {"label": "undefined", "index": 0}
|
||||||
|
|
||||||
@@ -29,6 +30,15 @@ class AudioLayout(Enum):
|
|||||||
except:
|
except:
|
||||||
return AudioLayout.LAYOUT_UNDEFINED
|
return AudioLayout.LAYOUT_UNDEFINED
|
||||||
|
|
||||||
|
# @staticmethod
|
||||||
|
# def fromIndex(index : int):
|
||||||
|
# try:
|
||||||
|
# target_index = int(index)
|
||||||
|
# except (TypeError, ValueError):
|
||||||
|
# return AudioLayout.LAYOUT_UNDEFINED
|
||||||
|
# return next((a for a in AudioLayout if a.value['index'] == target_index),
|
||||||
|
# AudioLayout.LAYOUT_UNDEFINED)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def fromIndex(index : int):
|
def fromIndex(index : int):
|
||||||
try:
|
try:
|
||||||
|
|||||||
1396
src/ffx/cli.py
Executable file
1396
src/ffx/cli.py
Executable file
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,12 @@
|
|||||||
import os, json
|
import os, json
|
||||||
|
|
||||||
|
from .constants import (
|
||||||
|
DEFAULT_SHOW_INDEX_EPISODE_DIGITS,
|
||||||
|
DEFAULT_SHOW_INDEX_SEASON_DIGITS,
|
||||||
|
DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS,
|
||||||
|
DEFAULT_SHOW_INDICATOR_SEASON_DIGITS,
|
||||||
|
)
|
||||||
|
|
||||||
class ConfigurationController():
|
class ConfigurationController():
|
||||||
|
|
||||||
CONFIG_FILENAME = 'ffx.json'
|
CONFIG_FILENAME = 'ffx.json'
|
||||||
@@ -8,7 +15,12 @@ class ConfigurationController():
|
|||||||
|
|
||||||
DATABASE_PATH_CONFIG_KEY = 'databasePath'
|
DATABASE_PATH_CONFIG_KEY = 'databasePath'
|
||||||
LOG_DIRECTORY_CONFIG_KEY = 'logDirectory'
|
LOG_DIRECTORY_CONFIG_KEY = 'logDirectory'
|
||||||
|
SUBTITLES_DIRECTORY_CONFIG_KEY = 'subtitlesDirectory'
|
||||||
OUTPUT_FILENAME_TEMPLATE_KEY = 'outputFilenameTemplate'
|
OUTPUT_FILENAME_TEMPLATE_KEY = 'outputFilenameTemplate'
|
||||||
|
DEFAULT_INDEX_SEASON_DIGITS_CONFIG_KEY = 'defaultIndexSeasonDigits'
|
||||||
|
DEFAULT_INDEX_EPISODE_DIGITS_CONFIG_KEY = 'defaultIndexEpisodeDigits'
|
||||||
|
DEFAULT_INDICATOR_SEASON_DIGITS_CONFIG_KEY = 'defaultIndicatorSeasonDigits'
|
||||||
|
DEFAULT_INDICATOR_EPISODE_DIGITS_CONFIG_KEY = 'defaultIndicatorEpisodeDigits'
|
||||||
|
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
@@ -49,6 +61,48 @@ class ConfigurationController():
|
|||||||
def getDatabaseFilePath(self):
|
def getDatabaseFilePath(self):
|
||||||
return self.__databaseFilePath
|
return self.__databaseFilePath
|
||||||
|
|
||||||
|
def getSubtitlesDirectoryPath(self):
|
||||||
|
subtitlesDirectory = self.__configurationData.get(
|
||||||
|
ConfigurationController.SUBTITLES_DIRECTORY_CONFIG_KEY,
|
||||||
|
'',
|
||||||
|
)
|
||||||
|
return os.path.expanduser(str(subtitlesDirectory)) if subtitlesDirectory else ''
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def getConfiguredIntegerValue(cls, configurationData: dict, configKey: str, defaultValue: int) -> int:
|
||||||
|
configuredValue = configurationData.get(configKey, defaultValue)
|
||||||
|
try:
|
||||||
|
return int(configuredValue)
|
||||||
|
except (TypeError, ValueError):
|
||||||
|
return int(defaultValue)
|
||||||
|
|
||||||
|
def getDefaultIndexSeasonDigits(self):
|
||||||
|
return ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
self.__configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDEX_SEASON_DIGITS_CONFIG_KEY,
|
||||||
|
DEFAULT_SHOW_INDEX_SEASON_DIGITS,
|
||||||
|
)
|
||||||
|
|
||||||
|
def getDefaultIndexEpisodeDigits(self):
|
||||||
|
return ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
self.__configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDEX_EPISODE_DIGITS_CONFIG_KEY,
|
||||||
|
DEFAULT_SHOW_INDEX_EPISODE_DIGITS,
|
||||||
|
)
|
||||||
|
|
||||||
|
def getDefaultIndicatorSeasonDigits(self):
|
||||||
|
return ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
self.__configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDICATOR_SEASON_DIGITS_CONFIG_KEY,
|
||||||
|
DEFAULT_SHOW_INDICATOR_SEASON_DIGITS,
|
||||||
|
)
|
||||||
|
|
||||||
|
def getDefaultIndicatorEpisodeDigits(self):
|
||||||
|
return ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
self.__configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDICATOR_EPISODE_DIGITS_CONFIG_KEY,
|
||||||
|
DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS,
|
||||||
|
)
|
||||||
|
|
||||||
def getData(self):
|
def getData(self):
|
||||||
return self.__configurationData
|
return self.__configurationData
|
||||||
|
|||||||
@@ -1,15 +1,30 @@
|
|||||||
VERSION='0.2.3'
|
VERSION='0.2.4'
|
||||||
DATABASE_VERSION = 2
|
DATABASE_VERSION = 2
|
||||||
|
|
||||||
DEFAULT_QUALITY = 32
|
DEFAULT_QUALITY = 32
|
||||||
DEFAULT_AV1_PRESET = 5
|
DEFAULT_AV1_PRESET = 5
|
||||||
|
|
||||||
|
DEFAULT_VIDEO_ENCODER_LABEL = "vp9"
|
||||||
|
DEFAULT_CONTAINER_FORMAT = "webm"
|
||||||
|
DEFAULT_CONTAINER_EXTENSION = "webm"
|
||||||
|
SUPPORTED_INPUT_FILE_EXTENSIONS = ("mkv", "mp4", "avi", "flv", "webm")
|
||||||
|
FFMPEG_COMMAND_TOKENS = ("ffmpeg", "-y")
|
||||||
|
FFMPEG_NULL_OUTPUT_TOKENS = ("-f", "null", "/dev/null")
|
||||||
|
|
||||||
DEFAULT_STEREO_BANDWIDTH = "112"
|
DEFAULT_STEREO_BANDWIDTH = "112"
|
||||||
DEFAULT_AC3_BANDWIDTH = "256"
|
DEFAULT_AC3_BANDWIDTH = "256"
|
||||||
DEFAULT_DTS_BANDWIDTH = "320"
|
DEFAULT_DTS_BANDWIDTH = "320"
|
||||||
DEFAULT_7_1_BANDWIDTH = "384"
|
DEFAULT_7_1_BANDWIDTH = "384"
|
||||||
|
|
||||||
DEFAULT_CROP_START = 60
|
DEFAULT_CROPDETECT_SEEK_SECONDS = 60
|
||||||
DEFAULT_CROP_LENGTH = 180
|
DEFAULT_CROPDETECT_DURATION_SECONDS = 180
|
||||||
|
|
||||||
|
DEFAULT_cut_start = 60
|
||||||
|
DEFAULT_cut_length = 180
|
||||||
|
|
||||||
|
DEFAULT_SHOW_INDEX_SEASON_DIGITS = 2
|
||||||
|
DEFAULT_SHOW_INDEX_EPISODE_DIGITS = 2
|
||||||
|
DEFAULT_SHOW_INDICATOR_SEASON_DIGITS = 2
|
||||||
|
DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS = 2
|
||||||
|
|
||||||
DEFAULT_OUTPUT_FILENAME_TEMPLATE = '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'
|
DEFAULT_OUTPUT_FILENAME_TEMPLATE = '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
import os, click
|
import os, click
|
||||||
|
|
||||||
from sqlalchemy import create_engine
|
from sqlalchemy import create_engine, inspect
|
||||||
from sqlalchemy.orm import sessionmaker
|
from sqlalchemy.orm import sessionmaker
|
||||||
|
|
||||||
|
# Import the full model package so SQLAlchemy registers every mapped class
|
||||||
|
# before metadata creation and the first ORM query.
|
||||||
|
import ffx.model
|
||||||
from ffx.model.show import Base
|
from ffx.model.show import Base
|
||||||
|
|
||||||
from ffx.model.property import Property
|
from ffx.model.property import Property
|
||||||
@@ -11,6 +14,7 @@ from ffx.constants import DATABASE_VERSION
|
|||||||
|
|
||||||
|
|
||||||
DATABASE_VERSION_KEY = 'database_version'
|
DATABASE_VERSION_KEY = 'database_version'
|
||||||
|
EXPECTED_TABLE_NAMES = set(Base.metadata.tables.keys())
|
||||||
|
|
||||||
class DatabaseVersionException(Exception):
|
class DatabaseVersionException(Exception):
|
||||||
def __init__(self, errorMessage):
|
def __init__(self, errorMessage):
|
||||||
@@ -34,7 +38,7 @@ def databaseContext(databasePath: str = ''):
|
|||||||
databaseContext['engine'] = create_engine(databaseContext['url'])
|
databaseContext['engine'] = create_engine(databaseContext['url'])
|
||||||
databaseContext['session'] = sessionmaker(bind=databaseContext['engine'])
|
databaseContext['session'] = sessionmaker(bind=databaseContext['engine'])
|
||||||
|
|
||||||
Base.metadata.create_all(databaseContext['engine'])
|
bootstrapDatabaseIfNeeded(databaseContext)
|
||||||
|
|
||||||
# isSyncronuous = False
|
# isSyncronuous = False
|
||||||
# while not isSyncronuous:
|
# while not isSyncronuous:
|
||||||
@@ -51,6 +55,19 @@ def databaseContext(databasePath: str = ''):
|
|||||||
|
|
||||||
return databaseContext
|
return databaseContext
|
||||||
|
|
||||||
|
|
||||||
|
def databaseNeedsBootstrap(databaseContext) -> bool:
|
||||||
|
inspector = inspect(databaseContext['engine'])
|
||||||
|
existingTableNames = set(inspector.get_table_names())
|
||||||
|
return not EXPECTED_TABLE_NAMES.issubset(existingTableNames)
|
||||||
|
|
||||||
|
|
||||||
|
def bootstrapDatabaseIfNeeded(databaseContext):
|
||||||
|
if not databaseNeedsBootstrap(databaseContext):
|
||||||
|
return
|
||||||
|
|
||||||
|
Base.metadata.create_all(databaseContext['engine'])
|
||||||
|
|
||||||
def ensureDatabaseVersion(databaseContext):
|
def ensureDatabaseVersion(databaseContext):
|
||||||
|
|
||||||
currentDatabaseVersion = getDatabaseVersion(databaseContext)
|
currentDatabaseVersion = getDatabaseVersion(databaseContext)
|
||||||
@@ -67,9 +84,9 @@ def getDatabaseVersion(databaseContext):
|
|||||||
|
|
||||||
Session = databaseContext['session']
|
Session = databaseContext['session']
|
||||||
s = Session()
|
s = Session()
|
||||||
q = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY)
|
versionProperty = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY).first()
|
||||||
|
|
||||||
return int(q.first().value) if q.count() else 0
|
return int(versionProperty.value) if versionProperty is not None else 0
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"getDatabaseVersion(): {repr(ex)}")
|
raise click.ClickException(f"getDatabaseVersion(): {repr(ex)}")
|
||||||
|
|||||||
740
src/ffx/ffx.py
740
src/ffx/ffx.py
@@ -1,740 +0,0 @@
|
|||||||
#! /usr/bin/python3
|
|
||||||
|
|
||||||
import os, click, time, logging
|
|
||||||
|
|
||||||
from ffx.configuration_controller import ConfigurationController
|
|
||||||
|
|
||||||
from ffx.file_properties import FileProperties
|
|
||||||
|
|
||||||
from ffx.ffx_app import FfxApp
|
|
||||||
from ffx.ffx_controller import FfxController
|
|
||||||
from ffx.tmdb_controller import TmdbController
|
|
||||||
|
|
||||||
from ffx.database import databaseContext
|
|
||||||
|
|
||||||
from ffx.media_descriptor import MediaDescriptor
|
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
|
||||||
from ffx.show_descriptor import ShowDescriptor
|
|
||||||
|
|
||||||
from ffx.track_type import TrackType
|
|
||||||
from ffx.video_encoder import VideoEncoder
|
|
||||||
from ffx.track_disposition import TrackDisposition
|
|
||||||
from ffx.track_codec import TrackCodec
|
|
||||||
|
|
||||||
from ffx.process import executeProcess
|
|
||||||
from ffx.helper import filterFilename, substituteTmdbFilename
|
|
||||||
from ffx.helper import getEpisodeFileBasename
|
|
||||||
|
|
||||||
from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAULT_DTS_BANDWIDTH, DEFAULT_7_1_BANDWIDTH
|
|
||||||
|
|
||||||
from ffx.filter.quality_filter import QualityFilter
|
|
||||||
from ffx.filter.preset_filter import PresetFilter
|
|
||||||
|
|
||||||
from ffx.filter.nlmeans_filter import NlmeansFilter
|
|
||||||
|
|
||||||
from ffx.constants import VERSION
|
|
||||||
|
|
||||||
from ffx.shifted_season_controller import ShiftedSeasonController
|
|
||||||
|
|
||||||
|
|
||||||
@click.group()
|
|
||||||
@click.pass_context
|
|
||||||
@click.option('--database-file', type=str, default='', help='Path to database file')
|
|
||||||
@click.option('-v', '--verbose', type=int, default=0, help='Set verbosity of output')
|
|
||||||
@click.option("--dry-run", is_flag=True, default=False)
|
|
||||||
def ffx(ctx, database_file, verbose, dry_run):
|
|
||||||
"""FFX"""
|
|
||||||
|
|
||||||
ctx.obj = {}
|
|
||||||
|
|
||||||
ctx.obj['config'] = ConfigurationController()
|
|
||||||
|
|
||||||
ctx.obj['database'] = databaseContext(databasePath=database_file
|
|
||||||
if database_file else ctx.obj['config'].getDatabaseFilePath())
|
|
||||||
|
|
||||||
ctx.obj['dry_run'] = dry_run
|
|
||||||
ctx.obj['verbosity'] = verbose
|
|
||||||
|
|
||||||
# Critical 50
|
|
||||||
# Error 40
|
|
||||||
# Warning 30
|
|
||||||
# Info 20
|
|
||||||
# Debug 10
|
|
||||||
fileLogVerbosity = max(40 - verbose * 10, 10)
|
|
||||||
consoleLogVerbosity = max(20 - verbose * 10, 10)
|
|
||||||
|
|
||||||
ctx.obj['logger'] = logging.getLogger('FFX')
|
|
||||||
ctx.obj['logger'].setLevel(logging.DEBUG)
|
|
||||||
|
|
||||||
ffxFileHandler = logging.FileHandler(ctx.obj['config'].getLogFilePath())
|
|
||||||
ffxFileHandler.setLevel(fileLogVerbosity)
|
|
||||||
ffxConsoleHandler = logging.StreamHandler()
|
|
||||||
ffxConsoleHandler.setLevel(consoleLogVerbosity)
|
|
||||||
|
|
||||||
fileFormatter = logging.Formatter(
|
|
||||||
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
|
||||||
ffxFileHandler.setFormatter(fileFormatter)
|
|
||||||
consoleFormatter = logging.Formatter(
|
|
||||||
'%(message)s')
|
|
||||||
ffxConsoleHandler.setFormatter(consoleFormatter)
|
|
||||||
|
|
||||||
ctx.obj['logger'].addHandler(ffxConsoleHandler)
|
|
||||||
ctx.obj['logger'].addHandler(ffxFileHandler)
|
|
||||||
|
|
||||||
|
|
||||||
# Define a subcommand
|
|
||||||
@ffx.command()
|
|
||||||
def version():
|
|
||||||
click.echo(VERSION)
|
|
||||||
|
|
||||||
|
|
||||||
# Another subcommand
|
|
||||||
@ffx.command()
|
|
||||||
def help():
|
|
||||||
click.echo(f"ffx {VERSION}\n")
|
|
||||||
click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
|
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
|
||||||
@click.pass_context
|
|
||||||
@click.argument('filename', nargs=1)
|
|
||||||
def inspect(ctx, filename):
|
|
||||||
|
|
||||||
ctx.obj['command'] = 'inspect'
|
|
||||||
ctx.obj['arguments'] = {}
|
|
||||||
ctx.obj['arguments']['filename'] = filename
|
|
||||||
|
|
||||||
app = FfxApp(ctx.obj)
|
|
||||||
app.run()
|
|
||||||
|
|
||||||
|
|
||||||
def getUnmuxSequence(trackDescriptor: TrackDescriptor, sourcePath, targetPrefix, targetDirectory = ''):
|
|
||||||
|
|
||||||
# executable and input file
|
|
||||||
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
|
||||||
|
|
||||||
trackType = trackDescriptor.getType()
|
|
||||||
|
|
||||||
targetPathBase = os.path.join(targetDirectory, targetPrefix) if targetDirectory else targetPrefix
|
|
||||||
|
|
||||||
# mapping
|
|
||||||
commandTokens += ['-map',
|
|
||||||
f"0:{trackType.indicator()}:{trackDescriptor.getSubIndex()}",
|
|
||||||
'-c',
|
|
||||||
'copy']
|
|
||||||
|
|
||||||
trackCodec = trackDescriptor.getCodec()
|
|
||||||
|
|
||||||
# output format
|
|
||||||
codecFormat = trackCodec.format()
|
|
||||||
if codecFormat is not None:
|
|
||||||
commandTokens += ['-f', codecFormat]
|
|
||||||
|
|
||||||
# output filename
|
|
||||||
commandTokens += [f"{targetPathBase}.{trackCodec.extension()}"]
|
|
||||||
|
|
||||||
return commandTokens
|
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
|
||||||
@click.pass_context
|
|
||||||
|
|
||||||
@click.argument('paths', nargs=-1)
|
|
||||||
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
|
|
||||||
@click.option("-o", "--output-directory", type=str, default='')
|
|
||||||
@click.option("-s", "--subtitles-only", is_flag=True, default=False)
|
|
||||||
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
|
|
||||||
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
|
|
||||||
def unmux(ctx,
|
|
||||||
paths,
|
|
||||||
label,
|
|
||||||
output_directory,
|
|
||||||
subtitles_only,
|
|
||||||
nice,
|
|
||||||
cpu):
|
|
||||||
|
|
||||||
existingSourcePaths = [p for p in paths if os.path.isfile(p)]
|
|
||||||
ctx.obj['logger'].debug(f"\nUnmuxing {len(existingSourcePaths)} files")
|
|
||||||
|
|
||||||
ctx.obj['resource_limits'] = {}
|
|
||||||
ctx.obj['resource_limits']['niceness'] = nice
|
|
||||||
ctx.obj['resource_limits']['cpu_percent'] = cpu
|
|
||||||
|
|
||||||
for sourcePath in existingSourcePaths:
|
|
||||||
|
|
||||||
fp = FileProperties(ctx.obj, sourcePath)
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
sourceMediaDescriptor = fp.getMediaDescriptor()
|
|
||||||
|
|
||||||
season = fp.getSeason()
|
|
||||||
episode = fp.getEpisode()
|
|
||||||
|
|
||||||
#TODO: Recognition für alle Formate anpassen
|
|
||||||
targetLabel = label if label else fp.getFileBasename()
|
|
||||||
targetIndicator = f"_S{season}E{episode}" if label and season != -1 and episode != -1 else ''
|
|
||||||
|
|
||||||
if label and not targetIndicator:
|
|
||||||
ctx.obj['logger'].warning(f"Skipping file {fp.getFilename()}: Label set but no indicator recognized")
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
ctx.obj['logger'].info(f"\nUnmuxing file {fp.getFilename()}\n")
|
|
||||||
|
|
||||||
for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
|
|
||||||
|
|
||||||
if trackDescriptor.getType() == TrackType.SUBTITLE or not subtitles_only:
|
|
||||||
|
|
||||||
# SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
|
||||||
targetPrefix = f"{targetLabel}{targetIndicator}_{trackDescriptor.getIndex()}_{trackDescriptor.getLanguage().threeLetter()}"
|
|
||||||
|
|
||||||
td: TrackDisposition
|
|
||||||
for td in sorted(trackDescriptor.getDispositionSet(), key=lambda d: d.index()):
|
|
||||||
targetPrefix += f"_{td.indicator()}"
|
|
||||||
|
|
||||||
unmuxSequence = getUnmuxSequence(trackDescriptor, sourcePath, targetPrefix, targetDirectory = output_directory)
|
|
||||||
|
|
||||||
if unmuxSequence:
|
|
||||||
if not ctx.obj['dry_run']:
|
|
||||||
|
|
||||||
#TODO #425: Codec Enum
|
|
||||||
ctx.obj['logger'].info(f"Unmuxing stream {trackDescriptor.getIndex()} into file {targetPrefix}.{trackDescriptor.getCodec().extension()}")
|
|
||||||
|
|
||||||
ctx.obj['logger'].debug(f"Executing unmuxing sequence")
|
|
||||||
|
|
||||||
out, err, rc = executeProcess(unmuxSequence, context = ctx.obj)
|
|
||||||
if rc:
|
|
||||||
ctx.obj['logger'].error(f"Unmuxing of stream {trackDescriptor.getIndex()} failed with error ({rc}) {err}")
|
|
||||||
else:
|
|
||||||
ctx.obj['logger'].warning(f"Skipping stream with unknown codec")
|
|
||||||
except Exception as ex:
|
|
||||||
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
|
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
|
||||||
@click.pass_context
|
|
||||||
|
|
||||||
def shows(ctx):
|
|
||||||
|
|
||||||
ctx.obj['command'] = 'shows'
|
|
||||||
|
|
||||||
app = FfxApp(ctx.obj)
|
|
||||||
app.run()
|
|
||||||
|
|
||||||
|
|
||||||
def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
    """Ensure at most one DEFAULT and one FORCED track per track type.

    Checks video, audio and subtitle tracks (in that order) for duplicate
    DEFAULT/FORCED dispositions. When duplicates are found, the user is
    prompted for the sub-index of the track that should keep the flag and
    the media descriptor is updated accordingly.

    :param context: shared CLI context dict; only ``context['no_prompt']``
                    is read here.
    :param mediaDescriptor: descriptor whose track dispositions are checked
                            and, if necessary, corrected in place.
    :raises click.ClickException: duplicates were detected while
                                  ``no_prompt`` is set, so no interactive
                                  resolution is possible.
    """
    # (track type, human-readable label, accessor for that type's tracks)
    trackGroups = (
        (TrackType.VIDEO, 'video', mediaDescriptor.getVideoTracks),
        (TrackType.AUDIO, 'audio', mediaDescriptor.getAudioTracks),
        (TrackType.SUBTITLE, 'subtitle', mediaDescriptor.getSubtitleTracks),
    )
    # (disposition flag, human-readable label, setter resolving the conflict)
    dispositionKinds = (
        (TrackDisposition.DEFAULT, 'default', mediaDescriptor.setDefaultSubTrack),
        (TrackDisposition.FORCED, 'forced', mediaDescriptor.setForcedSubTrack),
    )

    # Iteration order matches the original hand-written sequence:
    # video-default, video-forced, audio-default, audio-forced,
    # subtitle-default, subtitle-forced.
    for trackType, typeLabel, getTracks in trackGroups:
        for disposition, dispLabel, setSubTrack in dispositionKinds:
            flagged = [t for t in getTracks() if t.getDispositionFlag(disposition)]
            if len(flagged) <= 1:
                continue
            if context['no_prompt']:
                raise click.ClickException(f"More than one {dispLabel} {typeLabel} stream detected and no prompt set")
            selectedSubIndex = click.prompt(f"More than one {dispLabel} {typeLabel} stream detected! Please select stream", type=int)
            setSubTrack(trackType, selectedSubIndex)
|
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9 or av1)", show_default=True)
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9 encoder")
@click.option('-p', '--preset', type=str, default="", help=f"Quality preset to be used with AV1 encoder")
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
@click.option('--ac3', type=int, default=DEFAULT_AC3_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 5.1 audio streams", show_default=True)
@click.option('--dts', type=int, default=DEFAULT_DTS_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 6.1 audio streams", show_default=True)
@click.option('--subtitle-directory', type=str, default='', help='Load subtitles from here')
@click.option('--subtitle-prefix', type=str, default='', help='Subtitle filename prefix')
@click.option('--language', type=str, multiple=True, help='Set stream language. Use format <stream index>:<3 letter iso code>')
@click.option('--title', type=str, multiple=True, help='Set stream title. Use format <stream index>:<title>')
@click.option('--default-video', type=int, default=-1, help='Index of default video stream')
@click.option('--forced-video', type=int, default=-1, help='Index of forced video stream')
@click.option('--default-audio', type=int, default=-1, help='Index of default audio stream')
@click.option('--forced-audio', type=int, default=-1, help='Index of forced audio stream')
@click.option('--default-subtitle', type=int, default=-1, help='Index of default subtitle stream')
@click.option('--forced-subtitle', type=int, default=-1, help='Index of forced subtitle stream')
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output streams order. Use format comma separated integers')
@click.option("--crop", is_flag=False, flag_value="default", default="none")
@click.option("--output-directory", type=str, default='')
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
@click.option("--denoise-use-hw", is_flag=True, default=False)
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs more details.')
@click.option('--denoise-patch-size', type=str, default='', help='Subimage size to apply filtering on luminosity plane. Reduces broader noise patterns but costly.')
@click.option('--denoise-chroma-patch-size', type=str, default='', help='Subimage size to apply filtering on chroma planes.')
@click.option('--denoise-research-window', type=str, default='', help='Range to search for comparable patches on luminosity plane. Better filtering but costly.')
@click.option('--denoise-chroma-research-window', type=str, default='', help='Range to search for comparable patches on chroma planes.')
@click.option('--show', type=int, default=-1, help='Set TMDB show identifier')
@click.option('--season', type=int, default=-1, help='Set season of show')
@click.option('--episode', type=int, default=-1, help='Set episode of show')
@click.option("--no-tmdb", is_flag=True, default=False)
@click.option("--no-pattern", is_flag=True, default=False)
@click.option("--dont-pass-dispositions", is_flag=True, default=False)
@click.option("--no-prompt", is_flag=True, default=False)
@click.option("--no-signature", is_flag=True, default=False)
@click.option("--keep-mkvmerge-metadata", is_flag=True, default=False)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
def convert(ctx,
            paths,
            label,
            video_encoder,
            quality,
            preset,
            stereo_bitrate,
            ac3,
            dts,
            subtitle_directory,
            subtitle_prefix,
            language,
            title,
            default_video,
            forced_video,
            default_audio,
            forced_audio,
            default_subtitle,
            forced_subtitle,
            rearrange_streams,
            crop,
            output_directory,
            denoise,
            denoise_use_hw,
            denoise_strength,
            denoise_patch_size,
            denoise_chroma_patch_size,
            denoise_research_window,
            denoise_chroma_research_window,
            show,
            season,
            episode,
            no_tmdb,
            no_pattern,
            dont_pass_dispositions,
            no_prompt,
            no_signature,
            keep_mkvmerge_metadata,
            nice,
            cpu):
    """Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin

    Files found under PATHS will be converted according to parameters.
    Filename extensions will be changed appropriately.
    Suffices will be appended to filename in case of multiple created files
    or if the filename has not changed."""

    startTime = time.perf_counter()

    context = ctx.obj

    context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)

    targetFormat = FfxController.DEFAULT_FILE_FORMAT
    targetExtension = FfxController.DEFAULT_FILE_EXTENSION

    # Propagate CLI switches into the shared context consumed downstream.
    context['use_tmdb'] = not no_tmdb
    context['use_pattern'] = not no_pattern
    context['no_prompt'] = no_prompt
    context['no_signature'] = no_signature
    context['keep_mkvmerge_metadata'] = keep_mkvmerge_metadata

    context['resource_limits'] = {}
    context['resource_limits']['niceness'] = nice
    context['resource_limits']['cpu_percent'] = cpu

    # Subtitle import is only active when BOTH directory and prefix are given.
    context['import_subtitles'] = (subtitle_directory and subtitle_prefix)
    if context['import_subtitles']:
        context['subtitle_directory'] = subtitle_directory
        context['subtitle_prefix'] = subtitle_prefix

    # Only existing files with a supported extension are processed; everything
    # else passed on the command line is silently ignored.
    existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]

    # CLI Overrides: collect every per-stream override requested on the
    # command line into one dict that is applied to the media descriptors.
    cliOverrides = {}

    if language:
        cliOverrides['languages'] = {}
        for overLang in language:
            olTokens = overLang.split(':')
            # Malformed entries (wrong token count, non-integer index) are
            # skipped with a warning instead of aborting the whole run.
            if len(olTokens) == 2:
                try:
                    cliOverrides['languages'][int(olTokens[0])] = olTokens[1]
                except ValueError:
                    ctx.obj['logger'].warning(f"Ignoring non-integer language index {olTokens[0]}")
                    continue

    if title:
        cliOverrides['titles'] = {}
        for overTitle in title:
            otTokens = overTitle.split(':')
            if len(otTokens) == 2:
                try:
                    cliOverrides['titles'][int(otTokens[0])] = otTokens[1]
                except ValueError:
                    ctx.obj['logger'].warning(f"Ignoring non-integer title index {otTokens[0]}")
                    continue

    # -1 is the sentinel for "not set" on all index options.
    if default_video != -1:
        cliOverrides['default_video'] = default_video
    if forced_video != -1:
        cliOverrides['forced_video'] = forced_video
    if default_audio != -1:
        cliOverrides['default_audio'] = default_audio
    if forced_audio != -1:
        cliOverrides['forced_audio'] = forced_audio
    if default_subtitle != -1:
        cliOverrides['default_subtitle'] = default_subtitle
    if forced_subtitle != -1:
        cliOverrides['forced_subtitle'] = forced_subtitle

    if show != -1 or season != -1 or episode != -1:
        # TMDB identity overrides only make sense for a single source file.
        if len(existingSourcePaths) > 1:
            context['logger'].warning(f"Ignoring TMDB show, season, episode overrides, not supported for multiple source files")
        else:
            cliOverrides['tmdb'] = {}
            if show != -1:
                cliOverrides['tmdb']['show'] = show
            if season != -1:
                cliOverrides['tmdb']['season'] = season
            if episode != -1:
                cliOverrides['tmdb']['episode'] = episode

    if rearrange_streams:
        try:
            cliOverrides['stream_order'] = [int(si) for si in rearrange_streams.split(",")]
        except ValueError:
            ctx.obj['logger'].error("Non-integer in rearrange stream parameter")
            raise click.Abort()

    # BUGFIX: this assignment previously happened BEFORE the
    # rearrange_streams parsing above, so a run where --rearrange-streams was
    # the only override never populated context['overrides'] and the
    # requested stream order was silently dropped.
    if cliOverrides:
        context['overrides'] = cliOverrides

    ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")

    # Split comma-separated quality/preset lists; non-numeric entries are
    # dropped (the lists are currently only used for debug output here).
    qualityTokens = quality.split(',')
    q_list = [q for q in qualityTokens if q.isnumeric()]
    ctx.obj['logger'].debug(f"Qualities: {q_list}")

    presetTokens = preset.split(',')
    p_list = [p for p in presetTokens if p.isnumeric()]
    ctx.obj['logger'].debug(f"Presets: {p_list}")

    # Normalize bitrates to ffmpeg's "<n>k" notation.
    context['bitrates'] = {}
    context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
    context['bitrates']['ac3'] = str(ac3) if str(ac3).endswith('k') else f"{ac3}k"
    context['bitrates']['dts'] = str(dts) if str(dts).endswith('k') else f"{dts}k"

    ctx.obj['logger'].debug(f"Stereo bitrate: {context['bitrates']['stereo']}")
    ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
    ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")

    # Process crop parameters. --crop is an optional-value flag: "none" means
    # disabled, "default" means enabled without explicit window, and a
    # "start,length" pair sets an explicit window.
    context['perform_crop'] = (crop != 'none')
    if context['perform_crop']:
        cTokens = crop.split(',')
        if cTokens and len(cTokens) == 2:
            context['crop_start'] = int(cTokens[0])
            context['crop_length'] = int(cTokens[1])
            ctx.obj['logger'].debug(f"Crop start={context['crop_start']} length={context['crop_length']}")

    tc = TmdbController() if context['use_tmdb'] else None

    qualityKwargs = {QualityFilter.QUALITY_KEY: quality}
    qf = QualityFilter(**qualityKwargs)

    # NOTE(review): the PresetFilter/NlmeansFilter instances below are
    # constructed without keeping a reference — presumably the filter
    # constructors register themselves into a shared chain; confirm before
    # refactoring this away.
    if context['video_encoder'] == VideoEncoder.AV1 and preset:
        presetKwargs = {PresetFilter.PRESET_KEY: preset}
        PresetFilter(**presetKwargs)

    denoiseKwargs = {}
    if denoise_strength:
        denoiseKwargs[NlmeansFilter.STRENGTH_KEY] = denoise_strength
    if denoise_patch_size:
        denoiseKwargs[NlmeansFilter.PATCH_SIZE_KEY] = denoise_patch_size
    if denoise_chroma_patch_size:
        denoiseKwargs[NlmeansFilter.CHROMA_PATCH_SIZE_KEY] = denoise_chroma_patch_size
    if denoise_research_window:
        denoiseKwargs[NlmeansFilter.RESEARCH_WINDOW_KEY] = denoise_research_window
    if denoise_chroma_research_window:
        denoiseKwargs[NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY] = denoise_chroma_research_window
    if denoise != 'none' or denoiseKwargs:
        NlmeansFilter(**denoiseKwargs)

    chainYield = list(qf.getChainYield())

    ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(chainYield)} jobs")

    jobIndex = 0

    for sourcePath in existingSourcePaths:

        # Separate basedir, basename and extension for current source file
        sourceDirectory = os.path.dirname(sourcePath)
        sourceFilename = os.path.basename(sourcePath)
        sourcePathTokens = sourceFilename.split('.')

        sourceFileBasename = '.'.join(sourcePathTokens[:-1])
        sourceFilenameExtension = sourcePathTokens[-1]

        ctx.obj['logger'].info(f"\nProcessing file {sourcePath}")

        # Suffix fragments (e.g. season/episode indicator) appended to the
        # target basename later on.
        targetSuffices = {}

        mediaFileProperties = FileProperties(context, sourceFilename)

        ssc = ShiftedSeasonController(context)

        showId = mediaFileProperties.getShowId()

        #HINT: -1 if not set. CLI overrides win over values parsed from the
        # filename.
        if 'tmdb' in cliOverrides.keys() and 'season' in cliOverrides['tmdb']:
            showSeason = cliOverrides['tmdb']['season']
        else:
            showSeason = mediaFileProperties.getSeason()

        if 'tmdb' in cliOverrides.keys() and 'episode' in cliOverrides['tmdb']:
            showEpisode = cliOverrides['tmdb']['episode']
        else:
            showEpisode = mediaFileProperties.getEpisode()

        ctx.obj['logger'].debug(f"Season={showSeason} Episode={showEpisode}")

        sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()

        #HINT: This is None if the filename did not match anything in database
        currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None

        ctx.obj['logger'].debug(f"Pattern matching: {'No' if currentPattern is None else 'Yes'}")

        # Setup FfxController accordingly depending on pattern matching is enabled and a pattern was matched
        if currentPattern is None:

            checkUniqueDispositions(context, sourceMediaDescriptor)
            currentShowDescriptor = None

            if context['import_subtitles']:
                sourceMediaDescriptor.importSubtitles(context['subtitle_directory'],
                                                      context['subtitle_prefix'],
                                                      showSeason,
                                                      showEpisode)

            if cliOverrides:
                sourceMediaDescriptor.applyOverrides(cliOverrides)

            fc = FfxController(context, sourceMediaDescriptor)

        else:
            targetMediaDescriptor = currentPattern.getMediaDescriptor(ctx.obj)
            checkUniqueDispositions(context, targetMediaDescriptor)
            currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)

            if context['import_subtitles']:
                targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
                                                      context['subtitle_prefix'],
                                                      showSeason,
                                                      showEpisode)

            ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")

            if cliOverrides:
                targetMediaDescriptor.applyOverrides(cliOverrides)

            ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")

            ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")

            fc = FfxController(context, targetMediaDescriptor, sourceMediaDescriptor)

        # Digit widths for filename index/indicator formatting; fall back to
        # the class defaults when no pattern (and thus no show descriptor)
        # matched.
        indexSeasonDigits = currentShowDescriptor.getIndexSeasonDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
        indexEpisodeDigits = currentShowDescriptor.getIndexEpisodeDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
        indicatorSeasonDigits = currentShowDescriptor.getIndicatorSeasonDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
        indicatorEpisodeDigits = currentShowDescriptor.getIndicatorEpisodeDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS

        # Shift season and episode if defined for this show
        if ('tmdb' not in cliOverrides.keys() and showId != -1
                and showSeason != -1 and showEpisode != -1):
            shiftedShowSeason, shiftedShowEpisode = ssc.shiftSeason(showId,
                                                                    season=showSeason,
                                                                    episode=showEpisode)
        else:
            shiftedShowSeason = showSeason
            shiftedShowEpisode = showEpisode

        # Assemble target filename accordingly depending on TMDB lookup is enabled
        #HINT: -1 if not set
        showId = cliOverrides['tmdb']['show'] if 'tmdb' in cliOverrides.keys() and 'show' in cliOverrides['tmdb'] else (-1 if currentShowDescriptor is None else currentShowDescriptor.getId())

        if context['use_tmdb'] and showId != -1 and shiftedShowSeason != -1 and shiftedShowEpisode != -1:

            # BUGFIX: debug message previously read "episode{...}" (missing '=').
            ctx.obj['logger'].debug(f"Querying TMDB for show_id={showId} season={shiftedShowSeason} episode={shiftedShowEpisode}")

            if currentPattern is None:
                sName, showYear = tc.getShowNameAndYear(showId)
                showName = filterFilename(sName)
                showFilenamePrefix = f"{showName} ({str(showYear)})"
            else:
                showFilenamePrefix = currentShowDescriptor.getFilenamePrefix()

            tmdbEpisodeResult = tc.queryEpisode(showId, shiftedShowSeason, shiftedShowEpisode)

            ctx.obj['logger'].debug(f"tmdbEpisodeResult={tmdbEpisodeResult}")

            if tmdbEpisodeResult:
                substitutedEpisodeName = filterFilename(substituteTmdbFilename(tmdbEpisodeResult['name']))
                sourceFileBasename = getEpisodeFileBasename(showFilenamePrefix,
                                                            substitutedEpisodeName,
                                                            shiftedShowSeason,
                                                            shiftedShowEpisode,
                                                            indexSeasonDigits,
                                                            indexEpisodeDigits,
                                                            indicatorSeasonDigits,
                                                            indicatorEpisodeDigits,
                                                            context=ctx.obj)

        # When a custom label is used, carry the season/episode indicator as a
        # filename suffix so the information is not lost.
        if label:
            if shiftedShowSeason > -1 and shiftedShowEpisode > -1:
                targetSuffices['se'] = f"S{shiftedShowSeason:0{indicatorSeasonDigits}d}E{shiftedShowEpisode:0{indicatorEpisodeDigits}d}"
            elif shiftedShowEpisode > -1:
                targetSuffices['se'] = f"E{shiftedShowEpisode:0{indicatorEpisodeDigits}d}"
            else:
                if 'se' in targetSuffices.keys():
                    del targetSuffices['se']

        ctx.obj['logger'].debug(f"fileBasename={sourceFileBasename}")

        # One job per filter-chain variant produced by the quality filter.
        for chainIteration in chainYield:

            ctx.obj['logger'].debug(f"\nchain iteration: {chainIteration}\n")

            chainVariant = '-'.join([fy['variant'] for fy in chainIteration])

            ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} variant={chainVariant}")
            jobIndex += 1

            ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
            ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")

            # TMDB-derived basename wins unless an explicit label was given.
            targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label

            targetFilenameTokens = [targetFileBasename]

            if 'se' in targetSuffices.keys():
                targetFilenameTokens += [targetSuffices['se']]

            # Each filter in the chain may contribute its own suffix tokens.
            for filterYield in chainIteration:
                targetFilenameTokens += filterYield['suffices']

            targetFilename = f"{'_'.join(targetFilenameTokens)}.{targetExtension}"

            targetPath = os.path.join(output_directory if output_directory else sourceDirectory, targetFilename)

            ctx.obj['logger'].info(f"Creating file {targetFilename}")

            fc.runJob(sourcePath,
                      targetPath,
                      targetFormat,
                      context['video_encoder'],
                      chainIteration)

    endTime = time.perf_counter()
    ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")
|
|
||||||
|
|
||||||
|
|
||||||
# Entry point when this module is executed directly: run the click group.
if __name__ == '__main__':
    ffx()
|
|
||||||
@@ -1,38 +1,50 @@
|
|||||||
import os, click
|
import os, click
|
||||||
|
from logging import Logger
|
||||||
|
|
||||||
|
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
|
||||||
|
|
||||||
from ffx.media_descriptor import MediaDescriptor
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
|
||||||
from ffx.audio_layout import AudioLayout
|
from ffx.audio_layout import AudioLayout
|
||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
|
from ffx.track_codec import TrackCodec
|
||||||
from ffx.video_encoder import VideoEncoder
|
from ffx.video_encoder import VideoEncoder
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
from ffx.track_disposition import TrackDisposition
|
|
||||||
from ffx.track_codec import TrackCodec
|
|
||||||
|
|
||||||
from ffx.constants import DEFAULT_CROP_START, DEFAULT_CROP_LENGTH
|
from ffx.constants import (
|
||||||
|
DEFAULT_CONTAINER_EXTENSION,
|
||||||
|
DEFAULT_CONTAINER_FORMAT,
|
||||||
|
DEFAULT_VIDEO_ENCODER_LABEL,
|
||||||
|
DEFAULT_cut_start,
|
||||||
|
DEFAULT_cut_length,
|
||||||
|
FFMPEG_COMMAND_TOKENS,
|
||||||
|
FFMPEG_NULL_OUTPUT_TOKENS,
|
||||||
|
SUPPORTED_INPUT_FILE_EXTENSIONS,
|
||||||
|
)
|
||||||
|
|
||||||
from ffx.filter.quality_filter import QualityFilter
|
from ffx.filter.quality_filter import QualityFilter
|
||||||
from ffx.filter.preset_filter import PresetFilter
|
from ffx.filter.preset_filter import PresetFilter
|
||||||
|
from ffx.filter.crop_filter import CropFilter
|
||||||
|
|
||||||
|
from ffx.model.pattern import Pattern
|
||||||
|
|
||||||
|
|
||||||
class FfxController():
|
class FfxController():
|
||||||
|
|
||||||
COMMAND_TOKENS = ['ffmpeg', '-y']
|
COMMAND_TOKENS = list(FFMPEG_COMMAND_TOKENS)
|
||||||
NULL_TOKENS = ['-f', 'null', '/dev/null'] # -f null /dev/null
|
NULL_TOKENS = list(FFMPEG_NULL_OUTPUT_TOKENS) # -f null /dev/null
|
||||||
|
|
||||||
TEMP_FILE_NAME = "ffmpeg2pass-0.log"
|
TEMP_FILE_NAME = "ffmpeg2pass-0.log"
|
||||||
|
|
||||||
DEFAULT_VIDEO_ENCODER = VideoEncoder.VP9.label()
|
DEFAULT_VIDEO_ENCODER = DEFAULT_VIDEO_ENCODER_LABEL
|
||||||
|
|
||||||
DEFAULT_FILE_FORMAT = 'webm'
|
DEFAULT_FILE_FORMAT = DEFAULT_CONTAINER_FORMAT
|
||||||
DEFAULT_FILE_EXTENSION = 'webm'
|
DEFAULT_FILE_EXTENSION = DEFAULT_CONTAINER_EXTENSION
|
||||||
|
|
||||||
INPUT_FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
INPUT_FILE_EXTENSIONS = list(SUPPORTED_INPUT_FILE_EXTENSIONS)
|
||||||
|
|
||||||
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
|
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
|
||||||
|
|
||||||
#!
|
# SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
|
||||||
SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
|
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self,
|
||||||
context : dict,
|
context : dict,
|
||||||
@@ -40,12 +52,22 @@ class FfxController():
|
|||||||
sourceMediaDescriptor : MediaDescriptor = None):
|
sourceMediaDescriptor : MediaDescriptor = None):
|
||||||
|
|
||||||
self.__context = context
|
self.__context = context
|
||||||
self.__sourceMediaDescriptor = sourceMediaDescriptor
|
|
||||||
self.__targetMediaDescriptor = targetMediaDescriptor
|
self.__targetMediaDescriptor = targetMediaDescriptor
|
||||||
|
self.__sourceMediaDescriptor = sourceMediaDescriptor
|
||||||
|
|
||||||
self.__configurationData = self.__context['config'].getData()
|
self.__mdcs = MediaDescriptorChangeSet(context,
|
||||||
|
targetMediaDescriptor,
|
||||||
|
sourceMediaDescriptor)
|
||||||
|
|
||||||
self.__logger = context['logger']
|
self.__logger: Logger = context['logger']
|
||||||
|
|
||||||
|
|
||||||
|
def executeCommandSequence(self, commandSequence):
|
||||||
|
out, err, rc = executeProcess(commandSequence, context=self.__context)
|
||||||
|
if rc:
|
||||||
|
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
|
||||||
|
return out, err, rc
|
||||||
|
|
||||||
|
|
||||||
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
|
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
|
||||||
@@ -55,6 +77,14 @@ class FfxController():
|
|||||||
'-pix_fmt', 'yuv420p10le']
|
'-pix_fmt', 'yuv420p10le']
|
||||||
|
|
||||||
|
|
||||||
|
# -c:v libx264 -preset slow -crf 17
|
||||||
|
def generateH264Tokens(self, quality, subIndex : int = 0):
|
||||||
|
|
||||||
|
return [f"-c:v:{int(subIndex)}", 'libx264',
|
||||||
|
"-preset", "slow",
|
||||||
|
'-crf', str(quality)]
|
||||||
|
|
||||||
|
|
||||||
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
|
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
|
||||||
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
|
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
|
||||||
|
|
||||||
@@ -82,33 +112,84 @@ class FfxController():
|
|||||||
'-auto-alt-ref', '1',
|
'-auto-alt-ref', '1',
|
||||||
'-lag-in-frames', '25']
|
'-lag-in-frames', '25']
|
||||||
|
|
||||||
|
def generateVideoCopyTokens(self, subIndex):
|
||||||
|
return [f"-c:v:{int(subIndex)}",
|
||||||
|
'copy']
|
||||||
|
|
||||||
|
def generateAudioCopyTokens(self, subIndex):
|
||||||
|
return [f"-c:a:{int(subIndex)}", 'copy']
|
||||||
|
|
||||||
|
def generateSubtitleCopyTokens(self, subIndex):
|
||||||
|
return [f"-c:s:{int(subIndex)}", 'copy']
|
||||||
|
|
||||||
|
def generateAttachmentCopyTokens(self, subIndex):
|
||||||
|
return [f"-c:t:{int(subIndex)}", 'copy']
|
||||||
|
|
||||||
|
def generateCopyTokens(self):
|
||||||
|
copyTokens = []
|
||||||
|
|
||||||
|
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
|
copyTokens += self.generateVideoCopyTokens(trackDescriptor.getSubIndex())
|
||||||
|
|
||||||
|
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO):
|
||||||
|
copyTokens += self.generateAudioCopyTokens(trackDescriptor.getSubIndex())
|
||||||
|
|
||||||
|
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.SUBTITLE):
|
||||||
|
copyTokens += self.generateSubtitleCopyTokens(trackDescriptor.getSubIndex())
|
||||||
|
|
||||||
|
attachmentDescriptors = (
|
||||||
|
self.__sourceMediaDescriptor.getTrackDescriptors(trackType=TrackType.ATTACHMENT)
|
||||||
|
if self.__sourceMediaDescriptor is not None
|
||||||
|
else self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.ATTACHMENT)
|
||||||
|
)
|
||||||
|
for trackDescriptor in attachmentDescriptors:
|
||||||
|
copyTokens += self.generateAttachmentCopyTokens(trackDescriptor.getSubIndex())
|
||||||
|
|
||||||
|
return copyTokens
|
||||||
|
|
||||||
|
|
||||||
def generateCropTokens(self):
|
def generateCropTokens(self):
|
||||||
|
|
||||||
if 'crop_start' in self.__context.keys() and 'crop_length' in self.__context.keys():
|
if 'cut_start' in self.__context.keys() and 'cut_length' in self.__context.keys():
|
||||||
cropStart = int(self.__context['crop_start'])
|
cropStart = int(self.__context['cut_start'])
|
||||||
cropLength = int(self.__context['crop_length'])
|
cropLength = int(self.__context['cut_length'])
|
||||||
else:
|
else:
|
||||||
cropStart = DEFAULT_CROP_START
|
cropStart = DEFAULT_cut_start
|
||||||
cropLength = DEFAULT_CROP_LENGTH
|
cropLength = DEFAULT_cut_length
|
||||||
|
|
||||||
return ['-ss', str(cropStart), '-t', str(cropLength)]
|
return ['-ss', str(cropStart), '-t', str(cropLength)]
|
||||||
|
|
||||||
|
|
||||||
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
|
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
|
||||||
outputFilePath = f"{filePathBase}{'.'+str(ext) if ext else ''}"
|
|
||||||
|
self.__logger.debug(f"FfxController.generateOutputTokens(): base='{filePathBase}' format='{format}' ext='{ext}'")
|
||||||
|
|
||||||
|
outputFilePath = f"{filePathBase}{('.'+str(ext)) if ext else ''}"
|
||||||
if format:
|
if format:
|
||||||
return ['-f', format, outputFilePath]
|
return ['-f', format, outputFilePath]
|
||||||
else:
|
else:
|
||||||
return [outputFilePath]
|
return [outputFilePath]
|
||||||
|
|
||||||
|
|
||||||
|
def generateEncodingMetadataTags(self, videoEncoder: VideoEncoder, quality, preset) -> dict:
|
||||||
|
metadataTags = {}
|
||||||
|
|
||||||
|
if videoEncoder in (VideoEncoder.AV1, VideoEncoder.H264, VideoEncoder.VP9):
|
||||||
|
metadataTags["ENCODING_QUALITY"] = str(quality)
|
||||||
|
|
||||||
|
if videoEncoder == VideoEncoder.AV1:
|
||||||
|
metadataTags["ENCODING_PRESET"] = str(preset)
|
||||||
|
|
||||||
|
return metadataTags
|
||||||
|
|
||||||
|
|
||||||
def generateAudioEncodingTokens(self):
|
def generateAudioEncodingTokens(self):
|
||||||
"""Generates ffmpeg options audio streams including channel remapping, codec and bitrate"""
|
"""Generates ffmpeg options audio streams including channel remapping, codec and bitrate"""
|
||||||
|
|
||||||
audioTokens = []
|
audioTokens = []
|
||||||
|
|
||||||
targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
|
# targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
|
||||||
|
targetAudioTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO)
|
||||||
|
|
||||||
trackSubIndex = 0
|
trackSubIndex = 0
|
||||||
for trackDescriptor in targetAudioTrackDescriptors:
|
for trackDescriptor in targetAudioTrackDescriptors:
|
||||||
@@ -144,136 +225,125 @@ class FfxController():
|
|||||||
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
|
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
|
||||||
f"-b:a:{trackSubIndex}",
|
f"-b:a:{trackSubIndex}",
|
||||||
self.__context['bitrates']['ac3']]
|
self.__context['bitrates']['ac3']]
|
||||||
|
|
||||||
|
# -ac 5 ?
|
||||||
|
if trackAudioLayout == AudioLayout.LAYOUT_5_0:
|
||||||
|
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||||
|
'libopus',
|
||||||
|
f"-filter:a:{trackSubIndex}",
|
||||||
|
'channelmap=channel_layout=5.0',
|
||||||
|
f"-b:a:{trackSubIndex}",
|
||||||
|
self.__context['bitrates']['ac3']]
|
||||||
|
|
||||||
trackSubIndex += 1
|
trackSubIndex += 1
|
||||||
return audioTokens
|
return audioTokens
|
||||||
|
|
||||||
|
|
||||||
# -disposition:s:0 default -disposition:s:1 0
|
|
||||||
def generateDispositionTokens(self):
|
|
||||||
|
|
||||||
targetTrackDescriptors = self.__targetMediaDescriptor.getAllTrackDescriptors()
|
|
||||||
|
|
||||||
sourceTrackDescriptors = ([] if self.__sourceMediaDescriptor is None
|
|
||||||
else self.__sourceMediaDescriptor.getAllTrackDescriptors())
|
|
||||||
|
|
||||||
dispositionTokens = []
|
|
||||||
|
|
||||||
for trackIndex in range(len(targetTrackDescriptors)):
|
|
||||||
|
|
||||||
td = targetTrackDescriptors[trackIndex]
|
|
||||||
|
|
||||||
#HINT: No dispositions for pgs subtitle tracks that have no external file source
|
|
||||||
if (td.getExternalSourceFilePath()
|
|
||||||
or td.getCodec() != TrackCodec.PGS):
|
|
||||||
|
|
||||||
subIndex = td.getSubIndex()
|
|
||||||
streamIndicator = td.getType().indicator()
|
|
||||||
|
|
||||||
|
|
||||||
sourceDispositionSet = sourceTrackDescriptors[td.getSourceIndex()].getDispositionSet() if sourceTrackDescriptors else set()
|
|
||||||
|
|
||||||
#TODO: Alles discarden was im targetDescriptor vorhanden ist (?)
|
|
||||||
sourceDispositionSet.discard(TrackDisposition.DEFAULT)
|
|
||||||
|
|
||||||
dispositionSet = td.getDispositionSet() | sourceDispositionSet
|
|
||||||
|
|
||||||
if dispositionSet:
|
|
||||||
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in dispositionSet])]
|
|
||||||
else:
|
|
||||||
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
|
|
||||||
|
|
||||||
return dispositionTokens
|
|
||||||
|
|
||||||
|
|
||||||
def generateMetadataTokens(self):
|
|
||||||
|
|
||||||
metadataTokens = []
|
|
||||||
|
|
||||||
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
|
|
||||||
|
|
||||||
signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
|
|
||||||
removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
|
|
||||||
removeTrackKeys = metadataConfiguration['streams']['remove'] if 'streams' in metadataConfiguration.keys() and 'remove' in metadataConfiguration['streams'].keys() else []
|
|
||||||
|
|
||||||
mediaTags = {k:v for k,v in self.__targetMediaDescriptor.getTags().items() if not k in removeGlobalKeys}
|
|
||||||
|
|
||||||
if (not 'no_signature' in self.__context.keys()
|
|
||||||
or not self.__context['no_signature']):
|
|
||||||
outputMediaTags = mediaTags | signatureTags
|
|
||||||
else:
|
|
||||||
outputMediaTags = mediaTags
|
|
||||||
|
|
||||||
for tagKey, tagValue in outputMediaTags.items():
|
|
||||||
metadataTokens += [f"-metadata:g",
|
|
||||||
f"{tagKey}={tagValue}"]
|
|
||||||
|
|
||||||
for removeKey in removeGlobalKeys:
|
|
||||||
metadataTokens += [f"-metadata:g",
|
|
||||||
f"{removeKey}="]
|
|
||||||
|
|
||||||
|
|
||||||
removeMkvmergeMetadata = (not 'keep_mkvmerge_metadata' in self.__context.keys()
|
|
||||||
or not self.__context['keep_mkvmerge_metadata'])
|
|
||||||
|
|
||||||
#HINT: With current ffmpeg version track metadata tags are not passed to the outfile
|
|
||||||
for td in self.__targetMediaDescriptor.getAllTrackDescriptors():
|
|
||||||
|
|
||||||
typeIndicator = td.getType().indicator()
|
|
||||||
subIndex = td.getSubIndex()
|
|
||||||
|
|
||||||
for tagKey, tagValue in td.getTags().items():
|
|
||||||
|
|
||||||
if not tagKey in removeTrackKeys:
|
|
||||||
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
|
|
||||||
f"{tagKey}={tagValue}"]
|
|
||||||
|
|
||||||
for removeKey in removeTrackKeys:
|
|
||||||
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
|
|
||||||
f"{removeKey}="]
|
|
||||||
|
|
||||||
|
|
||||||
return metadataTokens
|
|
||||||
|
|
||||||
|
|
||||||
def runJob(self,
|
def runJob(self,
|
||||||
sourcePath,
|
sourcePath,
|
||||||
targetPath,
|
targetPath,
|
||||||
targetFormat: str = '',
|
targetFormat: str = '',
|
||||||
videoEncoder: VideoEncoder = VideoEncoder.VP9,
|
chainIteration: list = [],
|
||||||
chainIteration: list = []):
|
cropArguments: dict = {},
|
||||||
|
currentPattern: Pattern = None):
|
||||||
# quality: int = DEFAULT_QUALITY,
|
# quality: int = DEFAULT_QUALITY,
|
||||||
# preset: int = DEFAULT_AV1_PRESET):
|
# preset: int = DEFAULT_AV1_PRESET):
|
||||||
|
|
||||||
|
|
||||||
|
videoEncoder: VideoEncoder = self.__context.get('video_encoder', VideoEncoder.VP9)
|
||||||
|
|
||||||
|
|
||||||
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
|
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
|
||||||
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
|
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
|
||||||
|
|
||||||
|
cropFilters = [fy for fy in chainIteration if fy['identifier'] == 'crop']
|
||||||
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
|
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
|
||||||
|
deinterlaceFilters = [fy for fy in chainIteration if fy['identifier'] == 'bwdif']
|
||||||
|
|
||||||
|
|
||||||
|
if qualityFilters and (quality := qualityFilters[0]['parameters']['quality']):
|
||||||
|
self.__logger.info(f"Setting quality {quality} from command line parameter")
|
||||||
|
elif currentPattern is not None and (quality := currentPattern.quality):
|
||||||
|
self.__logger.info(f"Setting quality {quality} from pattern default")
|
||||||
|
else:
|
||||||
|
quality = (QualityFilter.DEFAULT_H264_QUALITY
|
||||||
|
if (videoEncoder == VideoEncoder.H264)
|
||||||
|
else QualityFilter.DEFAULT_VP9_QUALITY)
|
||||||
|
self.__logger.info(f"Setting quality {quality} from default")
|
||||||
|
|
||||||
|
|
||||||
quality = qualityFilters[0]['parameters']['quality'] if qualityFilters else QualityFilter.DEFAULT_QUALITY
|
|
||||||
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
|
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
|
||||||
|
self.__context['encoding_metadata_tags'] = self.generateEncodingMetadataTags(
|
||||||
|
videoEncoder,
|
||||||
|
quality,
|
||||||
|
preset,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
denoiseTokens = denoiseFilters[0]['tokens'] if denoiseFilters else []
|
filterParamTokens = []
|
||||||
|
|
||||||
|
if cropArguments:
|
||||||
|
|
||||||
|
cropParams = (f"crop="
|
||||||
|
+ f"{cropArguments[CropFilter.OUTPUT_WIDTH_KEY]}"
|
||||||
|
+ f":{cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]}"
|
||||||
|
+ f":{cropArguments[CropFilter.OFFSET_X_KEY]}"
|
||||||
|
+ f":{cropArguments[CropFilter.OFFSET_Y_KEY]}")
|
||||||
|
|
||||||
|
filterParamTokens.append(cropParams)
|
||||||
|
|
||||||
|
filterParamTokens.extend(denoiseFilters[0]['tokens'] if denoiseFilters else [])
|
||||||
|
filterParamTokens.extend(deinterlaceFilters[0]['tokens'] if deinterlaceFilters else [])
|
||||||
|
|
||||||
|
deinterlaceFilters
|
||||||
|
|
||||||
|
filterTokens = ['-vf', ', '.join(filterParamTokens)] if filterParamTokens else []
|
||||||
|
|
||||||
|
|
||||||
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
||||||
|
|
||||||
|
if videoEncoder == VideoEncoder.COPY:
|
||||||
|
|
||||||
|
commandSequence = (commandTokens
|
||||||
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
|
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
|
||||||
|
+ self.__mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
|
commandSequence += self.__mdcs.generateMetadataTokens()
|
||||||
|
commandSequence += self.generateCopyTokens()
|
||||||
|
|
||||||
|
if self.__context['perform_cut']:
|
||||||
|
commandSequence += self.generateCropTokens()
|
||||||
|
|
||||||
|
commandSequence += self.generateOutputTokens(targetPath,
|
||||||
|
targetFormat)
|
||||||
|
|
||||||
|
self.__logger.debug("FfxController.runJob(): Running command sequence")
|
||||||
|
|
||||||
|
if not self.__context['dry_run']:
|
||||||
|
self.executeCommandSequence(commandSequence)
|
||||||
|
return
|
||||||
|
|
||||||
if videoEncoder == VideoEncoder.AV1:
|
if videoEncoder == VideoEncoder.AV1:
|
||||||
|
|
||||||
commandSequence = (commandTokens
|
commandSequence = (commandTokens
|
||||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
|
||||||
+ self.generateDispositionTokens())
|
+ self.__mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
# Optional tokens
|
# Optional tokens
|
||||||
commandSequence += self.generateMetadataTokens()
|
commandSequence += self.__mdcs.generateMetadataTokens()
|
||||||
commandSequence += denoiseTokens
|
commandSequence += filterTokens
|
||||||
|
|
||||||
commandSequence += (self.generateAudioEncodingTokens()
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
+ self.generateAV1Tokens(int(quality), int(preset))
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
+ self.generateAudioEncodingTokens())
|
if td.getCodec != TrackCodec.PNG:
|
||||||
|
commandSequence += self.generateAV1Tokens(int(quality), int(preset))
|
||||||
|
|
||||||
if self.__context['perform_crop']:
|
commandSequence += self.generateAudioEncodingTokens()
|
||||||
commandSequence += FfxController.generateCropTokens()
|
|
||||||
|
if self.__context['perform_cut']:
|
||||||
|
commandSequence += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence += self.generateOutputTokens(targetPath,
|
commandSequence += self.generateOutputTokens(targetPath,
|
||||||
targetFormat)
|
targetFormat)
|
||||||
@@ -281,7 +351,38 @@ class FfxController():
|
|||||||
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
|
||||||
|
|
||||||
if not self.__context['dry_run']:
|
if not self.__context['dry_run']:
|
||||||
executeProcess(commandSequence, context = self.__context)
|
self.executeCommandSequence(commandSequence)
|
||||||
|
|
||||||
|
|
||||||
|
if videoEncoder == VideoEncoder.H264:
|
||||||
|
|
||||||
|
commandSequence = (commandTokens
|
||||||
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
|
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
|
||||||
|
+ self.__mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
|
# Optional tokens
|
||||||
|
commandSequence += self.__mdcs.generateMetadataTokens()
|
||||||
|
commandSequence += filterTokens
|
||||||
|
|
||||||
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
|
if td.getCodec != TrackCodec.PNG:
|
||||||
|
commandSequence += self.generateH264Tokens(int(quality))
|
||||||
|
|
||||||
|
commandSequence += self.generateAudioEncodingTokens()
|
||||||
|
|
||||||
|
if self.__context['perform_cut']:
|
||||||
|
commandSequence += self.generateCropTokens()
|
||||||
|
|
||||||
|
commandSequence += self.generateOutputTokens(targetPath,
|
||||||
|
targetFormat)
|
||||||
|
|
||||||
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
|
||||||
|
|
||||||
|
if not self.__context['dry_run']:
|
||||||
|
self.executeCommandSequence(commandSequence)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if videoEncoder == VideoEncoder.VP9:
|
if videoEncoder == VideoEncoder.VP9:
|
||||||
@@ -294,11 +395,14 @@ class FfxController():
|
|||||||
# the required bitrate for the second run is determined and recorded
|
# the required bitrate for the second run is determined and recorded
|
||||||
# TODO: Results seems to be slightly better with first pass omitted,
|
# TODO: Results seems to be slightly better with first pass omitted,
|
||||||
# Confirm or find better filter settings for 2-pass
|
# Confirm or find better filter settings for 2-pass
|
||||||
# commandSequence1 += self.__context['denoiser'].generateDenoiseTokens()
|
# commandSequence1 += self.__context['denoiser'].generatefilterTokens()
|
||||||
|
|
||||||
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
|
if td.getCodec != TrackCodec.PNG:
|
||||||
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
|
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
|
||||||
|
|
||||||
if self.__context['perform_crop']:
|
if self.__context['perform_cut']:
|
||||||
commandSequence1 += self.generateCropTokens()
|
commandSequence1 += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence1 += FfxController.NULL_TOKENS
|
commandSequence1 += FfxController.NULL_TOKENS
|
||||||
@@ -309,20 +413,25 @@ class FfxController():
|
|||||||
self.__logger.debug(f"FfxController.runJob(): Running command sequence 1")
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence 1")
|
||||||
|
|
||||||
if not self.__context['dry_run']:
|
if not self.__context['dry_run']:
|
||||||
executeProcess(commandSequence1, context = self.__context)
|
self.executeCommandSequence(commandSequence1)
|
||||||
|
|
||||||
commandSequence2 = (commandTokens
|
commandSequence2 = (commandTokens
|
||||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
|
||||||
+ self.generateDispositionTokens())
|
+ self.__mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
# Optional tokens
|
# Optional tokens
|
||||||
commandSequence2 += self.generateMetadataTokens()
|
commandSequence2 += self.__mdcs.generateMetadataTokens()
|
||||||
commandSequence2 += denoiseTokens
|
commandSequence2 += filterTokens
|
||||||
|
|
||||||
commandSequence2 += self.generateVP9Pass2Tokens(int(quality)) + self.generateAudioEncodingTokens()
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
|
if td.getCodec != TrackCodec.PNG:
|
||||||
|
commandSequence2 += self.generateVP9Pass2Tokens(int(quality))
|
||||||
|
|
||||||
if self.__context['perform_crop']:
|
commandSequence2 += self.generateAudioEncodingTokens()
|
||||||
|
|
||||||
|
if self.__context['perform_cut']:
|
||||||
commandSequence2 += self.generateCropTokens()
|
commandSequence2 += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence2 += self.generateOutputTokens(targetPath,
|
commandSequence2 += self.generateOutputTokens(targetPath,
|
||||||
@@ -331,9 +440,7 @@ class FfxController():
|
|||||||
self.__logger.debug(f"FfxController.runJob(): Running command sequence 2")
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence 2")
|
||||||
|
|
||||||
if not self.__context['dry_run']:
|
if not self.__context['dry_run']:
|
||||||
out, err, rc = executeProcess(commandSequence2, context = self.__context)
|
self.executeCommandSequence(commandSequence2)
|
||||||
if rc:
|
|
||||||
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -358,4 +465,4 @@ class FfxController():
|
|||||||
str(length),
|
str(length),
|
||||||
path]
|
path]
|
||||||
|
|
||||||
out, err, rc = executeProcess(commandTokens, context = self.__context)
|
self.executeCommandSequence(commandTokens)
|
||||||
|
|||||||
@@ -1,23 +1,47 @@
|
|||||||
import os, re, json
|
import os, re, json
|
||||||
|
|
||||||
|
from .constants import (
|
||||||
|
DEFAULT_CROPDETECT_DURATION_SECONDS,
|
||||||
|
DEFAULT_CROPDETECT_SEEK_SECONDS,
|
||||||
|
FFMPEG_COMMAND_TOKENS,
|
||||||
|
FFMPEG_NULL_OUTPUT_TOKENS,
|
||||||
|
)
|
||||||
from .media_descriptor import MediaDescriptor
|
from .media_descriptor import MediaDescriptor
|
||||||
from .pattern_controller import PatternController
|
from .pattern_controller import PatternController
|
||||||
|
|
||||||
|
from ffx.filter.crop_filter import CropFilter
|
||||||
|
|
||||||
from .process import executeProcess
|
from .process import executeProcess
|
||||||
|
|
||||||
from ffx.model.pattern import Pattern
|
from ffx.model.pattern import Pattern
|
||||||
|
|
||||||
|
|
||||||
class FileProperties():
|
class FileProperties():
|
||||||
|
_cropdetect_cache: dict[tuple[str, int, int, int, int], dict[str, str]] = {}
|
||||||
|
|
||||||
FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
||||||
|
FFPROBE_COMMAND_TOKENS = ["ffprobe", "-hide_banner", "-show_format", "-show_streams", "-of", "json"]
|
||||||
|
|
||||||
SE_INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
|
SE_INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
|
||||||
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
|
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
|
||||||
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
|
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
|
||||||
|
|
||||||
|
CROPDETECT_PATTERN = 'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'
|
||||||
|
|
||||||
DEFAULT_INDEX_DIGITS = 3
|
DEFAULT_INDEX_DIGITS = 3
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def extractSeasonEpisodeValues(cls, sourceText: str) -> tuple[int | None, int] | None:
|
||||||
|
seasonEpisodeMatch = re.search(cls.SEASON_EPISODE_INDICATOR_MATCH, str(sourceText))
|
||||||
|
if seasonEpisodeMatch is not None:
|
||||||
|
return int(seasonEpisodeMatch.group(1)), int(seasonEpisodeMatch.group(2))
|
||||||
|
|
||||||
|
episodeMatch = re.search(cls.EPISODE_INDICATOR_MATCH, str(sourceText))
|
||||||
|
if episodeMatch is not None:
|
||||||
|
return None, int(episodeMatch.group(1))
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
def __init__(self, context, sourcePath):
|
def __init__(self, context, sourcePath):
|
||||||
|
|
||||||
self.context = context
|
self.context = context
|
||||||
@@ -40,9 +64,10 @@ class FileProperties():
|
|||||||
self.__sourceFilenameExtension = ''
|
self.__sourceFilenameExtension = ''
|
||||||
|
|
||||||
self.__pc = PatternController(context)
|
self.__pc = PatternController(context)
|
||||||
|
self.__usePattern = bool(self.context.get('use_pattern', True))
|
||||||
|
|
||||||
# Checking if database contains matching pattern
|
# Checking if database contains matching pattern
|
||||||
matchResult = self.__pc.matchFilename(self.__sourceFilename)
|
matchResult = self.__pc.matchFilename(self.__sourceFilename) if self.__usePattern else {}
|
||||||
|
|
||||||
self.__logger.debug(f"FileProperties.__init__(): Match result: {matchResult}")
|
self.__logger.debug(f"FileProperties.__init__(): Match result: {matchResult}")
|
||||||
|
|
||||||
@@ -52,26 +77,67 @@ class FileProperties():
|
|||||||
databaseMatchedGroups = matchResult['match'].groups()
|
databaseMatchedGroups = matchResult['match'].groups()
|
||||||
self.__logger.debug(f"FileProperties.__init__(): Matched groups: {databaseMatchedGroups}")
|
self.__logger.debug(f"FileProperties.__init__(): Matched groups: {databaseMatchedGroups}")
|
||||||
|
|
||||||
seIndicator = databaseMatchedGroups[0]
|
indicatorSource = databaseMatchedGroups[0]
|
||||||
|
|
||||||
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, seIndicator)
|
|
||||||
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, seIndicator)
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
self.__logger.debug(f"FileProperties.__init__(): Checking file name for indicator {self.__sourceFilename}")
|
self.__logger.debug(f"FileProperties.__init__(): Checking file name for indicator {self.__sourceFilename}")
|
||||||
|
indicatorSource = self.__sourceFilename
|
||||||
|
|
||||||
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, self.__sourceFilename)
|
seasonEpisodeValues = self.extractSeasonEpisodeValues(indicatorSource)
|
||||||
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, self.__sourceFilename)
|
if seasonEpisodeValues is None:
|
||||||
|
|
||||||
if se_match is not None:
|
|
||||||
self.__season = int(se_match.group(1))
|
|
||||||
self.__episode = int(se_match.group(2))
|
|
||||||
elif e_match is not None:
|
|
||||||
self.__season = -1
|
|
||||||
self.__episode = int(e_match.group(1))
|
|
||||||
else:
|
|
||||||
self.__season = -1
|
self.__season = -1
|
||||||
self.__episode = -1
|
self.__episode = -1
|
||||||
|
else:
|
||||||
|
sourceSeason, sourceEpisode = seasonEpisodeValues
|
||||||
|
self.__season = -1 if sourceSeason is None else int(sourceSeason)
|
||||||
|
self.__episode = int(sourceEpisode)
|
||||||
|
|
||||||
|
self.__ffprobeData = None
|
||||||
|
|
||||||
|
def _getCropdetectWindow(self):
|
||||||
|
cropdetectContext = self.context.get('cropdetect', {})
|
||||||
|
|
||||||
|
seekSeconds = int(cropdetectContext.get('seek_seconds', DEFAULT_CROPDETECT_SEEK_SECONDS))
|
||||||
|
durationSeconds = int(cropdetectContext.get('duration_seconds', DEFAULT_CROPDETECT_DURATION_SECONDS))
|
||||||
|
|
||||||
|
if seekSeconds < 0:
|
||||||
|
raise ValueError("Crop detection seek seconds must be zero or greater.")
|
||||||
|
if durationSeconds <= 0:
|
||||||
|
raise ValueError("Crop detection duration seconds must be greater than zero.")
|
||||||
|
|
||||||
|
return seekSeconds, durationSeconds
|
||||||
|
|
||||||
|
def _getCropdetectCacheKey(self):
|
||||||
|
sourceStat = os.stat(self.__sourcePath)
|
||||||
|
seekSeconds, durationSeconds = self._getCropdetectWindow()
|
||||||
|
|
||||||
|
return (
|
||||||
|
os.path.abspath(self.__sourcePath),
|
||||||
|
sourceStat.st_mtime_ns,
|
||||||
|
sourceStat.st_size,
|
||||||
|
seekSeconds,
|
||||||
|
durationSeconds,
|
||||||
|
)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _clear_cropdetect_cache(cls):
|
||||||
|
cls._cropdetect_cache.clear()
|
||||||
|
|
||||||
|
def _getFfprobeData(self):
|
||||||
|
if self.__ffprobeData is not None:
|
||||||
|
return self.__ffprobeData
|
||||||
|
|
||||||
|
ffprobeOutput, ffprobeError, returnCode = executeProcess(
|
||||||
|
FileProperties.FFPROBE_COMMAND_TOKENS + [self.__sourcePath]
|
||||||
|
)
|
||||||
|
|
||||||
|
if 'Invalid data found when processing input' in ffprobeError:
|
||||||
|
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
||||||
|
|
||||||
|
if returnCode != 0:
|
||||||
|
raise Exception(f"ffprobe returned with error {returnCode}")
|
||||||
|
|
||||||
|
self.__ffprobeData = json.loads(ffprobeOutput)
|
||||||
|
return self.__ffprobeData
|
||||||
|
|
||||||
|
|
||||||
def getFormatData(self):
|
def getFormatData(self):
|
||||||
@@ -94,22 +160,7 @@ class FileProperties():
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
return self._getFfprobeData()['format']
|
||||||
# ffprobe -hide_banner -show_format -of json
|
|
||||||
ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffprobe",
|
|
||||||
"-hide_banner",
|
|
||||||
"-show_format",
|
|
||||||
"-of", "json",
|
|
||||||
self.__sourcePath]) #,
|
|
||||||
#context = self.context)
|
|
||||||
|
|
||||||
if 'Invalid data found when processing input' in ffprobeError:
|
|
||||||
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
|
||||||
|
|
||||||
if returnCode != 0:
|
|
||||||
raise Exception(f"ffprobe returned with error {returnCode}")
|
|
||||||
|
|
||||||
return json.loads(ffprobeOutput)['format']
|
|
||||||
|
|
||||||
|
|
||||||
def getStreamData(self):
|
def getStreamData(self):
|
||||||
@@ -154,24 +205,64 @@ class FileProperties():
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
return self._getFfprobeData()['streams']
|
||||||
|
|
||||||
# ffprobe -hide_banner -show_streams -of json
|
|
||||||
ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffprobe",
|
|
||||||
"-hide_banner",
|
|
||||||
"-show_streams",
|
|
||||||
"-of", "json",
|
|
||||||
self.__sourcePath]) #,
|
|
||||||
#context = self.context)
|
|
||||||
|
|
||||||
if 'Invalid data found when processing input' in ffprobeError:
|
|
||||||
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
|
||||||
|
|
||||||
|
def findCropArguments(self):
|
||||||
|
""""""
|
||||||
|
|
||||||
|
cacheKey = self._getCropdetectCacheKey()
|
||||||
|
cachedCropArguments = FileProperties._cropdetect_cache.get(cacheKey)
|
||||||
|
if cachedCropArguments is not None:
|
||||||
|
self.__logger.debug(
|
||||||
|
"FileProperties.findCropArguments(): Reusing cached cropdetect result for %s",
|
||||||
|
self.__sourcePath,
|
||||||
|
)
|
||||||
|
return dict(cachedCropArguments)
|
||||||
|
|
||||||
|
seekSeconds, durationSeconds = self._getCropdetectWindow()
|
||||||
|
|
||||||
|
cropdetectCommand = (
|
||||||
|
list(FFMPEG_COMMAND_TOKENS)
|
||||||
|
+ ["-ss", str(seekSeconds), "-i", self.__sourcePath, "-t", str(durationSeconds), "-vf", "cropdetect"]
|
||||||
|
+ list(FFMPEG_NULL_OUTPUT_TOKENS)
|
||||||
|
)
|
||||||
|
_ffmpegOutput, ffmpegError, returnCode = executeProcess(cropdetectCommand, context=self.context)
|
||||||
|
|
||||||
|
errorLines = ffmpegError.split('\n')
|
||||||
|
|
||||||
|
crops = {}
|
||||||
|
for el in errorLines:
|
||||||
|
|
||||||
|
cropdetect_match = re.search(FileProperties.CROPDETECT_PATTERN, el)
|
||||||
|
|
||||||
|
if cropdetect_match is not None:
|
||||||
|
cropParam = str(cropdetect_match.group(0))
|
||||||
|
|
||||||
|
crops[cropParam] = crops.get(cropParam, 0) + 1
|
||||||
|
|
||||||
|
if crops:
|
||||||
|
cropString = max(crops.items(), key=lambda item: (item[1], item[0]))[0]
|
||||||
|
|
||||||
|
cropTokens = cropString.split('=')
|
||||||
|
cropValueTokens = cropTokens[1]
|
||||||
|
cropValues = cropValueTokens.split(':')
|
||||||
|
|
||||||
|
cropArguments = {
|
||||||
|
CropFilter.OUTPUT_WIDTH_KEY: cropValues[0],
|
||||||
|
CropFilter.OUTPUT_HEIGHT_KEY: cropValues[1],
|
||||||
|
CropFilter.OFFSET_X_KEY: cropValues[2],
|
||||||
|
CropFilter.OFFSET_Y_KEY: cropValues[3]
|
||||||
|
}
|
||||||
|
FileProperties._cropdetect_cache[cacheKey] = dict(cropArguments)
|
||||||
|
return cropArguments
|
||||||
|
|
||||||
if returnCode != 0:
|
if returnCode != 0:
|
||||||
raise Exception(f"ffprobe returned with error {returnCode}")
|
raise Exception(f"ffmpeg cropdetect returned with error {returnCode}")
|
||||||
|
|
||||||
|
FileProperties._cropdetect_cache[cacheKey] = {}
|
||||||
return json.loads(ffprobeOutput)['streams']
|
return {}
|
||||||
|
|
||||||
|
|
||||||
def getMediaDescriptor(self):
|
def getMediaDescriptor(self):
|
||||||
|
|||||||
51
src/ffx/filter/crop_filter.py
Normal file
51
src/ffx/filter/crop_filter.py
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
import itertools
|
||||||
|
|
||||||
|
from .filter import Filter
|
||||||
|
|
||||||
|
|
||||||
|
class CropFilter(Filter):
|
||||||
|
|
||||||
|
IDENTIFIER = 'crop'
|
||||||
|
|
||||||
|
OUTPUT_WIDTH_KEY = 'output_width'
|
||||||
|
OUTPUT_HEIGHT_KEY = 'output_height'
|
||||||
|
OFFSET_X_KEY = 'x_offset'
|
||||||
|
OFFSET_Y_KEY = 'y_offset'
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY, 0))
|
||||||
|
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY, 0))
|
||||||
|
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY, 0))
|
||||||
|
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY, 0))
|
||||||
|
|
||||||
|
super().__init__(self)
|
||||||
|
|
||||||
|
def setArguments(self, **kwargs):
|
||||||
|
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY))
|
||||||
|
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY))
|
||||||
|
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY,))
|
||||||
|
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY,))
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
payload = {'identifier': CropFilter.IDENTIFIER,
|
||||||
|
'parameters': {
|
||||||
|
CropFilter.OUTPUT_WIDTH_KEY: self.__outputWidth,
|
||||||
|
CropFilter.OUTPUT_HEIGHT_KEY: self.__outputHeight,
|
||||||
|
CropFilter.OFFSET_X_KEY: self.__offsetX,
|
||||||
|
CropFilter.OFFSET_Y_KEY: self.__offsetY
|
||||||
|
},
|
||||||
|
'suffices': [],
|
||||||
|
'variant': f"C{self.__outputWidth}-{self.__outputHeight}-{self.__offsetX}-{self.__offsetY}",
|
||||||
|
'tokens': ['crop='
|
||||||
|
+ f"{self.__outputWidth}"
|
||||||
|
+ f":{self.__outputHeight}"
|
||||||
|
+ f":{self.__offsetX}"
|
||||||
|
+ f":{self.__offsetY}"]}
|
||||||
|
|
||||||
|
return payload
|
||||||
|
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
yield self.getPayload()
|
||||||
140
src/ffx/filter/deinterlace_filter.py
Normal file
140
src/ffx/filter/deinterlace_filter.py
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
import itertools
|
||||||
|
|
||||||
|
from .filter import Filter
|
||||||
|
|
||||||
|
|
||||||
|
class DeinterlaceFilter(Filter):
|
||||||
|
|
||||||
|
IDENTIFIER = 'bwdif'
|
||||||
|
|
||||||
|
# DEFAULT_STRENGTH: float = 2.8
|
||||||
|
# DEFAULT_PATCH_SIZE: int = 13
|
||||||
|
# DEFAULT_CHROMA_PATCH_SIZE: int = 9
|
||||||
|
# DEFAULT_RESEARCH_WINDOW: int = 23
|
||||||
|
# DEFAULT_CHROMA_RESEARCH_WINDOW: int= 17
|
||||||
|
|
||||||
|
# STRENGTH_KEY = 'strength'
|
||||||
|
# PATCH_SIZE_KEY = 'patch_size'
|
||||||
|
# CHROMA_PATCH_SIZE_KEY = 'chroma_patch_size'
|
||||||
|
# RESEARCH_WINDOW_KEY = 'research_window'
|
||||||
|
# CHROMA_RESEARCH_WINDOW_KEY = 'chroma_research_window'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
# self.__useHardware = kwargs.get('use_hardware', False)
|
||||||
|
|
||||||
|
# self.__strengthList = []
|
||||||
|
# strength = kwargs.get(NlmeansFilter.STRENGTH_KEY, '')
|
||||||
|
# if strength:
|
||||||
|
# strengthTokens = strength.split(',')
|
||||||
|
# for st in strengthTokens:
|
||||||
|
# try:
|
||||||
|
# strengthValue = float(st)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Strength value has to be of type float')
|
||||||
|
# if strengthValue < 1.0 or strengthValue > 30.0:
|
||||||
|
# raise ValueError('NlmeansFilter: Strength value has to be between 1.0 and 30.0')
|
||||||
|
# self.__strengthList.append(strengthValue)
|
||||||
|
# else:
|
||||||
|
# self.__strengthList = [NlmeansFilter.DEFAULT_STRENGTH]
|
||||||
|
|
||||||
|
# self.__patchSizeList = []
|
||||||
|
# patchSize = kwargs.get(NlmeansFilter.PATCH_SIZE_KEY, '')
|
||||||
|
# if patchSize:
|
||||||
|
# patchSizeTokens = patchSize.split(',')
|
||||||
|
# for pst in patchSizeTokens:
|
||||||
|
# try:
|
||||||
|
# patchSizeValue = int(pst)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Patch size value has to be of type int')
|
||||||
|
# if patchSizeValue < 0 or patchSizeValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Patch size value has to be between 0 and 99')
|
||||||
|
# if patchSizeValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Patch size value has to an odd number')
|
||||||
|
# self.__patchSizeList.append(patchSizeValue)
|
||||||
|
# else:
|
||||||
|
# self.__patchSizeList = [NlmeansFilter.DEFAULT_PATCH_SIZE]
|
||||||
|
|
||||||
|
# self.__chromaPatchSizeList = []
|
||||||
|
# chromaPatchSize = kwargs.get(NlmeansFilter.CHROMA_PATCH_SIZE_KEY, '')
|
||||||
|
# if chromaPatchSize:
|
||||||
|
# chromaPatchSizeTokens = chromaPatchSize.split(',')
|
||||||
|
# for cpst in chromaPatchSizeTokens:
|
||||||
|
# try:
|
||||||
|
# chromaPatchSizeValue = int(pst)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma patch size value has to be of type int')
|
||||||
|
# if chromaPatchSizeValue < 0 or chromaPatchSizeValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma patch value has to be between 0 and 99')
|
||||||
|
# if chromaPatchSizeValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma patch value has to an odd number')
|
||||||
|
# self.__chromaPatchSizeList.append(chromaPatchSizeValue)
|
||||||
|
# else:
|
||||||
|
# self.__chromaPatchSizeList = [NlmeansFilter.DEFAULT_CHROMA_PATCH_SIZE]
|
||||||
|
|
||||||
|
# self.__researchWindowList = []
|
||||||
|
# researchWindow = kwargs.get(NlmeansFilter.RESEARCH_WINDOW_KEY, '')
|
||||||
|
# if researchWindow:
|
||||||
|
# researchWindowTokens = researchWindow.split(',')
|
||||||
|
# for rwt in researchWindowTokens:
|
||||||
|
# try:
|
||||||
|
# researchWindowValue = int(rwt)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Research window value has to be of type int')
|
||||||
|
# if researchWindowValue < 0 or researchWindowValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Research window value has to be between 0 and 99')
|
||||||
|
# if researchWindowValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Research window value has to an odd number')
|
||||||
|
# self.__researchWindowList.append(researchWindowValue)
|
||||||
|
# else:
|
||||||
|
# self.__researchWindowList = [NlmeansFilter.DEFAULT_RESEARCH_WINDOW]
|
||||||
|
|
||||||
|
# self.__chromaResearchWindowList = []
|
||||||
|
# chromaResearchWindow = kwargs.get(NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY, '')
|
||||||
|
# if chromaResearchWindow:
|
||||||
|
# chromaResearchWindowTokens = chromaResearchWindow.split(',')
|
||||||
|
# for crwt in chromaResearchWindowTokens:
|
||||||
|
# try:
|
||||||
|
# chromaResearchWindowValue = int(crwt)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma research window value has to be of type int')
|
||||||
|
# if chromaResearchWindowValue < 0 or chromaResearchWindowValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma research window value has to be between 0 and 99')
|
||||||
|
# if chromaResearchWindowValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma research window value has to an odd number')
|
||||||
|
# self.__chromaResearchWindowList.append(chromaResearchWindowValue)
|
||||||
|
# else:
|
||||||
|
# self.__chromaResearchWindowList = [NlmeansFilter.DEFAULT_CHROMA_RESEARCH_WINDOW]
|
||||||
|
|
||||||
|
super().__init__(self)
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
# strength = iteration[0]
|
||||||
|
# patchSize = iteration[1]
|
||||||
|
# chromaPatchSize = iteration[2]
|
||||||
|
# researchWindow = iteration[3]
|
||||||
|
# chromaResearchWindow = iteration[4]
|
||||||
|
|
||||||
|
suffices = []
|
||||||
|
|
||||||
|
# filterName = 'nlmeans_opencl' if self.__useHardware else 'nlmeans'
|
||||||
|
|
||||||
|
payload = {'identifier': DeinterlaceFilter.IDENTIFIER,
|
||||||
|
'parameters': {},
|
||||||
|
'suffices': suffices,
|
||||||
|
'variant': f"DEINT",
|
||||||
|
'tokens': ['bwdif=mode=1']}
|
||||||
|
|
||||||
|
return payload
|
||||||
|
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
# for it in itertools.product(self.__strengthList,
|
||||||
|
# self.__patchSizeList,
|
||||||
|
# self.__chromaPatchSizeList,
|
||||||
|
# self.__researchWindowList,
|
||||||
|
# self.__chromaResearchWindowList):
|
||||||
|
yield self.getPayload()
|
||||||
@@ -144,7 +144,7 @@ class NlmeansFilter(Filter):
|
|||||||
'suffices': suffices,
|
'suffices': suffices,
|
||||||
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
|
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
|
||||||
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
|
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
|
||||||
'tokens': ['-vf', f"{filterName}=s={strength}"
|
'tokens': [f"{filterName}=s={strength}"
|
||||||
+ f":p={patchSize}"
|
+ f":p={patchSize}"
|
||||||
+ f":pc={chromaPatchSize}"
|
+ f":pc={chromaPatchSize}"
|
||||||
+ f":r={researchWindow}"
|
+ f":r={researchWindow}"
|
||||||
|
|||||||
@@ -1,18 +1,24 @@
|
|||||||
import itertools
|
import click
|
||||||
|
|
||||||
from .filter import Filter
|
from .filter import Filter
|
||||||
|
|
||||||
|
from ffx.video_encoder import VideoEncoder
|
||||||
|
|
||||||
|
|
||||||
class QualityFilter(Filter):
|
class QualityFilter(Filter):
|
||||||
|
|
||||||
IDENTIFIER = 'quality'
|
IDENTIFIER = 'quality'
|
||||||
|
|
||||||
DEFAULT_QUALITY = 32
|
DEFAULT_VP9_QUALITY = 32
|
||||||
|
DEFAULT_H264_QUALITY = 17
|
||||||
|
|
||||||
QUALITY_KEY = 'quality'
|
QUALITY_KEY = 'quality'
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
context = click.get_current_context().obj
|
||||||
|
|
||||||
|
|
||||||
self.__qualitiesList = []
|
self.__qualitiesList = []
|
||||||
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
|
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
|
||||||
if qualities:
|
if qualities:
|
||||||
@@ -26,7 +32,9 @@ class QualityFilter(Filter):
|
|||||||
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
|
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
|
||||||
self.__qualitiesList.append(qualityValue)
|
self.__qualitiesList.append(qualityValue)
|
||||||
else:
|
else:
|
||||||
self.__qualitiesList = [QualityFilter.DEFAULT_QUALITY]
|
|
||||||
|
self.__qualitiesList = [None]
|
||||||
|
|
||||||
|
|
||||||
super().__init__(self)
|
super().__init__(self)
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,10 @@
|
|||||||
import re, logging
|
import re
|
||||||
|
|
||||||
from jinja2 import Environment, Undefined
|
from jinja2 import Environment, Undefined
|
||||||
from .constants import DEFAULT_OUTPUT_FILENAME_TEMPLATE
|
from .constants import DEFAULT_OUTPUT_FILENAME_TEMPLATE
|
||||||
from .configuration_controller import ConfigurationController
|
from .configuration_controller import ConfigurationController
|
||||||
|
from .logging_utils import get_ffx_logger
|
||||||
|
from .show_descriptor import ShowDescriptor
|
||||||
|
|
||||||
|
|
||||||
class EmptyStringUndefined(Undefined):
|
class EmptyStringUndefined(Undefined):
|
||||||
@@ -15,8 +17,55 @@ DIFF_REMOVED_KEY = 'removed'
|
|||||||
DIFF_CHANGED_KEY = 'changed'
|
DIFF_CHANGED_KEY = 'changed'
|
||||||
DIFF_UNCHANGED_KEY = 'unchanged'
|
DIFF_UNCHANGED_KEY = 'unchanged'
|
||||||
|
|
||||||
|
FILENAME_FILTER_TRANSLATION = str.maketrans(
|
||||||
|
{
|
||||||
|
"/": "-",
|
||||||
|
":": ";",
|
||||||
|
"*": "",
|
||||||
|
"'": "",
|
||||||
|
"?": "#",
|
||||||
|
"♥": "",
|
||||||
|
"’": "",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
TMDB_FILLER_MARKERS = (" (*)", "(*)")
|
||||||
|
TMDB_EPISODE_RANGE_SUFFIX_REGEX = re.compile(r"\(([0-9]+)[-/]([0-9]+)\)$")
|
||||||
|
TMDB_EPISODE_PART_SUFFIX_REGEX = re.compile(r"\(([0-9]+)\)$")
|
||||||
|
RICH_COLOR_REGEX = re.compile(r"\[[a-z_]+\](.+)\[/[a-z_]+\]")
|
||||||
|
|
||||||
def dictDiff(a : dict, b : dict):
|
|
||||||
|
def dictDiff(a : dict, b : dict, ignoreKeys: list = [], removeKeys: list = []):
|
||||||
|
"""
|
||||||
|
ignoreKeys: Ignored keys are filtered from calculating diff at all
|
||||||
|
removeKeys: Override diff calculation to remove keys certainly
|
||||||
|
"""
|
||||||
|
|
||||||
|
a_filtered = {k:v for k,v in a.items() if not k in ignoreKeys}
|
||||||
|
b_filtered = {k:v for k,v in b.items() if not k in ignoreKeys and k not in removeKeys}
|
||||||
|
|
||||||
|
a_only = {k:v for k,v in a_filtered.items() if not k in b_filtered.keys()}
|
||||||
|
b_only = {k:v for k,v in b_filtered.items() if not k in a_filtered.keys()}
|
||||||
|
|
||||||
|
a_b = set(a_filtered.keys()) & set(b_filtered.keys())
|
||||||
|
|
||||||
|
changed = {k:b_filtered[k] for k in a_b if a_filtered[k] != b_filtered[k]}
|
||||||
|
unchanged = {k:b_filtered[k] for k in a_b if a_filtered[k] == b_filtered[k]}
|
||||||
|
|
||||||
|
diffResult = {}
|
||||||
|
|
||||||
|
|
||||||
|
if a_only:
|
||||||
|
diffResult[DIFF_REMOVED_KEY] = a_only
|
||||||
|
diffResult[DIFF_UNCHANGED_KEY] = unchanged
|
||||||
|
if b_only:
|
||||||
|
diffResult[DIFF_ADDED_KEY] = b_only
|
||||||
|
if changed:
|
||||||
|
diffResult[DIFF_CHANGED_KEY] = changed
|
||||||
|
|
||||||
|
return diffResult
|
||||||
|
|
||||||
|
|
||||||
|
def dictKeysDiff(a : dict, b : dict):
|
||||||
|
|
||||||
a_keys = set(a.keys())
|
a_keys = set(a.keys())
|
||||||
b_keys = set(b.keys())
|
b_keys = set(b.keys())
|
||||||
@@ -40,9 +89,10 @@ def dictDiff(a : dict, b : dict):
|
|||||||
|
|
||||||
return diffResult
|
return diffResult
|
||||||
|
|
||||||
|
|
||||||
def dictCache(element: dict, cache: list = []):
|
def dictCache(element: dict, cache: list = []):
|
||||||
for index in range(len(cache)):
|
for index in range(len(cache)):
|
||||||
diff = dictDiff(cache[index], element)
|
diff = dictKeysDiff(cache[index], element)
|
||||||
if not diff:
|
if not diff:
|
||||||
return index, cache
|
return index, cache
|
||||||
cache.append(element)
|
cache.append(element)
|
||||||
@@ -53,11 +103,13 @@ def setDiff(a : set, b : set) -> set:
|
|||||||
|
|
||||||
a_only = a - b
|
a_only = a - b
|
||||||
b_only = b - a
|
b_only = b - a
|
||||||
|
a_and_b = a & b
|
||||||
|
|
||||||
diffResult = {}
|
diffResult = {}
|
||||||
|
|
||||||
if a_only:
|
if a_only:
|
||||||
diffResult[DIFF_REMOVED_KEY] = a_only
|
diffResult[DIFF_REMOVED_KEY] = a_only
|
||||||
|
diffResult[DIFF_UNCHANGED_KEY] = a_and_b
|
||||||
if b_only:
|
if b_only:
|
||||||
diffResult[DIFF_ADDED_KEY] = b_only
|
diffResult[DIFF_ADDED_KEY] = b_only
|
||||||
|
|
||||||
@@ -78,47 +130,45 @@ def filterFilename(fileName: str) -> str:
|
|||||||
"""This filter replaces charactes from TMDB responses with characters
|
"""This filter replaces charactes from TMDB responses with characters
|
||||||
less problemating when using in filenames or removes them"""
|
less problemating when using in filenames or removes them"""
|
||||||
|
|
||||||
fileName = str(fileName).replace('/', '-')
|
return str(fileName).translate(FILENAME_FILTER_TRANSLATION).strip()
|
||||||
fileName = str(fileName).replace(':', ';')
|
|
||||||
fileName = str(fileName).replace('*', '')
|
|
||||||
fileName = str(fileName).replace("'", '')
|
|
||||||
fileName = str(fileName).replace("?", '#')
|
|
||||||
|
|
||||||
return fileName.strip()
|
|
||||||
|
|
||||||
def substituteTmdbFilename(fileName: str) -> str:
|
def substituteTmdbFilename(fileName: str) -> str:
|
||||||
"""If chaining this method with filterFilename use this one first as the latter will destroy some patterns"""
|
"""If chaining this method with filterFilename use this one first as the latter will destroy some patterns"""
|
||||||
|
|
||||||
# This indicates filler episodes in TMDB episode names
|
normalizedFileName = str(fileName)
|
||||||
fileName = str(fileName).replace(' (*)', '')
|
|
||||||
fileName = str(fileName).replace('(*)', '')
|
|
||||||
|
|
||||||
# This indicates the index of multi-episode files
|
for fillerMarker in TMDB_FILLER_MARKERS:
|
||||||
episodePartMatch = re.search("\\(([0-9]+)\\)$", fileName)
|
normalizedFileName = normalizedFileName.replace(fillerMarker, '')
|
||||||
|
|
||||||
|
episodeRangeMatch = TMDB_EPISODE_RANGE_SUFFIX_REGEX.search(normalizedFileName)
|
||||||
|
if episodeRangeMatch is not None:
|
||||||
|
partFirstIndex, partLastIndex = episodeRangeMatch.groups()
|
||||||
|
return TMDB_EPISODE_RANGE_SUFFIX_REGEX.sub(
|
||||||
|
f"Teil {partFirstIndex}-{partLastIndex}",
|
||||||
|
normalizedFileName,
|
||||||
|
count=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
episodePartMatch = TMDB_EPISODE_PART_SUFFIX_REGEX.search(normalizedFileName)
|
||||||
if episodePartMatch is not None:
|
if episodePartMatch is not None:
|
||||||
partSuffix = str(episodePartMatch.group(0))
|
partIndex = episodePartMatch.group(1)
|
||||||
partIndex = episodePartMatch.groups()[0]
|
return TMDB_EPISODE_PART_SUFFIX_REGEX.sub(
|
||||||
fileName = str(fileName).replace(partSuffix, f"Teil {partIndex}")
|
f"Teil {partIndex}",
|
||||||
|
normalizedFileName,
|
||||||
|
count=1,
|
||||||
|
)
|
||||||
|
|
||||||
# Also multi-episodes with first and last episode index
|
return normalizedFileName
|
||||||
episodePartMatch = re.search("\\(([0-9]+)[-\\/]([0-9]+)\\)$", fileName)
|
|
||||||
if episodePartMatch is not None:
|
|
||||||
partSuffix = str(episodePartMatch.group(0))
|
|
||||||
partFirstIndex = episodePartMatch.groups()[0]
|
|
||||||
partLastIndex = episodePartMatch.groups()[1]
|
|
||||||
fileName = str(fileName).replace(partSuffix, f"Teil {partFirstIndex}-{partLastIndex}")
|
|
||||||
|
|
||||||
return fileName
|
|
||||||
|
|
||||||
|
|
||||||
def getEpisodeFileBasename(showName,
|
def getEpisodeFileBasename(showName,
|
||||||
episodeName,
|
episodeName,
|
||||||
season,
|
season,
|
||||||
episode,
|
episode,
|
||||||
indexSeasonDigits = 2,
|
indexSeasonDigits = None,
|
||||||
indexEpisodeDigits = 2,
|
indexEpisodeDigits = None,
|
||||||
indicatorSeasonDigits = 2,
|
indicatorSeasonDigits = None,
|
||||||
indicatorEpisodeDigits = 2,
|
indicatorEpisodeDigits = None,
|
||||||
context = None):
|
context = None):
|
||||||
"""
|
"""
|
||||||
One Piece:
|
One Piece:
|
||||||
@@ -150,12 +200,21 @@ def getEpisodeFileBasename(showName,
|
|||||||
configData = cc.getData() if cc is not None else {}
|
configData = cc.getData() if cc is not None else {}
|
||||||
outputFilenameTemplate = configData.get(ConfigurationController.OUTPUT_FILENAME_TEMPLATE_KEY,
|
outputFilenameTemplate = configData.get(ConfigurationController.OUTPUT_FILENAME_TEMPLATE_KEY,
|
||||||
DEFAULT_OUTPUT_FILENAME_TEMPLATE)
|
DEFAULT_OUTPUT_FILENAME_TEMPLATE)
|
||||||
|
defaultDigitLengths = ShowDescriptor.getDefaultDigitLengths(context)
|
||||||
|
|
||||||
|
if indexSeasonDigits is None:
|
||||||
|
indexSeasonDigits = defaultDigitLengths[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
||||||
|
if indexEpisodeDigits is None:
|
||||||
|
indexEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
||||||
|
if indicatorSeasonDigits is None:
|
||||||
|
indicatorSeasonDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
||||||
|
if indicatorEpisodeDigits is None:
|
||||||
|
indicatorEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
||||||
|
|
||||||
if context is not None and 'logger' in context.keys():
|
if context is not None and 'logger' in context.keys():
|
||||||
logger = context['logger']
|
logger = context['logger']
|
||||||
else:
|
else:
|
||||||
logger = logging.getLogger('FFX')
|
logger = get_ffx_logger()
|
||||||
logger.addHandler(logging.NullHandler())
|
|
||||||
|
|
||||||
|
|
||||||
indexSeparator = ' ' if indexSeasonDigits or indexEpisodeDigits else ''
|
indexSeparator = ' ' if indexSeasonDigits or indexEpisodeDigits else ''
|
||||||
@@ -185,3 +244,16 @@ def getEpisodeFileBasename(showName,
|
|||||||
|
|
||||||
# return ''.join(filenameTokens)
|
# return ''.join(filenameTokens)
|
||||||
|
|
||||||
|
|
||||||
|
def formatRichColor(text: str, color: str = None):
|
||||||
|
if color is None:
|
||||||
|
return text
|
||||||
|
else:
|
||||||
|
return f"[{color}]{text}[/{color}]"
|
||||||
|
|
||||||
|
def removeRichColor(text: str):
|
||||||
|
richColorMatch = RICH_COLOR_REGEX.search(str(text))
|
||||||
|
if richColorMatch is None:
|
||||||
|
return text
|
||||||
|
else:
|
||||||
|
return str(richColorMatch.group(1))
|
||||||
|
|||||||
@@ -1,79 +1,196 @@
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
import difflib
|
import difflib
|
||||||
|
|
||||||
|
|
||||||
class IsoLanguage(Enum):
|
class IsoLanguage(Enum):
|
||||||
|
|
||||||
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": "afr"}
|
ABKHAZIAN = {"name": "Abkhazian", "iso639_1": "ab", "iso639_2": ["abk"]}
|
||||||
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": "alb"}
|
AFAR = {"name": "Afar", "iso639_1": "aa", "iso639_2": ["aar"]}
|
||||||
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": "ara"}
|
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": ["afr"]}
|
||||||
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": "arm"}
|
AKAN = {"name": "Akan", "iso639_1": "ak", "iso639_2": ["aka"]}
|
||||||
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": "aze"}
|
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": ["sqi", "alb"]}
|
||||||
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": "baq"}
|
AMHARIC = {"name": "Amharic", "iso639_1": "am", "iso639_2": ["amh"]}
|
||||||
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": "bel"}
|
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": ["ara"]}
|
||||||
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": "bul"}
|
ARAGONESE = {"name": "Aragonese", "iso639_1": "an", "iso639_2": ["arg"]}
|
||||||
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": "cat"}
|
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": ["hye", "arm"]}
|
||||||
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": "chi"}
|
ASSAMESE = {"name": "Assamese", "iso639_1": "as", "iso639_2": ["asm"]}
|
||||||
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": "hrv"}
|
AVARIC = {"name": "Avaric", "iso639_1": "av", "iso639_2": ["ava"]}
|
||||||
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": "cze"}
|
AVESTAN = {"name": "Avestan", "iso639_1": "ae", "iso639_2": ["ave"]}
|
||||||
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": "dan"}
|
AYMARA = {"name": "Aymara", "iso639_1": "ay", "iso639_2": ["aym"]}
|
||||||
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": "dut"}
|
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": ["aze"]}
|
||||||
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": "eng"}
|
BAMBARA = {"name": "Bambara", "iso639_1": "bm", "iso639_2": ["bam"]}
|
||||||
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": "est"}
|
BASHKIR = {"name": "Bashkir", "iso639_1": "ba", "iso639_2": ["bak"]}
|
||||||
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": "fin"}
|
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": ["eus", "baq"]}
|
||||||
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": "fre"}
|
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": ["bel"]}
|
||||||
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": "geo"}
|
BENGALI = {"name": "Bengali", "iso639_1": "bn", "iso639_2": ["ben"]}
|
||||||
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": "ger"}
|
BISLAMA = {"name": "Bislama", "iso639_1": "bi", "iso639_2": ["bis"]}
|
||||||
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": "gre"}
|
BOKMAL = {"name": "Bokmål", "iso639_1": "nb", "iso639_2": ["nob"]}
|
||||||
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": "heb"}
|
BOSNIAN = {"name": "Bosnian", "iso639_1": "bs", "iso639_2": ["bos"]}
|
||||||
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": "hin"}
|
BRETON = {"name": "Breton", "iso639_1": "br", "iso639_2": ["bre"]}
|
||||||
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": "hun"}
|
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": ["bul"]}
|
||||||
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": "ice"}
|
BURMESE = {"name": "Burmese", "iso639_1": "my", "iso639_2": ["mya", "bur"]}
|
||||||
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": "ind"}
|
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": ["cat"]}
|
||||||
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": "gle"}
|
CHAMORRO = {"name": "Chamorro", "iso639_1": "ch", "iso639_2": ["cha"]}
|
||||||
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": "ita"}
|
CHECHEN = {"name": "Chechen", "iso639_1": "ce", "iso639_2": ["che"]}
|
||||||
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": "jpn"}
|
CHICHEWA = {"name": "Chichewa", "iso639_1": "ny", "iso639_2": ["nya"]}
|
||||||
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": "kaz"}
|
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": ["zho", "chi"]}
|
||||||
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": "kor"}
|
CHURCH_SLAVIC = {"name": "Church Slavic", "iso639_1": "cu", "iso639_2": ["chu"]}
|
||||||
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": "lat"}
|
CHUVASH = {"name": "Chuvash", "iso639_1": "cv", "iso639_2": ["chv"]}
|
||||||
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": "lav"}
|
CORNISH = {"name": "Cornish", "iso639_1": "kw", "iso639_2": ["cor"]}
|
||||||
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": "lit"}
|
CORSICAN = {"name": "Corsican", "iso639_1": "co", "iso639_2": ["cos"]}
|
||||||
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": "mac"}
|
CREE = {"name": "Cree", "iso639_1": "cr", "iso639_2": ["cre"]}
|
||||||
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": "may"}
|
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": ["hrv"]}
|
||||||
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": "mlt"}
|
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": ["ces", "cze"]}
|
||||||
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": "nor"}
|
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": ["dan"]}
|
||||||
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": "per"}
|
DIVEHI = {"name": "Divehi", "iso639_1": "dv", "iso639_2": ["div"]}
|
||||||
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": "pol"}
|
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": ["nld", "dut"]}
|
||||||
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": "por"}
|
DZONGKHA = {"name": "Dzongkha", "iso639_1": "dz", "iso639_2": ["dzo"]}
|
||||||
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": "rum"}
|
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": ["eng"]}
|
||||||
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": "rus"}
|
ESPERANTO = {"name": "Esperanto", "iso639_1": "eo", "iso639_2": ["epo"]}
|
||||||
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": "sme"}
|
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": ["est"]}
|
||||||
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": "smo"}
|
EWE = {"name": "Ewe", "iso639_1": "ee", "iso639_2": ["ewe"]}
|
||||||
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": "sag"}
|
FAROESE = {"name": "Faroese", "iso639_1": "fo", "iso639_2": ["fao"]}
|
||||||
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": "san"}
|
FIJIAN = {"name": "Fijian", "iso639_1": "fj", "iso639_2": ["fij"]}
|
||||||
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": "srd"}
|
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": ["fin"]}
|
||||||
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": "srp"}
|
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": ["fra", "fre"]}
|
||||||
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": "sna"}
|
FULAH = {"name": "Fulah", "iso639_1": "ff", "iso639_2": ["ful"]}
|
||||||
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": "snd"}
|
GALICIAN = {"name": "Galician", "iso639_1": "gl", "iso639_2": ["glg"]}
|
||||||
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": "sin"}
|
GANDA = {"name": "Ganda", "iso639_1": "lg", "iso639_2": ["lug"]}
|
||||||
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": "slk"}
|
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": ["kat", "geo"]}
|
||||||
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": "slv"}
|
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": ["deu", "ger"]}
|
||||||
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": "som"}
|
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": ["ell", "gre"]}
|
||||||
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": "sot"}
|
GUARANI = {"name": "Guarani", "iso639_1": "gn", "iso639_2": ["grn"]}
|
||||||
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": "spa"}
|
GUJARATI = {"name": "Gujarati", "iso639_1": "gu", "iso639_2": ["guj"]}
|
||||||
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": "sun"}
|
HAITIAN = {"name": "Haitian", "iso639_1": "ht", "iso639_2": ["hat"]}
|
||||||
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": "swa"}
|
HAUSA = {"name": "Hausa", "iso639_1": "ha", "iso639_2": ["hau"]}
|
||||||
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": "ssw"}
|
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": ["heb"]}
|
||||||
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": "swe"}
|
HERERO = {"name": "Herero", "iso639_1": "hz", "iso639_2": ["her"]}
|
||||||
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": "tgl"}
|
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": ["hin"]}
|
||||||
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": "tam"}
|
HIRI_MOTU = {"name": "Hiri Motu", "iso639_1": "ho", "iso639_2": ["hmo"]}
|
||||||
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": "tha"}
|
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": ["hun"]}
|
||||||
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": "tur"}
|
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": ["isl", "ice"]}
|
||||||
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": "ukr"}
|
IDO = {"name": "Ido", "iso639_1": "io", "iso639_2": ["ido"]}
|
||||||
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": "urd"}
|
IGBO = {"name": "Igbo", "iso639_1": "ig", "iso639_2": ["ibo"]}
|
||||||
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": "vie"}
|
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": ["ind"]}
|
||||||
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": "wel"}
|
INTERLINGUA = {"name": "Interlingua", "iso639_1": "ia", "iso639_2": ["ina"]}
|
||||||
|
INTERLINGUE = {"name": "Interlingue", "iso639_1": "ie", "iso639_2": ["ile"]}
|
||||||
|
INUKTITUT = {"name": "Inuktitut", "iso639_1": "iu", "iso639_2": ["iku"]}
|
||||||
|
INUPIAQ = {"name": "Inupiaq", "iso639_1": "ik", "iso639_2": ["ipk"]}
|
||||||
|
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": ["gle"]}
|
||||||
|
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": ["ita"]}
|
||||||
|
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": ["jpn"]}
|
||||||
|
JAVANESE = {"name": "Javanese", "iso639_1": "jv", "iso639_2": ["jav"]}
|
||||||
|
KALAALLISUT = {"name": "Kalaallisut", "iso639_1": "kl", "iso639_2": ["kal"]}
|
||||||
|
KANNADA = {"name": "Kannada", "iso639_1": "kn", "iso639_2": ["kan"]}
|
||||||
|
KANURI = {"name": "Kanuri", "iso639_1": "kr", "iso639_2": ["kau"]}
|
||||||
|
KASHMIRI = {"name": "Kashmiri", "iso639_1": "ks", "iso639_2": ["kas"]}
|
||||||
|
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": ["kaz"]}
|
||||||
|
KHMER = {"name": "Khmer", "iso639_1": "km", "iso639_2": ["khm"]}
|
||||||
|
KIKUYU = {"name": "Kikuyu", "iso639_1": "ki", "iso639_2": ["kik"]}
|
||||||
|
KINYARWANDA = {"name": "Kinyarwanda", "iso639_1": "rw", "iso639_2": ["kin"]}
|
||||||
|
KIRGHIZ = {"name": "Kirghiz", "iso639_1": "ky", "iso639_2": ["kir"]}
|
||||||
|
KOMI = {"name": "Komi", "iso639_1": "kv", "iso639_2": ["kom"]}
|
||||||
|
KONGO = {"name": "Kongo", "iso639_1": "kg", "iso639_2": ["kon"]}
|
||||||
|
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": ["kor"]}
|
||||||
|
KUANYAMA = {"name": "Kuanyama", "iso639_1": "kj", "iso639_2": ["kua"]}
|
||||||
|
KURDISH = {"name": "Kurdish", "iso639_1": "ku", "iso639_2": ["kur"]}
|
||||||
|
LAO = {"name": "Lao", "iso639_1": "lo", "iso639_2": ["lao"]}
|
||||||
|
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": ["lat"]}
|
||||||
|
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": ["lav"]}
|
||||||
|
LIMBURGAN = {"name": "Limburgan", "iso639_1": "li", "iso639_2": ["lim"]}
|
||||||
|
LINGALA = {"name": "Lingala", "iso639_1": "ln", "iso639_2": ["lin"]}
|
||||||
|
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": ["lit"]}
|
||||||
|
LUBA_KATANGA = {"name": "Luba-Katanga", "iso639_1": "lu", "iso639_2": ["lub"]}
|
||||||
|
LUXEMBOURGISH = {"name": "Luxembourgish", "iso639_1": "lb", "iso639_2": ["ltz"]}
|
||||||
|
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": ["mkd", "mac"]}
|
||||||
|
MALAGASY = {"name": "Malagasy", "iso639_1": "mg", "iso639_2": ["mlg"]}
|
||||||
|
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": ["msa", "may"]}
|
||||||
|
MALAYALAM = {"name": "Malayalam", "iso639_1": "ml", "iso639_2": ["mal"]}
|
||||||
|
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": ["mlt"]}
|
||||||
|
MANX = {"name": "Manx", "iso639_1": "gv", "iso639_2": ["glv"]}
|
||||||
|
MAORI = {"name": "Maori", "iso639_1": "mi", "iso639_2": ["mri", "mao"]}
|
||||||
|
MARATHI = {"name": "Marathi", "iso639_1": "mr", "iso639_2": ["mar"]}
|
||||||
|
MARSHALLESE = {"name": "Marshallese", "iso639_1": "mh", "iso639_2": ["mah"]}
|
||||||
|
MONGOLIAN = {"name": "Mongolian", "iso639_1": "mn", "iso639_2": ["mon"]}
|
||||||
|
NAURU = {"name": "Nauru", "iso639_1": "na", "iso639_2": ["nau"]}
|
||||||
|
NAVAJO = {"name": "Navajo", "iso639_1": "nv", "iso639_2": ["nav"]}
|
||||||
|
NDONGA = {"name": "Ndonga", "iso639_1": "ng", "iso639_2": ["ndo"]}
|
||||||
|
NEPALI = {"name": "Nepali", "iso639_1": "ne", "iso639_2": ["nep"]}
|
||||||
|
NORTH_NDEBELE = {"name": "North Ndebele", "iso639_1": "nd", "iso639_2": ["nde"]}
|
||||||
|
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": ["sme"]}
|
||||||
|
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": ["nor"]}
|
||||||
|
NORWEGIAN_NYNORSK = {"name": "Nynorsk", "iso639_1": "nn", "iso639_2": ["nno"]}
|
||||||
|
OCCITAN = {"name": "Occitan", "iso639_1": "oc", "iso639_2": ["oci"]}
|
||||||
|
OJIBWA = {"name": "Ojibwa", "iso639_1": "oj", "iso639_2": ["oji"]}
|
||||||
|
ORIYA = {"name": "Oriya", "iso639_1": "or", "iso639_2": ["ori"]}
|
||||||
|
OROMO = {"name": "Oromo", "iso639_1": "om", "iso639_2": ["orm"]}
|
||||||
|
OSSETIAN = {"name": "Ossetian", "iso639_1": "os", "iso639_2": ["oss"]}
|
||||||
|
PALI = {"name": "Pali", "iso639_1": "pi", "iso639_2": ["pli"]}
|
||||||
|
PANJABI = {"name": "Panjabi", "iso639_1": "pa", "iso639_2": ["pan"]}
|
||||||
|
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": ["fas", "per"]}
|
||||||
|
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": ["pol"]}
|
||||||
|
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": ["por"]}
|
||||||
|
PUSHTO = {"name": "Pushto", "iso639_1": "ps", "iso639_2": ["pus"]}
|
||||||
|
QUECHUA = {"name": "Quechua", "iso639_1": "qu", "iso639_2": ["que"]}
|
||||||
|
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": ["ron", "rum"]}
|
||||||
|
ROMANSH = {"name": "Romansh", "iso639_1": "rm", "iso639_2": ["roh"]}
|
||||||
|
RUNDI = {"name": "Rundi", "iso639_1": "rn", "iso639_2": ["run"]}
|
||||||
|
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": ["rus"]}
|
||||||
|
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": ["smo"]}
|
||||||
|
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": ["sag"]}
|
||||||
|
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": ["san"]}
|
||||||
|
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": ["srd"]}
|
||||||
|
SCOTTISH_GAELIC = {"name": "Scottish Gaelic", "iso639_1": "gd", "iso639_2": ["gla"]}
|
||||||
|
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": ["srp"]}
|
||||||
|
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": ["sna"]}
|
||||||
|
SICHUAN_YI = {"name": "Sichuan Yi", "iso639_1": "ii", "iso639_2": ["iii"]}
|
||||||
|
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": ["snd"]}
|
||||||
|
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": ["sin"]}
|
||||||
|
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": ["slk", "slo"]}
|
||||||
|
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": ["slv"]}
|
||||||
|
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": ["som"]}
|
||||||
|
SOUTH_NDEBELE = {"name": "South Ndebele", "iso639_1": "nr", "iso639_2": ["nbl"]}
|
||||||
|
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": ["sot"]}
|
||||||
|
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": ["spa"]}
|
||||||
|
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": ["sun"]}
|
||||||
|
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": ["swa"]}
|
||||||
|
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": ["ssw"]}
|
||||||
|
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": ["swe"]}
|
||||||
|
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": ["tgl"]}
|
||||||
|
TAHITIAN = {"name": "Tahitian", "iso639_1": "ty", "iso639_2": ["tah"]}
|
||||||
|
TAJIK = {"name": "Tajik", "iso639_1": "tg", "iso639_2": ["tgk"]}
|
||||||
|
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": ["tam"]}
|
||||||
|
TATAR = {"name": "Tatar", "iso639_1": "tt", "iso639_2": ["tat"]}
|
||||||
|
TELUGU = {"name": "Telugu", "iso639_1": "te", "iso639_2": ["tel"]}
|
||||||
|
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": ["tha"]}
|
||||||
|
TIBETAN = {"name": "Tibetan", "iso639_1": "bo", "iso639_2": ["bod", "tib"]}
|
||||||
|
TIGRINYA = {"name": "Tigrinya", "iso639_1": "ti", "iso639_2": ["tir"]}
|
||||||
|
TONGA = {"name": "Tonga", "iso639_1": "to", "iso639_2": ["ton"]}
|
||||||
|
TSONGA = {"name": "Tsonga", "iso639_1": "ts", "iso639_2": ["tso"]}
|
||||||
|
TSWANA = {"name": "Tswana", "iso639_1": "tn", "iso639_2": ["tsn"]}
|
||||||
|
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": ["tur"]}
|
||||||
|
TURKMEN = {"name": "Turkmen", "iso639_1": "tk", "iso639_2": ["tuk"]}
|
||||||
|
TWI = {"name": "Twi", "iso639_1": "tw", "iso639_2": ["twi"]}
|
||||||
|
UIGHUR = {"name": "Uighur", "iso639_1": "ug", "iso639_2": ["uig"]}
|
||||||
|
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": ["ukr"]}
|
||||||
|
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": ["urd"]}
|
||||||
|
UZBEK = {"name": "Uzbek", "iso639_1": "uz", "iso639_2": ["uzb"]}
|
||||||
|
VENDA = {"name": "Venda", "iso639_1": "ve", "iso639_2": ["ven"]}
|
||||||
|
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": ["vie"]}
|
||||||
|
VOLAPUK = {"name": "Volapük", "iso639_1": "vo", "iso639_2": ["vol"]}
|
||||||
|
WALLOON = {"name": "Walloon", "iso639_1": "wa", "iso639_2": ["wln"]}
|
||||||
|
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": ["cym", "wel"]}
|
||||||
|
WESTERN_FRISIAN = {"name": "Western Frisian", "iso639_1": "fy", "iso639_2": ["fry"]}
|
||||||
|
WOLOF = {"name": "Wolof", "iso639_1": "wo", "iso639_2": ["wol"]}
|
||||||
|
XHOSA = {"name": "Xhosa", "iso639_1": "xh", "iso639_2": ["xho"]}
|
||||||
|
YIDDISH = {"name": "Yiddish", "iso639_1": "yi", "iso639_2": ["yid"]}
|
||||||
|
YORUBA = {"name": "Yoruba", "iso639_1": "yo", "iso639_2": ["yor"]}
|
||||||
|
ZHUANG = {"name": "Zhuang", "iso639_1": "za", "iso639_2": ["zha"]}
|
||||||
|
ZULU = {"name": "Zulu", "iso639_1": "zu", "iso639_2": ["zul"]}
|
||||||
|
|
||||||
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": "und"}
|
FILIPINO = {"name": "Filipino", "iso639_1": "tl", "iso639_2": ["fil"]}
|
||||||
|
|
||||||
|
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": ["und"]}
|
||||||
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -82,25 +199,22 @@ class IsoLanguage(Enum):
|
|||||||
closestMatches = difflib.get_close_matches(label, [l.value["name"] for l in IsoLanguage], n=1)
|
closestMatches = difflib.get_close_matches(label, [l.value["name"] for l in IsoLanguage], n=1)
|
||||||
|
|
||||||
if closestMatches:
|
if closestMatches:
|
||||||
foundLangs = [l for l in IsoLanguage if l.value['name'] == closestMatches[0]]
|
foundLangs = [l for l in IsoLanguage if l.value["name"] == closestMatches[0]]
|
||||||
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
|
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
|
||||||
else:
|
else:
|
||||||
return IsoLanguage.UNDEFINED
|
return IsoLanguage.UNDEFINED
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def findThreeLetter(theeLetter : str):
|
def findThreeLetter(theeLetter : str):
|
||||||
foundLangs = [l for l in IsoLanguage if l.value['iso639_2'] == str(theeLetter)]
|
foundLangs = [l for l in IsoLanguage if str(theeLetter) in l.value["iso639_2"]]
|
||||||
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
|
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
|
||||||
|
|
||||||
|
|
||||||
def label(self):
|
def label(self):
|
||||||
return str(self.value['name'])
|
return str(self.value["name"])
|
||||||
|
|
||||||
def twoLetter(self):
|
def twoLetter(self):
|
||||||
return str(self.value['iso639_1'])
|
return str(self.value["iso639_1"])
|
||||||
|
|
||||||
def threeLetter(self):
|
def threeLetter(self):
|
||||||
return str(self.value['iso639_2'])
|
return str(self.value["iso639_2"][0])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
68
src/ffx/logging_utils.py
Normal file
68
src/ffx/logging_utils.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
import logging
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
FFX_LOGGER_NAME = "FFX"
|
||||||
|
CONSOLE_HANDLER_NAME = "ffx-console"
|
||||||
|
FILE_HANDLER_NAME = "ffx-file"
|
||||||
|
|
||||||
|
|
||||||
|
def get_ffx_logger(name: str = FFX_LOGGER_NAME) -> logging.Logger:
|
||||||
|
logger = logging.getLogger(name)
|
||||||
|
logger.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
if not logger.handlers:
|
||||||
|
logger.addHandler(logging.NullHandler())
|
||||||
|
|
||||||
|
return logger
|
||||||
|
|
||||||
|
|
||||||
|
def configure_ffx_logger(
|
||||||
|
log_file_path: str,
|
||||||
|
file_level: int,
|
||||||
|
console_level: int,
|
||||||
|
name: str = FFX_LOGGER_NAME,
|
||||||
|
) -> logging.Logger:
|
||||||
|
logger = get_ffx_logger(name)
|
||||||
|
logger.propagate = False
|
||||||
|
|
||||||
|
for handler in list(logger.handlers):
|
||||||
|
if isinstance(handler, logging.NullHandler):
|
||||||
|
logger.removeHandler(handler)
|
||||||
|
|
||||||
|
console_handler = next(
|
||||||
|
(handler for handler in logger.handlers if handler.get_name() == CONSOLE_HANDLER_NAME),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
if console_handler is None:
|
||||||
|
console_handler = logging.StreamHandler()
|
||||||
|
console_handler.set_name(CONSOLE_HANDLER_NAME)
|
||||||
|
logger.addHandler(console_handler)
|
||||||
|
|
||||||
|
console_handler.setLevel(console_level)
|
||||||
|
console_handler.setFormatter(logging.Formatter("%(message)s"))
|
||||||
|
|
||||||
|
normalized_log_path = os.path.abspath(log_file_path)
|
||||||
|
file_handler = next(
|
||||||
|
(handler for handler in logger.handlers if handler.get_name() == FILE_HANDLER_NAME),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
if (
|
||||||
|
file_handler is not None
|
||||||
|
and os.path.abspath(file_handler.baseFilename) != normalized_log_path
|
||||||
|
):
|
||||||
|
logger.removeHandler(file_handler)
|
||||||
|
file_handler.close()
|
||||||
|
file_handler = None
|
||||||
|
|
||||||
|
if file_handler is None:
|
||||||
|
file_handler = logging.FileHandler(normalized_log_path)
|
||||||
|
file_handler.set_name(FILE_HANDLER_NAME)
|
||||||
|
logger.addHandler(file_handler)
|
||||||
|
|
||||||
|
file_handler.setLevel(file_level)
|
||||||
|
file_handler.setFormatter(
|
||||||
|
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
|
||||||
|
)
|
||||||
|
|
||||||
|
return logger
|
||||||
@@ -25,14 +25,14 @@ class MediaController():
|
|||||||
pid = int(patternId)
|
pid = int(patternId)
|
||||||
|
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Pattern).filter(Pattern.id == pid)
|
pattern = s.query(Pattern).filter(Pattern.id == pid).first()
|
||||||
|
|
||||||
if q.count():
|
if pattern is not None:
|
||||||
pattern = q.first
|
|
||||||
|
|
||||||
for mediaTagKey, mediaTagValue in mediaDescriptor.getTags():
|
for mediaTagKey, mediaTagValue in mediaDescriptor.getTags():
|
||||||
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
|
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
|
||||||
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
|
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
|
||||||
|
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
|
||||||
self.__tc.addTrack(trackDescriptor, patternId = pid)
|
self.__tc.addTrack(trackDescriptor, patternId = pid)
|
||||||
|
|
||||||
s.commit()
|
s.commit()
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import os, re, click, logging
|
import os, re, click
|
||||||
|
|
||||||
from typing import List, Self
|
from typing import List, Self
|
||||||
|
|
||||||
@@ -9,8 +9,7 @@ from ffx.track_disposition import TrackDisposition
|
|||||||
from ffx.track_codec import TrackCodec
|
from ffx.track_codec import TrackCodec
|
||||||
|
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.logging_utils import get_ffx_logger
|
||||||
from ffx.helper import dictDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
|
|
||||||
|
|
||||||
|
|
||||||
class MediaDescriptor:
|
class MediaDescriptor:
|
||||||
@@ -22,6 +21,7 @@ class MediaDescriptor:
|
|||||||
TRACKS_KEY = "tracks"
|
TRACKS_KEY = "tracks"
|
||||||
|
|
||||||
TRACK_DESCRIPTOR_LIST_KEY = "track_descriptors"
|
TRACK_DESCRIPTOR_LIST_KEY = "track_descriptors"
|
||||||
|
ATTACHMENT_DESCRIPTOR_LIST_KEY = "attachment_descriptors"
|
||||||
CLEAR_TAGS_FLAG_KEY = "clear_tags"
|
CLEAR_TAGS_FLAG_KEY = "clear_tags"
|
||||||
|
|
||||||
FFPROBE_DISPOSITION_KEY = "disposition"
|
FFPROBE_DISPOSITION_KEY = "disposition"
|
||||||
@@ -31,7 +31,9 @@ class MediaDescriptor:
|
|||||||
#407 remove as well
|
#407 remove as well
|
||||||
EXCLUDED_MEDIA_TAGS = ["creation_time"]
|
EXCLUDED_MEDIA_TAGS = ["creation_time"]
|
||||||
|
|
||||||
SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
||||||
|
STREAM_LANGUAGE_DISPOSITIONS_MATCH = '([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
||||||
|
|
||||||
SUBTITLE_FILE_EXTENSION = 'vtt'
|
SUBTITLE_FILE_EXTENSION = 'vtt'
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
@@ -45,8 +47,7 @@ class MediaDescriptor:
|
|||||||
self.__logger = self.__context['logger']
|
self.__logger = self.__context['logger']
|
||||||
else:
|
else:
|
||||||
self.__context = {}
|
self.__context = {}
|
||||||
self.__logger = logging.getLogger('FFX')
|
self.__logger = get_ffx_logger()
|
||||||
self.__logger.addHandler(logging.NullHandler())
|
|
||||||
|
|
||||||
if MediaDescriptor.TAGS_KEY in kwargs.keys():
|
if MediaDescriptor.TAGS_KEY in kwargs.keys():
|
||||||
if type(kwargs[MediaDescriptor.TAGS_KEY]) is not dict:
|
if type(kwargs[MediaDescriptor.TAGS_KEY]) is not dict:
|
||||||
@@ -69,9 +70,9 @@ class MediaDescriptor:
|
|||||||
raise TypeError(
|
raise TypeError(
|
||||||
f"TrackDesciptor.__init__(): All elements of argument list {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} are required to be of type TrackDescriptor"
|
f"TrackDesciptor.__init__(): All elements of argument list {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} are required to be of type TrackDescriptor"
|
||||||
)
|
)
|
||||||
self.__trackDescriptors = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
|
self.__trackDescriptors: List[TrackDescriptor] = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
|
||||||
else:
|
else:
|
||||||
self.__trackDescriptors = []
|
self.__trackDescriptors: List[TrackDescriptor] = []
|
||||||
|
|
||||||
def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
|
def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
|
||||||
|
|
||||||
@@ -107,14 +108,16 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
|
|
||||||
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
|
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
|
||||||
for t in self.getAllTrackDescriptors():
|
# for t in self.getAllTrackDescriptors():
|
||||||
|
for t in self.getTrackDescriptors():
|
||||||
if t.getType() == trackType:
|
if t.getType() == trackType:
|
||||||
t.setDispositionFlag(
|
t.setDispositionFlag(
|
||||||
TrackDisposition.DEFAULT, t.getSubIndex() == int(subIndex)
|
TrackDisposition.DEFAULT, t.getSubIndex() == int(subIndex)
|
||||||
)
|
)
|
||||||
|
|
||||||
def setForcedSubTrack(self, trackType: TrackType, subIndex: int):
|
def setForcedSubTrack(self, trackType: TrackType, subIndex: int):
|
||||||
for t in self.getAllTrackDescriptors():
|
# for t in self.getAllTrackDescriptors():
|
||||||
|
for t in self.getTrackDescriptors():
|
||||||
if t.getType() == trackType:
|
if t.getType() == trackType:
|
||||||
t.setDispositionFlag(
|
t.setDispositionFlag(
|
||||||
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
|
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
|
||||||
@@ -190,7 +193,8 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
|
|
||||||
def applySourceIndices(self, sourceMediaDescriptor: Self):
|
def applySourceIndices(self, sourceMediaDescriptor: Self):
|
||||||
sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
|
# sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
|
||||||
|
sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors()
|
||||||
|
|
||||||
numTrackDescriptors = len(self.__trackDescriptors)
|
numTrackDescriptors = len(self.__trackDescriptors)
|
||||||
if len(sourceTrackDescriptors) != numTrackDescriptors:
|
if len(sourceTrackDescriptors) != numTrackDescriptors:
|
||||||
@@ -203,7 +207,7 @@ class MediaDescriptor:
|
|||||||
def rearrangeTrackDescriptors(self, newOrder: List[int]):
|
def rearrangeTrackDescriptors(self, newOrder: List[int]):
|
||||||
if len(newOrder) != len(self.__trackDescriptors):
|
if len(newOrder) != len(self.__trackDescriptors):
|
||||||
raise ValueError('Length of list with reordered indices does not match number of track descriptors')
|
raise ValueError('Length of list with reordered indices does not match number of track descriptors')
|
||||||
reorderedTrackDescriptors = {}
|
reorderedTrackDescriptors = []
|
||||||
for oldIndex in newOrder:
|
for oldIndex in newOrder:
|
||||||
reorderedTrackDescriptors.append(self.__trackDescriptors[oldIndex])
|
reorderedTrackDescriptors.append(self.__trackDescriptors[oldIndex])
|
||||||
self.__trackDescriptors = reorderedTrackDescriptors
|
self.__trackDescriptors = reorderedTrackDescriptors
|
||||||
@@ -285,9 +289,9 @@ class MediaDescriptor:
|
|||||||
tdList[trackIndex].setIndex(trackIndex)
|
tdList[trackIndex].setIndex(trackIndex)
|
||||||
|
|
||||||
|
|
||||||
def getAllTrackDescriptors(self):
|
# def getAllTrackDescriptors(self):
|
||||||
"""Returns all track descriptors sorted by type: video, audio then subtitles"""
|
# """Returns all track descriptors sorted by type: video, audio then subtitles"""
|
||||||
return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
|
# return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
|
||||||
|
|
||||||
|
|
||||||
def getTrackDescriptors(self,
|
def getTrackDescriptors(self,
|
||||||
@@ -317,82 +321,16 @@ class MediaDescriptor:
|
|||||||
if s.getType() == TrackType.SUBTITLE
|
if s.getType() == TrackType.SUBTITLE
|
||||||
]
|
]
|
||||||
|
|
||||||
|
def getAttachmentTracks(self) -> List[TrackDescriptor]:
|
||||||
def compare(self, vsMediaDescriptor: Self):
|
return [
|
||||||
|
s
|
||||||
if not isinstance(vsMediaDescriptor, self.__class__):
|
for s in self.__trackDescriptors
|
||||||
self.__logger.error(f"MediaDescriptor.compare(): Argument is required to be of type {self.__class__}")
|
if s.getType() == TrackType.ATTACHMENT
|
||||||
raise click.Abort()
|
]
|
||||||
|
|
||||||
vsTags = vsMediaDescriptor.getTags()
|
|
||||||
tags = self.getTags()
|
|
||||||
|
|
||||||
# HINT: Some tags differ per file, for example creation_time, so these are removed before diff
|
|
||||||
for emt in MediaDescriptor.EXCLUDED_MEDIA_TAGS:
|
|
||||||
if emt in tags.keys():
|
|
||||||
del tags[emt]
|
|
||||||
if emt in vsTags.keys():
|
|
||||||
del vsTags[emt]
|
|
||||||
|
|
||||||
tagsDiff = dictDiff(vsTags, tags)
|
|
||||||
|
|
||||||
compareResult = {}
|
|
||||||
|
|
||||||
if tagsDiff:
|
|
||||||
compareResult[MediaDescriptor.TAGS_KEY] = tagsDiff
|
|
||||||
|
|
||||||
# Target track configuration (from DB)
|
|
||||||
# tracks = self.getAllTrackDescriptors()
|
|
||||||
tracks = self.getAllTrackDescriptors() # filtern
|
|
||||||
numTracks = len(tracks)
|
|
||||||
|
|
||||||
# Current track configuration (of file)
|
|
||||||
vsTracks = vsMediaDescriptor.getAllTrackDescriptors()
|
|
||||||
numVsTracks = len(vsTracks)
|
|
||||||
|
|
||||||
maxNumOfTracks = max(numVsTracks, numTracks)
|
|
||||||
|
|
||||||
trackCompareResult = {}
|
|
||||||
|
|
||||||
for tp in range(maxNumOfTracks):
|
|
||||||
|
|
||||||
#!
|
|
||||||
vsTrackIndex = tracks[tp].getSourceIndex()
|
|
||||||
|
|
||||||
# Will trigger if tracks are missing in file
|
|
||||||
if tp > (numVsTracks - 1):
|
|
||||||
if DIFF_ADDED_KEY not in trackCompareResult.keys():
|
|
||||||
trackCompareResult[DIFF_ADDED_KEY] = set()
|
|
||||||
trackCompareResult[DIFF_ADDED_KEY].add(tracks[tp].getIndex())
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Will trigger if tracks are missing in DB definition
|
|
||||||
# New tracks will be added per update via this way
|
|
||||||
if tp > (numTracks - 1):
|
|
||||||
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
|
|
||||||
trackCompareResult[DIFF_REMOVED_KEY] = {}
|
|
||||||
trackCompareResult[DIFF_REMOVED_KEY][
|
|
||||||
vsTracks[vsTrackIndex].getIndex()
|
|
||||||
] = vsTracks[vsTrackIndex]
|
|
||||||
continue
|
|
||||||
|
|
||||||
# assumption is made here that the track order will not change for all files of a sequence
|
|
||||||
trackDiff = tracks[tp].compare(vsTracks[vsTrackIndex])
|
|
||||||
|
|
||||||
if trackDiff:
|
|
||||||
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
|
|
||||||
trackCompareResult[DIFF_CHANGED_KEY] = {}
|
|
||||||
trackCompareResult[DIFF_CHANGED_KEY][
|
|
||||||
vsTracks[vsTrackIndex].getIndex()
|
|
||||||
] = trackDiff
|
|
||||||
|
|
||||||
if trackCompareResult:
|
|
||||||
compareResult[MediaDescriptor.TRACKS_KEY] = trackCompareResult
|
|
||||||
|
|
||||||
return compareResult
|
|
||||||
|
|
||||||
|
|
||||||
def getImportFileTokens(self, use_sub_index: bool = True):
|
def getImportFileTokens(self, use_sub_index: bool = True):
|
||||||
|
"""Generate ffmpeg import options for external stream files"""
|
||||||
|
|
||||||
importFileTokens = []
|
importFileTokens = []
|
||||||
|
|
||||||
@@ -415,25 +353,47 @@ class MediaDescriptor:
|
|||||||
return importFileTokens
|
return importFileTokens
|
||||||
|
|
||||||
|
|
||||||
def getInputMappingTokens(self, use_sub_index: bool = True, only_video: bool = False):
|
def getInputMappingTokens(self,
|
||||||
|
use_sub_index: bool = True,
|
||||||
|
only_video: bool = False,
|
||||||
|
sourceMediaDescriptor: Self = None):
|
||||||
"""Tracks must be reordered for source index order"""
|
"""Tracks must be reordered for source index order"""
|
||||||
|
|
||||||
inputMappingTokens = []
|
inputMappingTokens = []
|
||||||
|
|
||||||
|
sortedTrackDescriptors = sorted(self.__trackDescriptors, key=lambda d: d.getIndex())
|
||||||
|
sourceTrackDescriptorsByIndex = {
|
||||||
|
td.getIndex(): td
|
||||||
|
for td in (
|
||||||
|
sourceMediaDescriptor.getTrackDescriptors()
|
||||||
|
if sourceMediaDescriptor is not None
|
||||||
|
else sortedTrackDescriptors
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
# raise click.ClickException(' '.join([f"\nindex={td.getIndex()} subIndex={td.getSubIndex()} srcIndex={td.getSourceIndex()} type={td.getType().label()}" for td in self.__trackDescriptors]))
|
||||||
|
|
||||||
filePointer = 1
|
filePointer = 1
|
||||||
for trackIndex in range(len(self.__trackDescriptors)):
|
for trackIndex in range(len(sortedTrackDescriptors)):
|
||||||
|
|
||||||
td = self.__trackDescriptors[trackIndex]
|
td: TrackDescriptor = sortedTrackDescriptors[trackIndex]
|
||||||
|
|
||||||
stdi = self.__trackDescriptors[td.getSourceIndex()].getIndex()
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
stdsi = self.__trackDescriptors[td.getSourceIndex()].getSubIndex()
|
if td.getCodec() != TrackCodec.PNG:
|
||||||
|
|
||||||
# sti = self.__trackDescriptors[trackIndex].getSourceIndex()
|
sourceTrackDescriptor = sourceTrackDescriptorsByIndex.get(td.getSourceIndex())
|
||||||
# sotd = sourceOrderTrackDescriptors[sti]
|
if sourceTrackDescriptor is None:
|
||||||
|
raise ValueError(f"No source track descriptor found for source index {td.getSourceIndex()}")
|
||||||
|
|
||||||
|
stdi = sourceTrackDescriptor.getIndex()
|
||||||
|
stdsi = sourceTrackDescriptor.getSubIndex()
|
||||||
|
|
||||||
trackType = td.getType()
|
trackType = td.getType()
|
||||||
|
trackCodec = td.getCodec()
|
||||||
|
|
||||||
|
if (trackType != TrackType.ATTACHMENT
|
||||||
|
and (trackType == TrackType.VIDEO or not only_video)):
|
||||||
|
|
||||||
if (trackType == TrackType.VIDEO or not only_video):
|
|
||||||
|
|
||||||
importedFilePath = td.getExternalSourceFilePath()
|
importedFilePath = td.getExternalSourceFilePath()
|
||||||
|
|
||||||
@@ -449,42 +409,59 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
else:
|
else:
|
||||||
|
|
||||||
if td.getCodec() != TrackCodec.PGS:
|
if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
|
||||||
inputMappingTokens += [
|
inputMappingTokens += [
|
||||||
"-map",
|
"-map",
|
||||||
f"0:{trackType.indicator()}:{stdsi}",
|
f"0:{trackType.indicator()}:{stdsi}",
|
||||||
]
|
]
|
||||||
|
|
||||||
else:
|
else:
|
||||||
if td.getCodec() != TrackCodec.PGS:
|
if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
|
||||||
inputMappingTokens += ["-map", f"0:{stdi}"]
|
inputMappingTokens += ["-map", f"0:{stdi}"]
|
||||||
|
|
||||||
|
if sourceMediaDescriptor:
|
||||||
|
fontDescriptors = [ftd for ftd in sourceMediaDescriptor.getAttachmentTracks()
|
||||||
|
if ftd.getCodec() == TrackCodec.TTF]
|
||||||
|
else:
|
||||||
|
fontDescriptors = [ftd for ftd in self.__trackDescriptors
|
||||||
|
if ftd.getType() == TrackType.ATTACHMENT
|
||||||
|
and ftd.getCodec() == TrackCodec.TTF]
|
||||||
|
|
||||||
|
for ad in sorted(fontDescriptors, key=lambda d: d.getIndex()):
|
||||||
|
inputMappingTokens += ["-map", f"0:{ad.getIndex()}"]
|
||||||
|
|
||||||
return inputMappingTokens
|
return inputMappingTokens
|
||||||
|
|
||||||
|
|
||||||
def searchSubtitleFiles(self, searchDirectory, prefix):
|
def searchSubtitleFiles(self, searchDirectory, prefix):
|
||||||
|
|
||||||
sesl_match = re.compile(MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_MATCH)
|
sesld_match = re.compile(f"{prefix}_{MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
|
||||||
|
sld_match = re.compile(f"{prefix}_{MediaDescriptor.STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
|
||||||
|
|
||||||
subtitleFileDescriptors = []
|
subtitleFileDescriptors = []
|
||||||
|
|
||||||
for subtitleFilename in os.listdir(searchDirectory):
|
for subtitleFilename in os.listdir(searchDirectory):
|
||||||
if subtitleFilename.startswith(prefix) and subtitleFilename.endswith(
|
if subtitleFilename.startswith(prefix) and subtitleFilename.endswith(
|
||||||
"." + MediaDescriptor.SUBTITLE_FILE_EXTENSION
|
"." + MediaDescriptor.SUBTITLE_FILE_EXTENSION
|
||||||
):
|
):
|
||||||
sesl_result = sesl_match.search(subtitleFilename)
|
|
||||||
if sesl_result is not None:
|
sesld_result = sesld_match.search(subtitleFilename)
|
||||||
|
sld_result = None if not sesld_result is None else sld_match.search(subtitleFilename)
|
||||||
|
|
||||||
|
if not sesld_result is None:
|
||||||
|
|
||||||
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
|
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
|
||||||
if os.path.isfile(subtitleFilePath):
|
if os.path.isfile(subtitleFilePath):
|
||||||
|
|
||||||
subtitleFileDescriptor = {}
|
subtitleFileDescriptor = {}
|
||||||
subtitleFileDescriptor["path"] = subtitleFilePath
|
subtitleFileDescriptor["path"] = subtitleFilePath
|
||||||
subtitleFileDescriptor["season"] = int(sesl_result.group(1))
|
subtitleFileDescriptor["season"] = int(sesld_result.group(1))
|
||||||
subtitleFileDescriptor["episode"] = int(sesl_result.group(2))
|
subtitleFileDescriptor["episode"] = int(sesld_result.group(2))
|
||||||
subtitleFileDescriptor["index"] = int(sesl_result.group(3))
|
subtitleFileDescriptor["index"] = int(sesld_result.group(3))
|
||||||
subtitleFileDescriptor["language"] = sesl_result.group(4)
|
subtitleFileDescriptor["language"] = sesld_result.group(4)
|
||||||
|
|
||||||
dispSet = set()
|
dispSet = set()
|
||||||
dispCaptGroups = sesl_result.groups()
|
dispCaptGroups = sesld_result.groups()
|
||||||
numCaptGroups = len(dispCaptGroups)
|
numCaptGroups = len(dispCaptGroups)
|
||||||
if numCaptGroups > 4:
|
if numCaptGroups > 4:
|
||||||
for groupIndex in range(numCaptGroups - 4):
|
for groupIndex in range(numCaptGroups - 4):
|
||||||
@@ -495,6 +472,29 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
subtitleFileDescriptors.append(subtitleFileDescriptor)
|
subtitleFileDescriptors.append(subtitleFileDescriptor)
|
||||||
|
|
||||||
|
if not sld_result is None:
|
||||||
|
|
||||||
|
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
|
||||||
|
if os.path.isfile(subtitleFilePath):
|
||||||
|
|
||||||
|
subtitleFileDescriptor = {}
|
||||||
|
subtitleFileDescriptor["path"] = subtitleFilePath
|
||||||
|
subtitleFileDescriptor["index"] = int(sld_result.group(1))
|
||||||
|
subtitleFileDescriptor["language"] = sld_result.group(2)
|
||||||
|
|
||||||
|
dispSet = set()
|
||||||
|
dispCaptGroups = sld_result.groups()
|
||||||
|
numCaptGroups = len(dispCaptGroups)
|
||||||
|
if numCaptGroups > 2:
|
||||||
|
for groupIndex in range(numCaptGroups - 2):
|
||||||
|
disp = TrackDisposition.fromIndicator(dispCaptGroups[groupIndex + 2])
|
||||||
|
if disp is not None:
|
||||||
|
dispSet.add(disp)
|
||||||
|
subtitleFileDescriptor["disposition_set"] = dispSet
|
||||||
|
|
||||||
|
subtitleFileDescriptors.append(subtitleFileDescriptor)
|
||||||
|
|
||||||
|
|
||||||
self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")
|
self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")
|
||||||
|
|
||||||
return subtitleFileDescriptors
|
return subtitleFileDescriptors
|
||||||
@@ -518,7 +518,11 @@ class MediaDescriptor:
|
|||||||
[
|
[
|
||||||
d
|
d
|
||||||
for d in availableFileSubtitleDescriptors
|
for d in availableFileSubtitleDescriptors
|
||||||
if d["season"] == int(season) and d["episode"] == int(episode)
|
if ((season == -1 and episode == -1)
|
||||||
|
or (
|
||||||
|
d.get("season") == int(season)
|
||||||
|
and d.get("episode") == int(episode)
|
||||||
|
))
|
||||||
],
|
],
|
||||||
key=lambda d: d["index"],
|
key=lambda d: d["index"],
|
||||||
)
|
)
|
||||||
@@ -533,15 +537,20 @@ class MediaDescriptor:
|
|||||||
if matchingSubtitleTrackDescriptor:
|
if matchingSubtitleTrackDescriptor:
|
||||||
# click.echo(f"Found matching subtitle file {msfd["path"]}\n")
|
# click.echo(f"Found matching subtitle file {msfd["path"]}\n")
|
||||||
self.__logger.debug(f"importSubtitles(): Found matching subtitle file {msfd['path']}")
|
self.__logger.debug(f"importSubtitles(): Found matching subtitle file {msfd['path']}")
|
||||||
matchingSubtitleTrackDescriptor[0].setExternalSourceFilePath(msfd["path"])
|
matchingTrack = matchingSubtitleTrackDescriptor[0]
|
||||||
|
matchingTrack.setExternalSourceFilePath(msfd["path"])
|
||||||
|
|
||||||
# TODO: Check if useful
|
# Prefer metadata coming from the external single-track source when
|
||||||
# matchingSubtitleTrackDescriptor[0].setDispositionSet(msfd["disposition_set"])
|
# it is provided explicitly by the filename contract.
|
||||||
|
matchingTrack.getTags()["language"] = msfd["language"]
|
||||||
|
if msfd["disposition_set"]:
|
||||||
|
matchingTrack.setDispositionSet(msfd["disposition_set"])
|
||||||
|
|
||||||
|
|
||||||
def getConfiguration(self, label: str = ''):
|
def getConfiguration(self, label: str = ''):
|
||||||
yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
|
yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
|
||||||
for td in self.getAllTrackDescriptors():
|
# for td in self.getAllTrackDescriptors():
|
||||||
|
for td in self.getTrackDescriptors():
|
||||||
yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
|
yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
|
||||||
+ '|'.join([d.indicator() for d in td.getDispositionSet()])
|
+ '|'.join([d.indicator() for d in td.getDispositionSet()])
|
||||||
+ ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))
|
+ ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))
|
||||||
|
|||||||
346
src/ffx/media_descriptor_change_set.py
Normal file
346
src/ffx/media_descriptor_change_set.py
Normal file
@@ -0,0 +1,346 @@
|
|||||||
|
import click
|
||||||
|
|
||||||
|
from ffx.iso_language import IsoLanguage
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
|
||||||
|
from ffx.helper import dictDiff, setDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
|
||||||
|
|
||||||
|
from ffx.track_codec import TrackCodec
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
|
|
||||||
|
class MediaDescriptorChangeSet():
|
||||||
|
|
||||||
|
TAGS_KEY = "tags"
|
||||||
|
TRACKS_KEY = "tracks"
|
||||||
|
DISPOSITION_SET_KEY = "disposition_set"
|
||||||
|
|
||||||
|
TRACK_DESCRIPTOR_KEY = "track_descriptor"
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context,
|
||||||
|
targetMediaDescriptor: MediaDescriptor = None,
|
||||||
|
sourceMediaDescriptor: MediaDescriptor = None):
|
||||||
|
|
||||||
|
self.__context = context
|
||||||
|
self.__logger = context['logger']
|
||||||
|
|
||||||
|
self.__configurationData = self.__context['config'].getData()
|
||||||
|
|
||||||
|
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
|
||||||
|
|
||||||
|
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
|
||||||
|
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
|
||||||
|
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
|
||||||
|
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
|
||||||
|
if 'streams' in metadataConfiguration.keys()
|
||||||
|
and 'remove' in metadataConfiguration['streams'].keys() else [])
|
||||||
|
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
|
||||||
|
if 'streams' in metadataConfiguration.keys()
|
||||||
|
and 'ignore' in metadataConfiguration['streams'].keys() else [])
|
||||||
|
|
||||||
|
|
||||||
|
self.__targetTrackDescriptors = targetMediaDescriptor.getTrackDescriptors() if targetMediaDescriptor is not None else []
|
||||||
|
self.__sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors() if sourceMediaDescriptor is not None else []
|
||||||
|
self.__targetTrackDescriptorsByIndex = {
|
||||||
|
trackDescriptor.getIndex(): trackDescriptor
|
||||||
|
for trackDescriptor in self.__targetTrackDescriptors
|
||||||
|
}
|
||||||
|
self.__sourceTrackDescriptorsByIndex = {
|
||||||
|
trackDescriptor.getIndex(): trackDescriptor
|
||||||
|
for trackDescriptor in self.__sourceTrackDescriptors
|
||||||
|
}
|
||||||
|
|
||||||
|
targetMediaTags = targetMediaDescriptor.getTags() if targetMediaDescriptor is not None else {}
|
||||||
|
sourceMediaTags = sourceMediaDescriptor.getTags() if sourceMediaDescriptor is not None else {}
|
||||||
|
|
||||||
|
|
||||||
|
self.__changeSetObj = {}
|
||||||
|
|
||||||
|
#if targetMediaDescriptor is not None:
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#!!#
|
||||||
|
tagsDiff = dictDiff(sourceMediaTags,
|
||||||
|
targetMediaTags,
|
||||||
|
ignoreKeys=self.__ignoreGlobalKeys,
|
||||||
|
removeKeys=self.__removeGlobalKeys)
|
||||||
|
|
||||||
|
if tagsDiff:
|
||||||
|
self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiff
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
self.__numTargetTracks = len(self.__targetTrackDescriptors)
|
||||||
|
|
||||||
|
# Current track configuration (of file)
|
||||||
|
|
||||||
|
self.__numSourceTracks = len(self.__sourceTrackDescriptors)
|
||||||
|
|
||||||
|
trackCompareResult = {}
|
||||||
|
|
||||||
|
for targetTrackDescriptor in self.__targetTrackDescriptors:
|
||||||
|
sourceTrackDescriptor = self.__sourceTrackDescriptorsByIndex.get(
|
||||||
|
targetTrackDescriptor.getSourceIndex()
|
||||||
|
)
|
||||||
|
|
||||||
|
if sourceTrackDescriptor is None:
|
||||||
|
if DIFF_ADDED_KEY not in trackCompareResult.keys():
|
||||||
|
trackCompareResult[DIFF_ADDED_KEY] = {}
|
||||||
|
trackCompareResult[DIFF_ADDED_KEY][targetTrackDescriptor.getIndex()] = targetTrackDescriptor
|
||||||
|
continue
|
||||||
|
|
||||||
|
trackDiff = self.compareTracks(targetTrackDescriptor, sourceTrackDescriptor)
|
||||||
|
if trackDiff:
|
||||||
|
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
|
||||||
|
trackCompareResult[DIFF_CHANGED_KEY] = {}
|
||||||
|
trackCompareResult[DIFF_CHANGED_KEY][targetTrackDescriptor.getIndex()] = trackDiff
|
||||||
|
|
||||||
|
targetSourceIndices = {
|
||||||
|
targetTrackDescriptor.getSourceIndex()
|
||||||
|
for targetTrackDescriptor in self.__targetTrackDescriptors
|
||||||
|
}
|
||||||
|
for sourceTrackDescriptor in self.__sourceTrackDescriptors:
|
||||||
|
if sourceTrackDescriptor.getIndex() not in targetSourceIndices:
|
||||||
|
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
|
||||||
|
trackCompareResult[DIFF_REMOVED_KEY] = {}
|
||||||
|
trackCompareResult[DIFF_REMOVED_KEY][sourceTrackDescriptor.getIndex()] = sourceTrackDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
if trackCompareResult:
|
||||||
|
self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY] = trackCompareResult
|
||||||
|
|
||||||
|
|
||||||
|
def compareTracks(self,
|
||||||
|
targetTrackDescriptor: TrackDescriptor = None,
|
||||||
|
sourceTrackDescriptor: TrackDescriptor = None):
|
||||||
|
|
||||||
|
sourceTrackTags = sourceTrackDescriptor.getTags() if sourceTrackDescriptor is not None else {}
|
||||||
|
targetTrackTags = (
|
||||||
|
self.normalizeTrackTags(targetTrackDescriptor.getTags())
|
||||||
|
if targetTrackDescriptor is not None
|
||||||
|
else {}
|
||||||
|
)
|
||||||
|
|
||||||
|
trackCompareResult = {}
|
||||||
|
|
||||||
|
tagsDiffResult = dictDiff(sourceTrackTags,
|
||||||
|
targetTrackTags,
|
||||||
|
ignoreKeys=self.__ignoreTrackKeys,
|
||||||
|
removeKeys=self.__removeTrackKeys)
|
||||||
|
|
||||||
|
if tagsDiffResult:
|
||||||
|
trackCompareResult[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiffResult
|
||||||
|
|
||||||
|
sourceDispositionSet = sourceTrackDescriptor.getDispositionSet() if sourceTrackDescriptor is not None else set()
|
||||||
|
targetDispositionSet = targetTrackDescriptor.getDispositionSet() if targetTrackDescriptor is not None else set()
|
||||||
|
|
||||||
|
# if targetTrackDescriptor.getIndex() == 3:
|
||||||
|
# raise click.ClickException(f"{sourceDispositionSet} {targetDispositionSet}")
|
||||||
|
|
||||||
|
dispositionDiffResult = setDiff(sourceDispositionSet, targetDispositionSet)
|
||||||
|
|
||||||
|
if dispositionDiffResult:
|
||||||
|
trackCompareResult[MediaDescriptorChangeSet.DISPOSITION_SET_KEY] = dispositionDiffResult
|
||||||
|
|
||||||
|
return trackCompareResult
|
||||||
|
|
||||||
|
def normalizeTrackTagValue(self, tagKey, tagValue):
|
||||||
|
if tagKey != "language":
|
||||||
|
return tagValue
|
||||||
|
|
||||||
|
if isinstance(tagValue, IsoLanguage):
|
||||||
|
return tagValue.threeLetter()
|
||||||
|
|
||||||
|
trackLanguage = IsoLanguage.findThreeLetter(str(tagValue))
|
||||||
|
if trackLanguage != IsoLanguage.UNDEFINED:
|
||||||
|
return trackLanguage.threeLetter()
|
||||||
|
|
||||||
|
return tagValue
|
||||||
|
|
||||||
|
def normalizeTrackTags(self, trackTags: dict):
|
||||||
|
return {
|
||||||
|
tagKey: self.normalizeTrackTagValue(tagKey, tagValue)
|
||||||
|
for tagKey, tagValue in trackTags.items()
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def generateDispositionTokens(self):
|
||||||
|
"""
|
||||||
|
#Example: -disposition:s:0 default -disposition:s:1 0
|
||||||
|
"""
|
||||||
|
dispositionTokens = []
|
||||||
|
|
||||||
|
# if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
|
||||||
|
#
|
||||||
|
# if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
# addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
|
||||||
|
# trackDescriptor: TrackDescriptor
|
||||||
|
# for trackDescriptor in addedTracks.values():
|
||||||
|
#
|
||||||
|
# dispositionSet = trackDescriptor.getDispositionSet()
|
||||||
|
#
|
||||||
|
# if dispositionSet:
|
||||||
|
# dispositionTokens += [f"-disposition:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
|
||||||
|
# '+'.join([d.label() for d in dispositionSet])]
|
||||||
|
#
|
||||||
|
# if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
# changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
|
||||||
|
# trackDiffObj: dict
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# for trackIndex, trackDiffObj in changedTracks.items():
|
||||||
|
#
|
||||||
|
# if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
|
||||||
|
#
|
||||||
|
# dispositionDiffObj: dict = trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY]
|
||||||
|
#
|
||||||
|
# addedDispositions = dispositionDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in dispositionDiffObj.keys() else set()
|
||||||
|
# removedDispositions = dispositionDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in dispositionDiffObj.keys() else set()
|
||||||
|
# unchangedDispositions = dispositionDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in dispositionDiffObj.keys() else set()
|
||||||
|
#
|
||||||
|
# targetDispositions = addedDispositions | unchangedDispositions
|
||||||
|
#
|
||||||
|
# trackDescriptor = self.__targetTrackDescriptors[trackIndex]
|
||||||
|
# streamIndicator = trackDescriptor.getType().indicator()
|
||||||
|
# subIndex = trackDescriptor.getSubIndex()
|
||||||
|
#
|
||||||
|
# if targetDispositions:
|
||||||
|
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
|
||||||
|
# # if not targetDispositions and removedDispositions:
|
||||||
|
# else:
|
||||||
|
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
|
||||||
|
for ttd in self.__targetTrackDescriptors:
|
||||||
|
|
||||||
|
targetDispositions = ttd.getDispositionSet()
|
||||||
|
streamIndicator = ttd.getType().indicator()
|
||||||
|
subIndex = ttd.getSubIndex()
|
||||||
|
|
||||||
|
if targetDispositions:
|
||||||
|
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
|
||||||
|
# if not targetDispositions and removedDispositions:
|
||||||
|
else:
|
||||||
|
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
|
||||||
|
|
||||||
|
return dispositionTokens
|
||||||
|
|
||||||
|
|
||||||
|
def generateMetadataTokens(self):
|
||||||
|
|
||||||
|
metadataTokens = []
|
||||||
|
|
||||||
|
if MediaDescriptorChangeSet.TAGS_KEY in self.__changeSetObj.keys():
|
||||||
|
|
||||||
|
addedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
|
||||||
|
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||||
|
removedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
|
||||||
|
if DIFF_REMOVED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||||
|
changedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
|
||||||
|
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||||
|
|
||||||
|
outputMediaTags = addedMediaTags | changedMediaTags
|
||||||
|
|
||||||
|
if (not 'no_signature' in self.__context.keys()
|
||||||
|
or not self.__context['no_signature']):
|
||||||
|
outputMediaTags = outputMediaTags | self.__signatureTags
|
||||||
|
|
||||||
|
# outputMediaTags = {k:v for k,v in outputMediaTags.items() if k not in self.__removeGlobalKeys}
|
||||||
|
|
||||||
|
for tagKey, tagValue in outputMediaTags.items():
|
||||||
|
metadataTokens += [f"-metadata:g",
|
||||||
|
f"{tagKey}={tagValue}"]
|
||||||
|
|
||||||
|
for tagKey, tagValue in changedMediaTags.items():
|
||||||
|
metadataTokens += [f"-metadata:g",
|
||||||
|
f"{tagKey}={tagValue}"]
|
||||||
|
|
||||||
|
for removeKey in removedMediaTags.keys():
|
||||||
|
metadataTokens += [f"-metadata:g",
|
||||||
|
f"{removeKey}="]
|
||||||
|
|
||||||
|
|
||||||
|
if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
|
||||||
|
|
||||||
|
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
|
||||||
|
trackDescriptor: TrackDescriptor
|
||||||
|
for trackDescriptor in addedTracks.values():
|
||||||
|
for tagKey, tagValue in self.normalizeTrackTags(trackDescriptor.getTags()).items():
|
||||||
|
if not tagKey in self.__removeTrackKeys:
|
||||||
|
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||||
|
+ f":{trackDescriptor.getSubIndex()}",
|
||||||
|
f"{tagKey}={tagValue}"]
|
||||||
|
|
||||||
|
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
|
||||||
|
trackDiffObj: dict
|
||||||
|
for trackIndex, trackDiffObj in changedTracks.items():
|
||||||
|
|
||||||
|
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
|
||||||
|
|
||||||
|
tagsDiffObj = trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY]
|
||||||
|
|
||||||
|
addedTrackTags = tagsDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in tagsDiffObj.keys() else {}
|
||||||
|
changedTrackTags = tagsDiffObj[DIFF_CHANGED_KEY] if DIFF_CHANGED_KEY in tagsDiffObj.keys() else {}
|
||||||
|
unchangedTrackTags = tagsDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in tagsDiffObj.keys() else {}
|
||||||
|
removedTrackTags = tagsDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in tagsDiffObj.keys() else {}
|
||||||
|
|
||||||
|
outputTrackTags = addedTrackTags | changedTrackTags
|
||||||
|
|
||||||
|
trackDescriptor = self.__targetTrackDescriptorsByIndex[trackIndex]
|
||||||
|
|
||||||
|
for tagKey, tagValue in self.normalizeTrackTags(outputTrackTags).items():
|
||||||
|
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||||
|
+ f":{trackDescriptor.getSubIndex()}",
|
||||||
|
f"{tagKey}={tagValue}"]
|
||||||
|
|
||||||
|
if trackDescriptor.getExternalSourceFilePath():
|
||||||
|
# When a single-track external file substitutes the
|
||||||
|
# media payload, keep metadata from the regular
|
||||||
|
# source track unless the external/target side
|
||||||
|
# overrides it explicitly.
|
||||||
|
preservedTrackTags = (
|
||||||
|
{
|
||||||
|
tagKey: tagValue
|
||||||
|
for tagKey, tagValue in removedTrackTags.items()
|
||||||
|
if tagKey not in self.__removeTrackKeys
|
||||||
|
}
|
||||||
|
| unchangedTrackTags
|
||||||
|
)
|
||||||
|
for tagKey, tagValue in self.normalizeTrackTags(preservedTrackTags).items():
|
||||||
|
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||||
|
+ f":{trackDescriptor.getSubIndex()}",
|
||||||
|
f"{tagKey}={tagValue}"]
|
||||||
|
else:
|
||||||
|
for removeKey in removedTrackTags.keys():
|
||||||
|
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||||
|
+ f":{trackDescriptor.getSubIndex()}",
|
||||||
|
f"{removeKey}="]
|
||||||
|
|
||||||
|
for tagKey, tagValue in self.__context.get('encoding_metadata_tags', {}).items():
|
||||||
|
metadataTokens += [f"-metadata:g", f"{tagKey}={tagValue}"]
|
||||||
|
|
||||||
|
metadataTokens += self.generateConfiguredRemovalMetadataTokens()
|
||||||
|
|
||||||
|
return metadataTokens
|
||||||
|
|
||||||
|
|
||||||
|
def getChangeSetObj(self):
|
||||||
|
return self.__changeSetObj
|
||||||
|
|
||||||
|
def generateConfiguredRemovalMetadataTokens(self):
|
||||||
|
metadataTokens = []
|
||||||
|
|
||||||
|
for removeKey in self.__removeGlobalKeys:
|
||||||
|
metadataTokens += ["-metadata:g", f"{removeKey}="]
|
||||||
|
|
||||||
|
for trackDescriptor in self.__targetTrackDescriptors:
|
||||||
|
for removeKey in self.__removeTrackKeys:
|
||||||
|
metadataTokens += [
|
||||||
|
f"-metadata:s:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
|
||||||
|
f"{removeKey}=",
|
||||||
|
]
|
||||||
|
|
||||||
|
return metadataTokens
|
||||||
@@ -6,13 +6,9 @@ from textual.containers import Grid
|
|||||||
|
|
||||||
from ffx.audio_layout import AudioLayout
|
from ffx.audio_layout import AudioLayout
|
||||||
|
|
||||||
from .pattern_controller import PatternController
|
|
||||||
from .show_controller import ShowController
|
|
||||||
from .track_controller import TrackController
|
|
||||||
from .tag_controller import TagController
|
|
||||||
|
|
||||||
from .show_details_screen import ShowDetailsScreen
|
from .show_details_screen import ShowDetailsScreen
|
||||||
from .pattern_details_screen import PatternDetailsScreen
|
from .pattern_details_screen import PatternDetailsScreen
|
||||||
|
from .screen_support import build_screen_bootstrap, build_screen_controllers
|
||||||
|
|
||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
from ffx.track_codec import TrackCodec
|
from ffx.track_codec import TrackCodec
|
||||||
@@ -27,7 +23,9 @@ from textual.widgets._data_table import CellDoesNotExist
|
|||||||
from ffx.media_descriptor import MediaDescriptor
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
from ffx.file_properties import FileProperties
|
from ffx.file_properties import FileProperties
|
||||||
|
|
||||||
from ffx.helper import DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
|
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
|
||||||
|
|
||||||
|
from ffx.helper import formatRichColor, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
|
||||||
|
|
||||||
|
|
||||||
# Screen[dict[int, str, int]]
|
# Screen[dict[int, str, int]]
|
||||||
@@ -38,7 +36,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
Grid {
|
Grid {
|
||||||
grid-size: 5 8;
|
grid-size: 5 8;
|
||||||
grid-rows: 8 2 2 2 2 8 2 2 8;
|
grid-rows: 8 2 2 2 2 8 2 2 8;
|
||||||
grid-columns: 25 25 120 10 75;
|
grid-columns: 15 25 90 10 105;
|
||||||
height: 100%;
|
height: 100%;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
padding: 1;
|
padding: 1;
|
||||||
@@ -110,6 +108,19 @@ class MediaDetailsScreen(Screen):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
TRACKS_TABLE_INDEX_COLUMN_LABEL = "Index"
|
||||||
|
TRACKS_TABLE_TYPE_COLUMN_LABEL = "Type"
|
||||||
|
TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL = "SubIndex"
|
||||||
|
TRACKS_TABLE_CODEC_COLUMN_LABEL = "Codec"
|
||||||
|
TRACKS_TABLE_LAYOUT_COLUMN_LABEL = "Layout"
|
||||||
|
TRACKS_TABLE_LANGUAGE_COLUMN_LABEL = "Language"
|
||||||
|
TRACKS_TABLE_TITLE_COLUMN_LABEL = "Title"
|
||||||
|
TRACKS_TABLE_DEFAULT_COLUMN_LABEL = "Default"
|
||||||
|
TRACKS_TABLE_FORCED_COLUMN_LABEL = "Forced"
|
||||||
|
|
||||||
|
DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL = 'Differences (file->db/output)'
|
||||||
|
|
||||||
|
|
||||||
BINDINGS = [
|
BINDINGS = [
|
||||||
("n", "new_pattern", "New Pattern"),
|
("n", "new_pattern", "New Pattern"),
|
||||||
("u", "update_pattern", "Update Pattern"),
|
("u", "update_pattern", "Update Pattern"),
|
||||||
@@ -120,13 +131,23 @@ class MediaDetailsScreen(Screen):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
self.context = self.app.getContext()
|
bootstrap = build_screen_bootstrap(self.app.getContext())
|
||||||
self.Session = self.context['database']['session'] # convenience
|
self.context = bootstrap.context
|
||||||
|
|
||||||
self.__pc = PatternController(context = self.context)
|
self.__removeGlobalKeys = bootstrap.remove_global_keys
|
||||||
self.__sc = ShowController(context = self.context)
|
self.__ignoreGlobalKeys = bootstrap.ignore_global_keys
|
||||||
self.__tc = TrackController(context = self.context)
|
|
||||||
self.__tac = TagController(context = self.context)
|
controllers = build_screen_controllers(
|
||||||
|
self.context,
|
||||||
|
pattern=True,
|
||||||
|
show=True,
|
||||||
|
track=True,
|
||||||
|
tag=True,
|
||||||
|
)
|
||||||
|
self.__pc = controllers['pattern']
|
||||||
|
self.__sc = controllers['show']
|
||||||
|
self.__tc = controllers['track']
|
||||||
|
self.__tac = controllers['tag']
|
||||||
|
|
||||||
if not 'command' in self.context.keys() or self.context['command'] != 'inspect':
|
if not 'command' in self.context.keys() or self.context['command'] != 'inspect':
|
||||||
raise click.ClickException(f"MediaDetailsScreen.__init__(): Can only perform command 'inspect'")
|
raise click.ClickException(f"MediaDetailsScreen.__init__(): Can only perform command 'inspect'")
|
||||||
@@ -180,7 +201,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
def loadProperties(self):
|
def loadProperties(self):
|
||||||
|
|
||||||
self.__mediaFileProperties = FileProperties(self.context, self.__mediaFilename)
|
self.__mediaFileProperties = FileProperties(self.context, self.__mediaFilename)
|
||||||
self.__currentMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
|
self.__sourceMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
|
||||||
|
|
||||||
#HINT: This is None if the filename did not match anything in database
|
#HINT: This is None if the filename did not match anything in database
|
||||||
self.__currentPattern = self.__mediaFileProperties.getPattern()
|
self.__currentPattern = self.__mediaFileProperties.getPattern()
|
||||||
@@ -191,9 +212,13 @@ class MediaDetailsScreen(Screen):
|
|||||||
# Enumerating differences between media descriptors
|
# Enumerating differences between media descriptors
|
||||||
# from file (=current) vs from stored in database (=target)
|
# from file (=current) vs from stored in database (=target)
|
||||||
try:
|
try:
|
||||||
self.__mediaDifferences = self.__targetMediaDescriptor.compare(self.__currentMediaDescriptor) if self.__currentPattern is not None else {}
|
mdcs = MediaDescriptorChangeSet(self.context,
|
||||||
|
self.__targetMediaDescriptor,
|
||||||
|
self.__sourceMediaDescriptor)
|
||||||
|
|
||||||
|
self.__mediaChangeSetObj = mdcs.getChangeSetObj()
|
||||||
except ValueError:
|
except ValueError:
|
||||||
self.__mediaDifferences = {}
|
self.__mediaChangeSetObj = {}
|
||||||
|
|
||||||
|
|
||||||
def updateDifferences(self):
|
def updateDifferences(self):
|
||||||
@@ -202,73 +227,87 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
self.differencesTable.clear()
|
self.differencesTable.clear()
|
||||||
|
|
||||||
if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
|
|
||||||
|
|
||||||
currentTags = self.__currentMediaDescriptor.getTags()
|
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
|
||||||
targetTags = self.__targetMediaDescriptor.getTags()
|
|
||||||
|
|
||||||
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||||
for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
|
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].items():
|
||||||
row = (f"added media tag: key='{addedTagKey}' value='{targetTags[addedTagKey]}'",)
|
if tagKey not in self.__ignoreGlobalKeys:
|
||||||
|
row = (f"add media tag: key='{tagKey}' value='{tagValue}'",)
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||||
for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
|
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].items():
|
||||||
row = (f"removed media tag: key='{removedTagKey}' value='{currentTags[removedTagKey]}'",)
|
if tagKey not in self.__ignoreGlobalKeys and tagKey not in self.__removeGlobalKeys:
|
||||||
|
row = (f"remove media tag: key='{tagKey}' value='{tagValue}'",)
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||||
for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
|
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].items():
|
||||||
row = (f"changed media tag: key='{changedTagKey}' value='{currentTags[changedTagKey]}'->'{targetTags[changedTagKey]}'",)
|
if tagKey not in self.__ignoreGlobalKeys:
|
||||||
|
row = (f"change media tag: key='{tagKey}' value='{tagValue}'",)
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
|
|
||||||
|
|
||||||
currentTracks = self.__currentMediaDescriptor.getAllTrackDescriptors() # 0,1,2,3
|
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
|
||||||
targetTracks = self.__targetMediaDescriptor.getAllTrackDescriptors() # 0 <- from DB
|
|
||||||
|
|
||||||
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
|
||||||
#raise click.ClickException(f"add track {self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]}")
|
trackDescriptor: TrackDescriptor
|
||||||
for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
|
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
|
||||||
addedTrack : Track = targetTracks[addedTrackIndex]
|
row = (f"add {trackDescriptor.getType().label()} track: index={trackDescriptor.getIndex()} lang={trackDescriptor.getLanguage().threeLetter()}",)
|
||||||
row = (f"added {addedTrack.getType().label()} track: index={addedTrackIndex} lang={addedTrack.getLanguage().threeLetter()}",)
|
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
for removedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY]:
|
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
|
||||||
row = (f"removed track: index={removedTrackIndex}",)
|
row = (f"remove stream #{trackIndex}",)
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
for changedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].keys():
|
|
||||||
|
|
||||||
changedTrack : Track = targetTracks[changedTrackIndex]
|
changedTracks: dict = self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
|
||||||
changedTrackDiff : dict = self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY][changedTrackIndex]
|
|
||||||
|
|
||||||
if MediaDescriptor.TAGS_KEY in changedTrackDiff.keys():
|
targetTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors()
|
||||||
|
|
||||||
if DIFF_ADDED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
|
trackDiffObj: dict
|
||||||
for addedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
|
for trackIndex, trackDiffObj in changedTracks.items():
|
||||||
addedTagValue = changedTrack.getTags()[addedTagKey]
|
|
||||||
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added key={addedTagKey} value={addedTagValue}",)
|
ttd: TrackDescriptor = targetTrackDescriptors[trackIndex]
|
||||||
|
|
||||||
|
|
||||||
|
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
|
||||||
|
|
||||||
|
removedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
|
||||||
|
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||||
|
for tagKey, tagValue in removedTags.items():
|
||||||
|
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove key={tagKey} value={tagValue}",)
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if DIFF_REMOVED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
|
addedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
|
||||||
for removedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
|
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||||
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed key={removedTagKey}",)
|
for tagKey, tagValue in addedTags.items():
|
||||||
|
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add key={tagKey} value={tagValue}",)
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
|
changedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
|
||||||
|
if DIFF_CHANGED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||||
if DIFF_ADDED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
|
for tagKey, tagValue in changedTags.items():
|
||||||
for addedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]:
|
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) change key={tagKey} value={tagValue}",)
|
||||||
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added disposition={addedDisposition.label()}",)
|
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
if DIFF_REMOVED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
|
|
||||||
for removedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]:
|
if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
|
||||||
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed disposition={removedDisposition.label()}",)
|
|
||||||
|
addedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]
|
||||||
|
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
|
||||||
|
for ad in addedDispositions:
|
||||||
|
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add disposition={ad.label()}",)
|
||||||
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
|
removedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]
|
||||||
|
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
|
||||||
|
for rd in removedDispositions:
|
||||||
|
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove disposition={rd.label()}",)
|
||||||
self.differencesTable.add_row(*map(str, row))
|
self.differencesTable.add_row(*map(str, row))
|
||||||
|
|
||||||
|
|
||||||
@@ -282,8 +321,15 @@ class MediaDetailsScreen(Screen):
|
|||||||
row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
|
row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
|
||||||
self.showsTable.add_row(*map(str, row))
|
self.showsTable.add_row(*map(str, row))
|
||||||
|
|
||||||
for mediaTagKey, mediaTagValue in self.__currentMediaDescriptor.getTags().items():
|
for mediaTagKey, mediaTagValue in self.__sourceMediaDescriptor.getTags().items():
|
||||||
row = (mediaTagKey, mediaTagValue) # Convert each element to a string before adding
|
|
||||||
|
textColor = None
|
||||||
|
if mediaTagKey in self.__ignoreGlobalKeys:
|
||||||
|
textColor = 'blue'
|
||||||
|
if mediaTagKey in self.__removeGlobalKeys:
|
||||||
|
textColor = 'red'
|
||||||
|
|
||||||
|
row = (formatRichColor(mediaTagKey, textColor), formatRichColor(mediaTagValue, textColor)) # Convert each element to a string before adding
|
||||||
self.mediaTagsTable.add_row(*map(str, row))
|
self.mediaTagsTable.add_row(*map(str, row))
|
||||||
|
|
||||||
self.updateTracks()
|
self.updateTracks()
|
||||||
@@ -317,7 +363,8 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
self.tracksTable.clear()
|
self.tracksTable.clear()
|
||||||
|
|
||||||
trackDescriptorList = self.__currentMediaDescriptor.getAllTrackDescriptors()
|
# trackDescriptorList = self.__sourceMediaDescriptor.getAllTrackDescriptors()
|
||||||
|
trackDescriptorList = self.__sourceMediaDescriptor.getTrackDescriptors()
|
||||||
|
|
||||||
typeCounter = {}
|
typeCounter = {}
|
||||||
|
|
||||||
@@ -352,7 +399,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
# Define the columns with headers
|
# Define the columns with headers
|
||||||
self.column_key_show_id = self.showsTable.add_column("ID", width=10)
|
self.column_key_show_id = self.showsTable.add_column("ID", width=10)
|
||||||
self.column_key_show_name = self.showsTable.add_column("Name", width=50)
|
self.column_key_show_name = self.showsTable.add_column("Name", width=80)
|
||||||
self.column_key_show_year = self.showsTable.add_column("Year", width=10)
|
self.column_key_show_year = self.showsTable.add_column("Year", width=10)
|
||||||
|
|
||||||
self.showsTable.cursor_type = 'row'
|
self.showsTable.cursor_type = 'row'
|
||||||
@@ -361,8 +408,8 @@ class MediaDetailsScreen(Screen):
|
|||||||
self.mediaTagsTable = DataTable(classes="two")
|
self.mediaTagsTable = DataTable(classes="two")
|
||||||
|
|
||||||
# Define the columns with headers
|
# Define the columns with headers
|
||||||
self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=50)
|
self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=30)
|
||||||
self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=100)
|
self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=70)
|
||||||
|
|
||||||
self.mediaTagsTable.cursor_type = 'row'
|
self.mediaTagsTable.cursor_type = 'row'
|
||||||
|
|
||||||
@@ -370,15 +417,15 @@ class MediaDetailsScreen(Screen):
|
|||||||
self.tracksTable = DataTable(classes="two")
|
self.tracksTable = DataTable(classes="two")
|
||||||
|
|
||||||
# Define the columns with headers
|
# Define the columns with headers
|
||||||
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
|
self.column_key_track_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_INDEX_COLUMN_LABEL, width=5)
|
||||||
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
|
self.column_key_track_type = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TYPE_COLUMN_LABEL, width=10)
|
||||||
self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
|
self.column_key_track_sub_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL, width=8)
|
||||||
self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
|
self.column_key_track_codec = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_CODEC_COLUMN_LABEL, width=10)
|
||||||
self.column_key_track_layout = self.tracksTable.add_column("Layout", width=10)
|
self.column_key_track_layout = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LAYOUT_COLUMN_LABEL, width=10)
|
||||||
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
|
self.column_key_track_language = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LANGUAGE_COLUMN_LABEL, width=15)
|
||||||
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
|
self.column_key_track_title = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TITLE_COLUMN_LABEL, width=48)
|
||||||
self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
|
self.column_key_track_default = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_DEFAULT_COLUMN_LABEL, width=8)
|
||||||
self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
|
self.column_key_track_forced = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_FORCED_COLUMN_LABEL, width=8)
|
||||||
|
|
||||||
self.tracksTable.cursor_type = 'row'
|
self.tracksTable.cursor_type = 'row'
|
||||||
|
|
||||||
@@ -387,7 +434,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
self.differencesTable = DataTable(id='differences-table') # classes="triple"
|
self.differencesTable = DataTable(id='differences-table') # classes="triple"
|
||||||
|
|
||||||
# Define the columns with headers
|
# Define the columns with headers
|
||||||
self.column_key_differences = self.differencesTable.add_column("Differences (file->db)", width=70)
|
self.column_key_differences = self.differencesTable.add_column(MediaDetailsScreen.DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL, width=100)
|
||||||
|
|
||||||
self.differencesTable.cursor_type = 'row'
|
self.differencesTable.cursor_type = 'row'
|
||||||
|
|
||||||
@@ -439,15 +486,15 @@ class MediaDetailsScreen(Screen):
|
|||||||
yield Footer()
|
yield Footer()
|
||||||
|
|
||||||
|
|
||||||
def getPatternDescriptorFromInput(self):
|
def getPatternObjFromInput(self):
|
||||||
"""Returns show id and pattern from corresponding inputs"""
|
"""Returns show id and pattern as obj from corresponding inputs"""
|
||||||
patternDescriptor = {}
|
patternObj = {}
|
||||||
try:
|
try:
|
||||||
patternDescriptor['show_id'] = self.getSelectedShowDescriptor().getId()
|
patternObj['show_id'] = self.getSelectedShowDescriptor().getId()
|
||||||
patternDescriptor['pattern'] = str(self.query_one("#pattern_input", Input).value)
|
patternObj['pattern'] = str(self.query_one("#pattern_input", Input).value)
|
||||||
except:
|
except:
|
||||||
pass
|
return {}
|
||||||
return patternDescriptor
|
return patternObj
|
||||||
|
|
||||||
|
|
||||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||||
@@ -464,12 +511,12 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
if event.button.id == "select_default_button":
|
if event.button.id == "select_default_button":
|
||||||
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||||
self.__currentMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
|
self.__sourceMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
|
||||||
self.updateTracks()
|
self.updateTracks()
|
||||||
|
|
||||||
if event.button.id == "select_forced_button":
|
if event.button.id == "select_forced_button":
|
||||||
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||||
self.__currentMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
|
self.__sourceMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
|
||||||
self.updateTracks()
|
self.updateTracks()
|
||||||
|
|
||||||
|
|
||||||
@@ -512,6 +559,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
try:
|
try:
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
|
|
||||||
|
kwargs[ShowDescriptor.CONTEXT_KEY] = self.context
|
||||||
kwargs[ShowDescriptor.ID_KEY] = int(selected_row_data[0])
|
kwargs[ShowDescriptor.ID_KEY] = int(selected_row_data[0])
|
||||||
kwargs[ShowDescriptor.NAME_KEY] = str(selected_row_data[1])
|
kwargs[ShowDescriptor.NAME_KEY] = str(selected_row_data[1])
|
||||||
kwargs[ShowDescriptor.YEAR_KEY] = int(selected_row_data[2])
|
kwargs[ShowDescriptor.YEAR_KEY] = int(selected_row_data[2])
|
||||||
@@ -526,10 +574,13 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
|
|
||||||
def handle_new_pattern(self, showDescriptor: ShowDescriptor):
|
def handle_new_pattern(self, showDescriptor: ShowDescriptor):
|
||||||
|
""""""
|
||||||
|
|
||||||
if type(showDescriptor) is not ShowDescriptor:
|
if type(showDescriptor) is not ShowDescriptor:
|
||||||
raise TypeError("MediaDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor")
|
raise TypeError("MediaDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor")
|
||||||
|
|
||||||
|
self.removeShow()
|
||||||
|
|
||||||
showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
|
showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
|
||||||
if showRowIndex is None:
|
if showRowIndex is None:
|
||||||
show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
|
show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
|
||||||
@@ -539,29 +590,29 @@ class MediaDetailsScreen(Screen):
|
|||||||
if showRowIndex is not None:
|
if showRowIndex is not None:
|
||||||
self.showsTable.move_cursor(row=showRowIndex)
|
self.showsTable.move_cursor(row=showRowIndex)
|
||||||
|
|
||||||
self.removeShow()
|
patternObj = self.getPatternObjFromInput()
|
||||||
|
|
||||||
patternDescriptor = self.getPatternDescriptorFromInput()
|
if patternObj:
|
||||||
|
mediaTags = {}
|
||||||
|
for tagKey, tagValue in self.__sourceMediaDescriptor.getTags().items():
|
||||||
|
|
||||||
if patternDescriptor:
|
# Filter tags that make no sense to preserve
|
||||||
patternId = self.__pc.addPattern(patternDescriptor)
|
if tagKey not in self.__ignoreGlobalKeys and not tagKey in self.__removeGlobalKeys:
|
||||||
|
mediaTags[tagKey] = tagValue
|
||||||
|
|
||||||
|
patternId = self.__pc.savePatternSchema(
|
||||||
|
patternObj,
|
||||||
|
trackDescriptors=self.__sourceMediaDescriptor.getTrackDescriptors(),
|
||||||
|
mediaTags=mediaTags,
|
||||||
|
)
|
||||||
if patternId:
|
if patternId:
|
||||||
self.highlightPattern(False)
|
self.highlightPattern(False)
|
||||||
|
|
||||||
for tagKey, tagValue in self.__currentMediaDescriptor.getTags().items():
|
|
||||||
self.__tac.updateMediaTag(patternId, tagKey, tagValue)
|
|
||||||
|
|
||||||
for trackDescriptor in self.__currentMediaDescriptor.getAllTrackDescriptors():
|
|
||||||
self.__tc.addTrack(trackDescriptor, patternId = patternId)
|
|
||||||
|
|
||||||
|
|
||||||
def action_new_pattern(self):
|
def action_new_pattern(self):
|
||||||
|
"""Adding new patterns
|
||||||
|
|
||||||
#TODO #427: Fehlermeldung in TUI
|
If the corresponding show does not exists in DB it is added beforehand"""
|
||||||
# try:
|
|
||||||
# self.__currentMediaDescriptor.checkConfiguration()
|
|
||||||
# except ValueError:
|
|
||||||
# return
|
|
||||||
|
|
||||||
selectedShowDescriptor = self.getSelectedShowDescriptor()
|
selectedShowDescriptor = self.getSelectedShowDescriptor()
|
||||||
|
|
||||||
@@ -574,90 +625,104 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
|
|
||||||
def action_update_pattern(self):
|
def action_update_pattern(self):
|
||||||
"""When updating the database the actions must reverse the difference (eq to diff db->file)"""
|
"""Updating patterns
|
||||||
|
|
||||||
|
When updating the database the actions must reverse the difference (eq to diff db->file)"""
|
||||||
|
|
||||||
if self.__currentPattern is not None:
|
if self.__currentPattern is not None:
|
||||||
patternDescriptor = self.getPatternDescriptorFromInput()
|
patternObj = self.getPatternObjFromInput()
|
||||||
if (patternDescriptor
|
if (patternObj
|
||||||
and self.__currentPattern.getPattern() != patternDescriptor['pattern']):
|
and self.__currentPattern.getPattern() != patternObj['pattern']):
|
||||||
return self.__pc.updatePattern(self.__currentPattern.getId(), patternDescriptor)
|
return self.__pc.updatePattern(self.__currentPattern.getId(), patternObj)
|
||||||
|
|
||||||
self.loadProperties()
|
self.loadProperties()
|
||||||
|
|
||||||
if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
|
# __mediaChangeSetObj is file vs database
|
||||||
|
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
|
||||||
|
|
||||||
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||||
for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
|
for addedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].keys():
|
||||||
|
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} addedTagKey={addedTagKey}")
|
||||||
self.__tac.deleteMediaTagByKey(self.__currentPattern.getId(), addedTagKey)
|
self.__tac.deleteMediaTagByKey(self.__currentPattern.getId(), addedTagKey)
|
||||||
|
|
||||||
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||||
for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
|
for removedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].keys():
|
||||||
currentTags = self.__currentMediaDescriptor.getTags()
|
currentTags = self.__sourceMediaDescriptor.getTags()
|
||||||
|
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} removedTagKey={removedTagKey} currentTags={currentTags[removedTagKey]}")
|
||||||
self.__tac.updateMediaTag(self.__currentPattern.getId(), removedTagKey, currentTags[removedTagKey])
|
self.__tac.updateMediaTag(self.__currentPattern.getId(), removedTagKey, currentTags[removedTagKey])
|
||||||
|
|
||||||
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||||
for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
|
for changedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].keys():
|
||||||
currentTags = self.__currentMediaDescriptor.getTags()
|
currentTags = self.__sourceMediaDescriptor.getTags()
|
||||||
|
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} changedTagKey={changedTagKey} currentTags={currentTags[changedTagKey]}")
|
||||||
self.__tac.updateMediaTag(self.__currentPattern.getId(), changedTagKey, currentTags[changedTagKey])
|
self.__tac.updateMediaTag(self.__currentPattern.getId(), changedTagKey, currentTags[changedTagKey])
|
||||||
|
|
||||||
if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
|
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
|
||||||
|
|
||||||
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
|
||||||
for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
|
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
|
||||||
targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
|
#targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
|
||||||
if targetTracks:
|
# if targetTracks:
|
||||||
self.__tc.deleteTrack(targetTracks[0].getId()) # id
|
# self.__tc.deleteTrack(targetTracks[0].getId()) # id
|
||||||
|
# self.__tc.deleteTrack(targetTracks[0].getId())
|
||||||
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
self.__tc.addTrack(trackDescriptor, patternId = self.__currentPattern.getId())
|
||||||
for removedTrackIndex, removedTrack in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY].items():
|
|
||||||
|
|
||||||
|
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
trackDescriptor: TrackDescriptor
|
||||||
|
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
|
||||||
# Track per inspect/update hinzufügen
|
# Track per inspect/update hinzufügen
|
||||||
self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
|
#self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
|
||||||
|
self.__tc.deleteTrack(trackDescriptor.getId())
|
||||||
|
|
||||||
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||||
|
|
||||||
# [vsTracks[tp].getIndex()] = trackDiff
|
# [vsTracks[tp].getIndex()] = trackDiff
|
||||||
for changedTrackIndex, changedTrackDiff in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].items():
|
for trackIndex, trackDiff in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY].items():
|
||||||
|
|
||||||
changedTargetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
|
targetTracks = [t for t in self.__targetMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
|
||||||
changedTargeTrackId = changedTargetTracks[0].getId() if changedTargetTracks else None
|
targetTrackId = targetTracks[0].getId() if targetTracks else None
|
||||||
changedTargetTrackIndex = changedTargetTracks[0].getIndex() if changedTargetTracks else None
|
targetTrackIndex = targetTracks[0].getIndex() if targetTracks else None
|
||||||
|
|
||||||
changedCurrentTracks = [t for t in self.__currentMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
|
changedCurrentTracks = [t for t in self.__sourceMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
|
||||||
# changedCurrentTrackId #HINT: Undefined as track descriptors do not come from file with track_id
|
# changedCurrentTrackId #HINT: Undefined as track descriptors do not come from file with track_id
|
||||||
|
|
||||||
if TrackDescriptor.TAGS_KEY in changedTrackDiff.keys():
|
if TrackDescriptor.TAGS_KEY in trackDiff.keys():
|
||||||
changedTrackTagsDiff = changedTrackDiff[TrackDescriptor.TAGS_KEY]
|
tagsDiff = trackDiff[TrackDescriptor.TAGS_KEY]
|
||||||
|
|
||||||
if DIFF_ADDED_KEY in changedTrackTagsDiff.keys():
|
if DIFF_ADDED_KEY in tagsDiff.keys():
|
||||||
for addedTrackTagKey in changedTrackTagsDiff[DIFF_ADDED_KEY]:
|
for tagKey, tagValue in tagsDiff[DIFF_ADDED_KEY].items():
|
||||||
|
|
||||||
if changedTargetTracks:
|
# if targetTracks:
|
||||||
self.__tac.deleteTrackTagByKey(changedTargeTrackId, addedTrackTagKey)
|
# self.__tac.deleteTrackTagByKey(targetTrackId, addedTrackTagKey)
|
||||||
|
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
|
||||||
|
|
||||||
if DIFF_REMOVED_KEY in changedTrackTagsDiff.keys():
|
|
||||||
for removedTrackTagKey in changedTrackTagsDiff[DIFF_REMOVED_KEY]:
|
|
||||||
if changedCurrentTracks:
|
|
||||||
self.__tac.updateTrackTag(changedTargeTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
|
|
||||||
|
|
||||||
if DIFF_CHANGED_KEY in changedTrackTagsDiff.keys():
|
if DIFF_REMOVED_KEY in tagsDiff.keys():
|
||||||
for changedTrackTagKey in changedTrackTagsDiff[DIFF_CHANGED_KEY]:
|
for tagKey, tagValue in tagsDiff[DIFF_REMOVED_KEY].items():
|
||||||
if changedCurrentTracks:
|
# if changedCurrentTracks:
|
||||||
self.__tac.updateTrackTag(changedTargeTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
|
# self.__tac.updateTrackTag(targetTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
|
||||||
|
self.__tac.deleteTrackTagByKey(targetTrackId, tagKey)
|
||||||
|
|
||||||
if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
|
if DIFF_CHANGED_KEY in tagsDiff.keys():
|
||||||
changedTrackDispositionDiff = changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
|
for tagKey, tagValue in tagsDiff[DIFF_CHANGED_KEY].items():
|
||||||
|
# if changedCurrentTracks:
|
||||||
|
# self.__tac.updateTrackTag(targetTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
|
||||||
|
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
|
||||||
|
|
||||||
|
|
||||||
|
if TrackDescriptor.DISPOSITION_SET_KEY in trackDiff.keys():
|
||||||
|
changedTrackDispositionDiff = trackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
|
||||||
|
|
||||||
if DIFF_ADDED_KEY in changedTrackDispositionDiff.keys():
|
if DIFF_ADDED_KEY in changedTrackDispositionDiff.keys():
|
||||||
for changedTrackAddedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
|
for changedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
|
||||||
if changedTargetTrackIndex is not None:
|
if targetTrackIndex is not None:
|
||||||
self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackAddedDisposition, False)
|
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, True)
|
||||||
|
|
||||||
if DIFF_REMOVED_KEY in changedTrackDispositionDiff.keys():
|
if DIFF_REMOVED_KEY in changedTrackDispositionDiff.keys():
|
||||||
for changedTrackRemovedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
|
for changedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
|
||||||
if changedTargetTrackIndex is not None:
|
if targetTrackIndex is not None:
|
||||||
self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackRemovedDisposition, True)
|
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, False)
|
||||||
|
|
||||||
|
|
||||||
self.updateDifferences()
|
self.updateDifferences()
|
||||||
@@ -666,11 +731,11 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
def action_edit_pattern(self):
|
def action_edit_pattern(self):
|
||||||
|
|
||||||
patternDescriptor = self.getPatternDescriptorFromInput()
|
patternObj = self.getPatternObjFromInput()
|
||||||
|
|
||||||
if patternDescriptor['pattern']:
|
if patternObj['pattern']:
|
||||||
|
|
||||||
selectedPatternId = self.__pc.findPattern(patternDescriptor)
|
selectedPatternId = self.__pc.findPattern(patternObj)
|
||||||
|
|
||||||
if selectedPatternId is None:
|
if selectedPatternId is None:
|
||||||
raise click.ClickException(f"MediaDetailsScreen.action_edit_pattern(): Pattern to edit has no id")
|
raise click.ClickException(f"MediaDetailsScreen.action_edit_pattern(): Pattern to edit has no id")
|
||||||
@@ -681,4 +746,3 @@ class MediaDetailsScreen(Screen):
|
|||||||
def handle_edit_pattern(self, screenResult):
|
def handle_edit_pattern(self, screenResult):
|
||||||
self.query_one("#pattern_input", Input).value = screenResult['pattern']
|
self.query_one("#pattern_input", Input).value = screenResult['pattern']
|
||||||
self.updateDifferences()
|
self.updateDifferences()
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,20 @@
|
|||||||
|
"""Load ORM model modules so SQLAlchemy relationship strings can resolve."""
|
||||||
|
|
||||||
|
from .show import Base, Show
|
||||||
|
from .pattern import Pattern
|
||||||
|
from .track import Track
|
||||||
|
from .track_tag import TrackTag
|
||||||
|
from .media_tag import MediaTag
|
||||||
|
from .shifted_season import ShiftedSeason
|
||||||
|
from .property import Property
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'Base',
|
||||||
|
'Show',
|
||||||
|
'Pattern',
|
||||||
|
'Track',
|
||||||
|
'TrackTag',
|
||||||
|
'MediaTag',
|
||||||
|
'ShiftedSeason',
|
||||||
|
'Property',
|
||||||
|
]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
import click
|
import click
|
||||||
|
|
||||||
from sqlalchemy import Column, Integer, String, ForeignKey
|
from sqlalchemy import Column, Integer, String, Text, ForeignKey, UniqueConstraint
|
||||||
from sqlalchemy.orm import relationship
|
from sqlalchemy.orm import relationship
|
||||||
|
|
||||||
from .show import Base, Show
|
from .show import Base, Show
|
||||||
@@ -12,6 +12,9 @@ from ffx.show_descriptor import ShowDescriptor
|
|||||||
class Pattern(Base):
|
class Pattern(Base):
|
||||||
|
|
||||||
__tablename__ = 'patterns'
|
__tablename__ = 'patterns'
|
||||||
|
__table_args__ = (
|
||||||
|
UniqueConstraint('show_id', 'pattern', name='uq_patterns_show_id_pattern'),
|
||||||
|
)
|
||||||
|
|
||||||
# v1.x
|
# v1.x
|
||||||
id = Column(Integer, primary_key=True)
|
id = Column(Integer, primary_key=True)
|
||||||
@@ -31,9 +34,13 @@ class Pattern(Base):
|
|||||||
|
|
||||||
tracks = relationship('Track', back_populates='pattern', cascade="all, delete", lazy='joined')
|
tracks = relationship('Track', back_populates='pattern', cascade="all, delete", lazy='joined')
|
||||||
|
|
||||||
|
|
||||||
media_tags = relationship('MediaTag', back_populates='pattern', cascade="all, delete", lazy='joined')
|
media_tags = relationship('MediaTag', back_populates='pattern', cascade="all, delete", lazy='joined')
|
||||||
|
|
||||||
|
quality = Column(Integer, default=0)
|
||||||
|
|
||||||
|
notes = Column(Text, default='')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def getId(self):
|
def getId(self):
|
||||||
return int(self.id)
|
return int(self.id)
|
||||||
|
|||||||
@@ -1,156 +1,411 @@
|
|||||||
import click, re
|
import re
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from ffx.model.media_tag import MediaTag
|
||||||
from ffx.model.pattern import Pattern
|
from ffx.model.pattern import Pattern
|
||||||
|
from ffx.model.track import Track
|
||||||
|
from ffx.model.track_tag import TrackTag
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
|
|
||||||
class PatternController():
|
class DuplicatePatternMatchError(click.ClickException):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidPatternSchemaError(click.ClickException):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class PatternController:
|
||||||
|
_compiled_regex_cache: dict[str, re.Pattern] = {}
|
||||||
|
|
||||||
def __init__(self, context):
|
def __init__(self, context):
|
||||||
|
|
||||||
self.context = context
|
self.context = context
|
||||||
self.Session = self.context['database']['session'] # convenience
|
self.Session = self.context["database"]["session"]
|
||||||
|
|
||||||
|
self.__configurationData = self.context["config"].getData()
|
||||||
|
|
||||||
def addPattern(self, patternDescriptor):
|
metadataConfiguration = (
|
||||||
|
self.__configurationData["metadata"]
|
||||||
|
if "metadata" in self.__configurationData.keys()
|
||||||
|
else {}
|
||||||
|
)
|
||||||
|
|
||||||
|
self.__removeTrackKeys = (
|
||||||
|
metadataConfiguration["streams"]["remove"]
|
||||||
|
if "streams" in metadataConfiguration.keys()
|
||||||
|
and "remove" in metadataConfiguration["streams"].keys()
|
||||||
|
else []
|
||||||
|
)
|
||||||
|
self.__ignoreTrackKeys = (
|
||||||
|
metadataConfiguration["streams"]["ignore"]
|
||||||
|
if "streams" in metadataConfiguration.keys()
|
||||||
|
and "ignore" in metadataConfiguration["streams"].keys()
|
||||||
|
else []
|
||||||
|
)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _clear_regex_cache(cls):
|
||||||
|
cls._compiled_regex_cache.clear()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _compile_pattern_expression(cls, pattern_id: int, expression: str) -> re.Pattern:
|
||||||
|
expression_text = str(expression)
|
||||||
|
compiled = cls._compiled_regex_cache.get(expression_text)
|
||||||
|
if compiled is None:
|
||||||
|
try:
|
||||||
|
compiled = re.compile(expression_text)
|
||||||
|
except re.error as ex:
|
||||||
|
raise click.ClickException(
|
||||||
|
f"Pattern #{pattern_id} contains an invalid regex {expression_text!r}: {ex}"
|
||||||
|
)
|
||||||
|
cls._compiled_regex_cache[expression_text] = compiled
|
||||||
|
return compiled
|
||||||
|
|
||||||
|
def _coerce_pattern_fields(self, patternObj):
|
||||||
|
return {
|
||||||
|
"show_id": int(patternObj["show_id"]),
|
||||||
|
"pattern": str(patternObj["pattern"]),
|
||||||
|
"quality": int(patternObj.get("quality", 0) or 0),
|
||||||
|
"notes": str(patternObj.get("notes", "")),
|
||||||
|
}
|
||||||
|
|
||||||
|
def _coerce_media_tags(self, mediaTags):
|
||||||
|
return {
|
||||||
|
str(tagKey): str(tagValue)
|
||||||
|
for tagKey, tagValue in (mediaTags or {}).items()
|
||||||
|
}
|
||||||
|
|
||||||
|
def _normalize_track_descriptors(self, trackDescriptors):
|
||||||
|
if trackDescriptors is None:
|
||||||
|
raise InvalidPatternSchemaError(
|
||||||
|
"Patterns must define at least one track before they can be stored."
|
||||||
|
)
|
||||||
|
|
||||||
|
normalized_descriptors = []
|
||||||
|
for trackDescriptor in trackDescriptors:
|
||||||
|
if type(trackDescriptor) is not TrackDescriptor:
|
||||||
|
raise TypeError(
|
||||||
|
"PatternController: All track descriptors are required to be of type TrackDescriptor"
|
||||||
|
)
|
||||||
|
normalized_descriptors.append(trackDescriptor)
|
||||||
|
|
||||||
|
if not normalized_descriptors:
|
||||||
|
raise InvalidPatternSchemaError(
|
||||||
|
"Patterns must define at least one track before they can be stored."
|
||||||
|
)
|
||||||
|
|
||||||
|
normalized_descriptors = sorted(
|
||||||
|
normalized_descriptors, key=lambda descriptor: descriptor.getIndex()
|
||||||
|
)
|
||||||
|
|
||||||
|
index_set = {descriptor.getIndex() for descriptor in normalized_descriptors}
|
||||||
|
expected_indexes = set(range(len(normalized_descriptors)))
|
||||||
|
if index_set != expected_indexes:
|
||||||
|
raise click.ClickException(
|
||||||
|
"Pattern tracks must use a contiguous zero-based index order."
|
||||||
|
)
|
||||||
|
|
||||||
|
return normalized_descriptors
|
||||||
|
|
||||||
|
def _ensure_unique_pattern_definition(
|
||||||
|
self,
|
||||||
|
session,
|
||||||
|
show_id: int,
|
||||||
|
pattern_expression: str,
|
||||||
|
exclude_pattern_id: int | None = None,
|
||||||
|
):
|
||||||
|
query = session.query(Pattern).filter(
|
||||||
|
Pattern.show_id == show_id,
|
||||||
|
Pattern.pattern == pattern_expression,
|
||||||
|
)
|
||||||
|
if exclude_pattern_id is not None:
|
||||||
|
query = query.filter(Pattern.id != int(exclude_pattern_id))
|
||||||
|
|
||||||
|
existing_pattern = query.first()
|
||||||
|
if existing_pattern is not None:
|
||||||
|
raise click.ClickException(
|
||||||
|
f"Pattern {pattern_expression!r} already exists for show #{show_id}."
|
||||||
|
)
|
||||||
|
|
||||||
|
def _build_track_row(self, trackDescriptor: TrackDescriptor) -> Track:
|
||||||
|
track = Track(
|
||||||
|
track_type=int(trackDescriptor.getType().index()),
|
||||||
|
codec_name=str(trackDescriptor.getCodec().identifier()),
|
||||||
|
index=int(trackDescriptor.getIndex()),
|
||||||
|
source_index=int(trackDescriptor.getSourceIndex()),
|
||||||
|
disposition_flags=int(
|
||||||
|
TrackDisposition.toFlags(trackDescriptor.getDispositionSet())
|
||||||
|
),
|
||||||
|
audio_layout=trackDescriptor.getAudioLayout().index(),
|
||||||
|
)
|
||||||
|
|
||||||
|
for tagKey, tagValue in trackDescriptor.getTags().items():
|
||||||
|
if tagKey in self.__ignoreTrackKeys or tagKey in self.__removeTrackKeys:
|
||||||
|
continue
|
||||||
|
track.track_tags.append(TrackTag(key=str(tagKey), value=str(tagValue)))
|
||||||
|
|
||||||
|
return track
|
||||||
|
|
||||||
|
def _replace_pattern_schema(
|
||||||
|
self,
|
||||||
|
session,
|
||||||
|
pattern: Pattern,
|
||||||
|
mediaTags: dict[str, str],
|
||||||
|
trackDescriptors: list[TrackDescriptor],
|
||||||
|
):
|
||||||
|
for mediaTag in list(pattern.media_tags):
|
||||||
|
session.delete(mediaTag)
|
||||||
|
for track in list(pattern.tracks):
|
||||||
|
session.delete(track)
|
||||||
|
session.flush()
|
||||||
|
|
||||||
|
for tagKey, tagValue in mediaTags.items():
|
||||||
|
pattern.media_tags.append(MediaTag(key=str(tagKey), value=str(tagValue)))
|
||||||
|
|
||||||
|
for trackDescriptor in trackDescriptors:
|
||||||
|
pattern.tracks.append(self._build_track_row(trackDescriptor))
|
||||||
|
|
||||||
|
def _validate_persisted_pattern(self, pattern: Pattern):
|
||||||
|
if not pattern.tracks:
|
||||||
|
raise InvalidPatternSchemaError(
|
||||||
|
f"Pattern #{pattern.getId()} ({pattern.getPattern()!r}) is invalid because it has no tracks."
|
||||||
|
)
|
||||||
|
|
||||||
|
def savePatternSchema(
|
||||||
|
self,
|
||||||
|
patternObj,
|
||||||
|
trackDescriptors,
|
||||||
|
mediaTags=None,
|
||||||
|
patternId: int | None = None,
|
||||||
|
) -> int:
|
||||||
|
fields = self._coerce_pattern_fields(patternObj)
|
||||||
|
normalized_tracks = self._normalize_track_descriptors(trackDescriptors)
|
||||||
|
normalized_tags = self._coerce_media_tags(mediaTags)
|
||||||
|
session = None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
session = self.Session()
|
||||||
|
self._ensure_unique_pattern_definition(
|
||||||
|
session,
|
||||||
|
fields["show_id"],
|
||||||
|
fields["pattern"],
|
||||||
|
exclude_pattern_id=patternId,
|
||||||
|
)
|
||||||
|
|
||||||
s = self.Session()
|
if patternId is None:
|
||||||
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']),
|
pattern = Pattern(
|
||||||
Pattern.pattern == str(patternDescriptor['pattern']))
|
show_id=fields["show_id"],
|
||||||
|
pattern=fields["pattern"],
|
||||||
if not q.count():
|
quality=fields["quality"],
|
||||||
pattern = Pattern(show_id = int(patternDescriptor['show_id']),
|
notes=fields["notes"],
|
||||||
pattern = str(patternDescriptor['pattern']))
|
)
|
||||||
s.add(pattern)
|
session.add(pattern)
|
||||||
s.commit()
|
session.flush()
|
||||||
return pattern.getId()
|
|
||||||
else:
|
else:
|
||||||
return 0
|
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
|
||||||
|
if pattern is None:
|
||||||
|
raise click.ClickException(
|
||||||
|
f"PatternController.savePatternSchema(): Pattern #{patternId} not found"
|
||||||
|
)
|
||||||
|
pattern.show_id = fields["show_id"]
|
||||||
|
pattern.pattern = fields["pattern"]
|
||||||
|
pattern.quality = fields["quality"]
|
||||||
|
pattern.notes = fields["notes"]
|
||||||
|
|
||||||
|
self._replace_pattern_schema(
|
||||||
|
session,
|
||||||
|
pattern,
|
||||||
|
normalized_tags,
|
||||||
|
normalized_tracks,
|
||||||
|
)
|
||||||
|
|
||||||
|
session.commit()
|
||||||
|
self._clear_regex_cache()
|
||||||
|
return pattern.getId()
|
||||||
|
|
||||||
|
except click.ClickException:
|
||||||
|
raise
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.addPattern(): {repr(ex)}")
|
raise click.ClickException(
|
||||||
|
f"PatternController.savePatternSchema(): {repr(ex)}"
|
||||||
|
)
|
||||||
finally:
|
finally:
|
||||||
s.close()
|
if session is not None:
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
def addPattern(self, patternObj, trackDescriptors=None, mediaTags=None):
|
||||||
|
return self.savePatternSchema(
|
||||||
|
patternObj,
|
||||||
|
trackDescriptors=trackDescriptors,
|
||||||
|
mediaTags=mediaTags,
|
||||||
|
)
|
||||||
|
|
||||||
def updatePattern(self, patternId, patternDescriptor):
|
def updatePattern(self, patternId, patternObj):
|
||||||
|
|
||||||
|
fields = self._coerce_pattern_fields(patternObj)
|
||||||
|
session = None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
session = self.Session()
|
||||||
q = s.query(Pattern).filter(Pattern.id == int(patternId))
|
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
|
||||||
|
|
||||||
if q.count():
|
if pattern is not None:
|
||||||
|
self._ensure_unique_pattern_definition(
|
||||||
|
session,
|
||||||
|
fields["show_id"],
|
||||||
|
fields["pattern"],
|
||||||
|
exclude_pattern_id=patternId,
|
||||||
|
)
|
||||||
|
self._validate_persisted_pattern(pattern)
|
||||||
|
|
||||||
pattern = q.first()
|
pattern.show_id = fields["show_id"]
|
||||||
|
pattern.pattern = fields["pattern"]
|
||||||
|
pattern.quality = fields["quality"]
|
||||||
|
pattern.notes = fields["notes"]
|
||||||
|
|
||||||
pattern.show_id = int(patternDescriptor['show_id'])
|
session.commit()
|
||||||
pattern.pattern = str(patternDescriptor['pattern'])
|
self._clear_regex_cache()
|
||||||
|
|
||||||
s.commit()
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
else:
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
except click.ClickException:
|
||||||
|
raise
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.updatePattern(): {repr(ex)}")
|
raise click.ClickException(f"PatternController.updatePattern(): {repr(ex)}")
|
||||||
finally:
|
finally:
|
||||||
s.close()
|
if session is not None:
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
def findPattern(self, patternObj):
|
||||||
|
session = None
|
||||||
def findPattern(self, patternDescriptor):
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
session = self.Session()
|
||||||
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']), Pattern.pattern == str(patternDescriptor['pattern']))
|
pattern = (
|
||||||
|
session.query(Pattern)
|
||||||
|
.filter(
|
||||||
|
Pattern.show_id == int(patternObj["show_id"]),
|
||||||
|
Pattern.pattern == str(patternObj["pattern"]),
|
||||||
|
)
|
||||||
|
.first()
|
||||||
|
)
|
||||||
|
|
||||||
if q.count():
|
if pattern is not None:
|
||||||
pattern = q.first()
|
|
||||||
return int(pattern.id)
|
return int(pattern.id)
|
||||||
else:
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.findPattern(): {repr(ex)}")
|
raise click.ClickException(f"PatternController.findPattern(): {repr(ex)}")
|
||||||
finally:
|
finally:
|
||||||
s.close()
|
if session is not None:
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
def getPatternsForShow(self, showId: int) -> list[Pattern]:
|
||||||
|
|
||||||
|
if type(showId) is not int:
|
||||||
|
raise ValueError(
|
||||||
|
"PatternController.getPatternsForShow(): Argument showId is required to be of type int"
|
||||||
|
)
|
||||||
|
|
||||||
|
session = None
|
||||||
|
try:
|
||||||
|
session = self.Session()
|
||||||
|
return (
|
||||||
|
session.query(Pattern)
|
||||||
|
.filter(Pattern.show_id == int(showId))
|
||||||
|
.order_by(Pattern.id)
|
||||||
|
.all()
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as ex:
|
||||||
|
raise click.ClickException(f"PatternController.getPatternsForShow(): {repr(ex)}")
|
||||||
|
finally:
|
||||||
|
if session is not None:
|
||||||
|
session.close()
|
||||||
|
|
||||||
def getPattern(self, patternId: int):
|
def getPattern(self, patternId: int):
|
||||||
|
|
||||||
if type(patternId) is not int:
|
if type(patternId) is not int:
|
||||||
raise ValueError(f"PatternController.getPattern(): Argument patternId is required to be of type int")
|
raise ValueError(
|
||||||
|
"PatternController.getPattern(): Argument patternId is required to be of type int"
|
||||||
|
)
|
||||||
|
|
||||||
|
session = None
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
session = self.Session()
|
||||||
q = s.query(Pattern).filter(Pattern.id == int(patternId))
|
return session.query(Pattern).filter(Pattern.id == int(patternId)).first()
|
||||||
|
|
||||||
return q.first() if q.count() else None
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.getPattern(): {repr(ex)}")
|
raise click.ClickException(f"PatternController.getPattern(): {repr(ex)}")
|
||||||
finally:
|
finally:
|
||||||
s.close()
|
if session is not None:
|
||||||
|
session.close()
|
||||||
|
|
||||||
def deletePattern(self, patternId):
|
def deletePattern(self, patternId):
|
||||||
|
session = None
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
session = self.Session()
|
||||||
q = s.query(Pattern).filter(Pattern.id == int(patternId))
|
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
|
||||||
|
|
||||||
if q.count():
|
if pattern is not None:
|
||||||
|
session.delete(pattern)
|
||||||
#DAFUQ: https://stackoverflow.com/a/19245058
|
session.commit()
|
||||||
# q.delete()
|
self._clear_regex_cache()
|
||||||
pattern = q.first()
|
|
||||||
s.delete(pattern)
|
|
||||||
|
|
||||||
s.commit()
|
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.deletePattern(): {repr(ex)}")
|
raise click.ClickException(f"PatternController.deletePattern(): {repr(ex)}")
|
||||||
finally:
|
finally:
|
||||||
s.close()
|
if session is not None:
|
||||||
|
session.close()
|
||||||
|
|
||||||
def matchFilename(self, filename: str) -> dict:
|
def matchFilename(self, filename: str) -> dict:
|
||||||
"""Returns dict {'match': <a regex match obj>, 'pattern': <ffx pattern obj>} or empty dict of no pattern was found"""
|
"""Return {'match': regex match, 'pattern': Pattern} or {} when unmatched."""
|
||||||
|
session = None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
session = self.Session()
|
||||||
q = s.query(Pattern)
|
matches = []
|
||||||
|
query = session.query(Pattern).order_by(Pattern.show_id, Pattern.id)
|
||||||
|
|
||||||
matchResult = {}
|
for pattern in query.all():
|
||||||
|
compiled = self._compile_pattern_expression(
|
||||||
|
pattern.getId(),
|
||||||
|
pattern.getPattern(),
|
||||||
|
)
|
||||||
|
patternMatch = compiled.search(str(filename))
|
||||||
|
if patternMatch is None:
|
||||||
|
continue
|
||||||
|
|
||||||
for pattern in q.all():
|
self._validate_persisted_pattern(pattern)
|
||||||
patternMatch = re.search(str(pattern.pattern), str(filename))
|
matches.append({"match": patternMatch, "pattern": pattern})
|
||||||
if patternMatch is not None:
|
|
||||||
matchResult['match'] = patternMatch
|
|
||||||
matchResult['pattern'] = pattern
|
|
||||||
|
|
||||||
return matchResult
|
if not matches:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
if len(matches) > 1:
|
||||||
|
duplicateDescriptions = ", ".join(
|
||||||
|
[
|
||||||
|
f"show #{match['pattern'].getShowId()} pattern #{match['pattern'].getId()} {match['pattern'].getPattern()!r}"
|
||||||
|
for match in matches
|
||||||
|
]
|
||||||
|
)
|
||||||
|
raise DuplicatePatternMatchError(
|
||||||
|
f"Filename {filename!r} matched more than one pattern: {duplicateDescriptions}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return matches[0]
|
||||||
|
|
||||||
|
except click.ClickException:
|
||||||
|
raise
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.matchFilename(): {repr(ex)}")
|
raise click.ClickException(f"PatternController.matchFilename(): {repr(ex)}")
|
||||||
finally:
|
finally:
|
||||||
s.close()
|
if session is not None:
|
||||||
|
session.close()
|
||||||
# def getMediaDescriptor(self, context, patternId):
|
|
||||||
#
|
|
||||||
# try:
|
|
||||||
# s = self.Session()
|
|
||||||
# q = s.query(Pattern).filter(Pattern.id == int(patternId))
|
|
||||||
#
|
|
||||||
# if q.count():
|
|
||||||
# return q.first().getMediaDescriptor(context)
|
|
||||||
# else:
|
|
||||||
# return None
|
|
||||||
#
|
|
||||||
# except Exception as ex:
|
|
||||||
# raise click.ClickException(f"PatternController.getMediaDescriptor(): {repr(ex)}")
|
|
||||||
# finally:
|
|
||||||
# s.close()
|
|
||||||
|
|||||||
@@ -2,22 +2,17 @@ import click, re
|
|||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
from textual.screen import Screen
|
from textual.screen import Screen
|
||||||
from textual.widgets import Header, Footer, Static, Button, Input, DataTable
|
from textual.widgets import Header, Footer, Static, Button, Input, DataTable, TextArea
|
||||||
from textual.containers import Grid
|
from textual.containers import Grid
|
||||||
|
|
||||||
from ffx.model.pattern import Pattern
|
from ffx.model.pattern import Pattern
|
||||||
from ffx.model.track import Track
|
|
||||||
|
|
||||||
from .pattern_controller import PatternController
|
|
||||||
from .show_controller import ShowController
|
|
||||||
from .track_controller import TrackController
|
|
||||||
from .tag_controller import TagController
|
|
||||||
|
|
||||||
from .track_details_screen import TrackDetailsScreen
|
from .track_details_screen import TrackDetailsScreen
|
||||||
from .track_delete_screen import TrackDeleteScreen
|
from .track_delete_screen import TrackDeleteScreen
|
||||||
|
|
||||||
from .tag_details_screen import TagDetailsScreen
|
from .tag_details_screen import TagDetailsScreen
|
||||||
from .tag_delete_screen import TagDeleteScreen
|
from .tag_delete_screen import TagDeleteScreen
|
||||||
|
from .screen_support import build_screen_bootstrap, build_screen_controllers
|
||||||
|
|
||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
@@ -30,6 +25,8 @@ from ffx.file_properties import FileProperties
|
|||||||
from ffx.iso_language import IsoLanguage
|
from ffx.iso_language import IsoLanguage
|
||||||
from ffx.audio_layout import AudioLayout
|
from ffx.audio_layout import AudioLayout
|
||||||
|
|
||||||
|
from ffx.helper import formatRichColor, removeRichColor
|
||||||
|
|
||||||
|
|
||||||
# Screen[dict[int, str, int]]
|
# Screen[dict[int, str, int]]
|
||||||
class PatternDetailsScreen(Screen):
|
class PatternDetailsScreen(Screen):
|
||||||
@@ -37,8 +34,8 @@ class PatternDetailsScreen(Screen):
|
|||||||
CSS = """
|
CSS = """
|
||||||
|
|
||||||
Grid {
|
Grid {
|
||||||
grid-size: 7 13;
|
grid-size: 7 17;
|
||||||
grid-rows: 2 2 2 2 2 8 2 2 8 2 2 2 2;
|
grid-rows: 2 2 2 2 2 2 6 2 2 8 2 2 8 2 2 2 2;
|
||||||
grid-columns: 25 25 25 25 25 25 25;
|
grid-columns: 25 25 25 25 25 25 25;
|
||||||
height: 100%;
|
height: 100%;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
@@ -87,6 +84,12 @@ class PatternDetailsScreen(Screen):
|
|||||||
column-span: 7;
|
column-span: 7;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
.four_box {
|
||||||
|
min-height: 6;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
.box {
|
.box {
|
||||||
height: 100%;
|
height: 100%;
|
||||||
border: solid green;
|
border: solid green;
|
||||||
@@ -100,54 +103,43 @@ class PatternDetailsScreen(Screen):
|
|||||||
def __init__(self, patternId = None, showId = None):
|
def __init__(self, patternId = None, showId = None):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
self.context = self.app.getContext()
|
bootstrap = build_screen_bootstrap(self.app.getContext())
|
||||||
self.Session = self.context['database']['session'] # convenience
|
self.context = bootstrap.context
|
||||||
|
|
||||||
self.__pc = PatternController(context = self.context)
|
self.__removeGlobalKeys = bootstrap.remove_global_keys
|
||||||
self.__sc = ShowController(context = self.context)
|
self.__ignoreGlobalKeys = bootstrap.ignore_global_keys
|
||||||
self.__tc = TrackController(context = self.context)
|
|
||||||
self.__tac = TagController(context = self.context)
|
controllers = build_screen_controllers(
|
||||||
|
self.context,
|
||||||
|
pattern=True,
|
||||||
|
show=True,
|
||||||
|
track=True,
|
||||||
|
tag=True,
|
||||||
|
)
|
||||||
|
self.__pc = controllers['pattern']
|
||||||
|
self.__sc = controllers['show']
|
||||||
|
self.__tc = controllers['track']
|
||||||
|
self.__tac = controllers['tag']
|
||||||
|
|
||||||
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else None
|
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else None
|
||||||
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
|
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
|
||||||
|
self.__draftTracks : List[TrackDescriptor] = []
|
||||||
|
self.__draftTags : dict[str, str] = {}
|
||||||
#TODO: per controller
|
|
||||||
def loadTracks(self, show_id):
|
|
||||||
|
|
||||||
try:
|
|
||||||
|
|
||||||
tracks = {}
|
|
||||||
tracks['audio'] = {}
|
|
||||||
tracks['subtitle'] = {}
|
|
||||||
|
|
||||||
s = self.Session()
|
|
||||||
q = s.query(Pattern).filter(Pattern.show_id == int(show_id))
|
|
||||||
|
|
||||||
return [{'id': int(p.id), 'pattern': p.pattern} for p in q.all()]
|
|
||||||
|
|
||||||
except Exception as ex:
|
|
||||||
raise click.ClickException(f"loadTracks(): {repr(ex)}")
|
|
||||||
finally:
|
|
||||||
s.close()
|
|
||||||
|
|
||||||
|
|
||||||
def updateTracks(self):
|
def updateTracks(self):
|
||||||
|
|
||||||
self.tracksTable.clear()
|
self.tracksTable.clear()
|
||||||
|
|
||||||
if self.__pattern is not None:
|
tracks = self.getCurrentTrackDescriptors()
|
||||||
|
|
||||||
tracks = self.__tc.findTracks(self.__pattern.getId())
|
|
||||||
|
|
||||||
typeCounter = {}
|
typeCounter = {}
|
||||||
|
|
||||||
tr: Track
|
td: TrackDescriptor
|
||||||
for tr in tracks:
|
for td in tracks:
|
||||||
|
|
||||||
td : TrackDescriptor = tr.getDescriptor(self.context)
|
if (trackType := td.getType()) != TrackType.ATTACHMENT:
|
||||||
|
|
||||||
trackType = td.getType()
|
|
||||||
if not trackType in typeCounter.keys():
|
if not trackType in typeCounter.keys():
|
||||||
typeCounter[trackType] = 0
|
typeCounter[trackType] = 0
|
||||||
|
|
||||||
@@ -155,6 +147,7 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
trackLanguage = td.getLanguage()
|
trackLanguage = td.getLanguage()
|
||||||
audioLayout = td.getAudioLayout()
|
audioLayout = td.getAudioLayout()
|
||||||
|
|
||||||
row = (td.getIndex(),
|
row = (td.getIndex(),
|
||||||
trackType.label(),
|
trackType.label(),
|
||||||
typeCounter[trackType],
|
typeCounter[trackType],
|
||||||
@@ -172,11 +165,47 @@ class PatternDetailsScreen(Screen):
|
|||||||
typeCounter[trackType] += 1
|
typeCounter[trackType] += 1
|
||||||
|
|
||||||
|
|
||||||
|
def getCurrentTrackDescriptors(self) -> List[TrackDescriptor]:
|
||||||
|
if self.__pattern is not None:
|
||||||
|
return self.__tc.findSiblingDescriptors(self.__pattern.getId())
|
||||||
|
return list(self.__draftTracks)
|
||||||
|
|
||||||
|
|
||||||
|
def normalizeDraftTracks(self):
|
||||||
|
|
||||||
|
typeCounter = {}
|
||||||
|
|
||||||
|
for index, trackDescriptor in enumerate(self.__draftTracks):
|
||||||
|
trackDescriptor.setIndex(index)
|
||||||
|
|
||||||
|
trackType = trackDescriptor.getType()
|
||||||
|
subIndex = typeCounter.get(trackType, 0)
|
||||||
|
trackDescriptor.setSubIndex(subIndex)
|
||||||
|
typeCounter[trackType] = subIndex + 1
|
||||||
|
|
||||||
|
if trackDescriptor.getSourceIndex() < 0:
|
||||||
|
trackDescriptor.setSourceIndex(index)
|
||||||
|
|
||||||
|
|
||||||
def swapTracks(self, trackIndex1: int, trackIndex2: int):
|
def swapTracks(self, trackIndex1: int, trackIndex2: int):
|
||||||
|
|
||||||
ti1 = int(trackIndex1)
|
ti1 = int(trackIndex1)
|
||||||
ti2 = int(trackIndex2)
|
ti2 = int(trackIndex2)
|
||||||
|
|
||||||
|
if self.__pattern is None:
|
||||||
|
numSiblings = len(self.__draftTracks)
|
||||||
|
|
||||||
|
if ti1 < 0 or ti1 >= numSiblings:
|
||||||
|
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex1 ({ti1}) is out of range ({numSiblings})")
|
||||||
|
|
||||||
|
if ti2 < 0 or ti2 >= numSiblings:
|
||||||
|
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex2 ({ti2}) is out of range ({numSiblings})")
|
||||||
|
|
||||||
|
self.__draftTracks[ti1], self.__draftTracks[ti2] = self.__draftTracks[ti2], self.__draftTracks[ti1]
|
||||||
|
self.normalizeDraftTracks()
|
||||||
|
self.updateTracks()
|
||||||
|
return
|
||||||
|
|
||||||
siblingDescriptors: List[TrackDescriptor] = self.__tc.findSiblingDescriptors(self.__pattern.getId())
|
siblingDescriptors: List[TrackDescriptor] = self.__tc.findSiblingDescriptors(self.__pattern.getId())
|
||||||
|
|
||||||
numSiblings = len(siblingDescriptors)
|
numSiblings = len(siblingDescriptors)
|
||||||
@@ -212,12 +241,21 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
self.tagsTable.clear()
|
self.tagsTable.clear()
|
||||||
|
|
||||||
if self.__pattern is not None:
|
tags = (
|
||||||
|
self.__tac.findAllMediaTags(self.__pattern.getId())
|
||||||
tags = self.__tac.findAllMediaTags(self.__pattern.getId())
|
if self.__pattern is not None
|
||||||
|
else self.__draftTags
|
||||||
|
)
|
||||||
|
|
||||||
for tagKey, tagValue in tags.items():
|
for tagKey, tagValue in tags.items():
|
||||||
row = (tagKey, tagValue)
|
|
||||||
|
textColor = None
|
||||||
|
if tagKey in self.__ignoreGlobalKeys:
|
||||||
|
textColor = 'blue'
|
||||||
|
if tagKey in self.__removeGlobalKeys:
|
||||||
|
textColor = 'red'
|
||||||
|
|
||||||
|
row = (formatRichColor(tagKey, textColor), formatRichColor(tagValue, textColor))
|
||||||
self.tagsTable.add_row(*map(str, row))
|
self.tagsTable.add_row(*map(str, row))
|
||||||
|
|
||||||
|
|
||||||
@@ -230,6 +268,12 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
self.query_one("#pattern_input", Input).value = str(self.__pattern.getPattern())
|
self.query_one("#pattern_input", Input).value = str(self.__pattern.getPattern())
|
||||||
|
|
||||||
|
if self.__pattern and self.__pattern.quality:
|
||||||
|
self.query_one("#quality_input", Input).value = str(self.__pattern.quality)
|
||||||
|
|
||||||
|
if self.__pattern and self.__pattern.notes:
|
||||||
|
self.query_one("#notes_textarea", TextArea).text = str(self.__pattern.notes)
|
||||||
|
|
||||||
self.updateTags()
|
self.updateTags()
|
||||||
self.updateTracks()
|
self.updateTracks()
|
||||||
|
|
||||||
@@ -276,64 +320,71 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
# 3
|
# 3
|
||||||
yield Static(" ", classes="seven")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# 4
|
# 4
|
||||||
yield Static(" ", classes="seven")
|
yield Static("Quality")
|
||||||
|
yield Input(type="integer", id="quality_input")
|
||||||
|
yield Static(' ', classes="five")
|
||||||
|
|
||||||
|
|
||||||
# 5
|
# 5
|
||||||
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
|
|
||||||
|
# 6
|
||||||
|
yield Static("Notes")
|
||||||
|
yield Static(" ", classes="six")
|
||||||
|
|
||||||
|
# 7
|
||||||
|
yield TextArea(id="notes_textarea", classes="four_box seven")
|
||||||
|
|
||||||
|
|
||||||
|
# 8
|
||||||
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
|
# 9
|
||||||
yield Static("Media Tags")
|
yield Static("Media Tags")
|
||||||
|
|
||||||
|
|
||||||
if self.__pattern is not None:
|
|
||||||
yield Button("Add", id="button_add_tag")
|
yield Button("Add", id="button_add_tag")
|
||||||
yield Button("Edit", id="button_edit_tag")
|
yield Button("Edit", id="button_edit_tag")
|
||||||
yield Button("Delete", id="button_delete_tag")
|
yield Button("Delete", id="button_delete_tag")
|
||||||
else:
|
|
||||||
yield Static(" ")
|
|
||||||
yield Static(" ")
|
|
||||||
yield Static(" ")
|
|
||||||
|
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
|
|
||||||
# 6
|
|
||||||
yield self.tagsTable
|
|
||||||
|
|
||||||
# 7
|
|
||||||
yield Static(" ", classes="seven")
|
|
||||||
|
|
||||||
# 8
|
|
||||||
yield Static("Streams")
|
|
||||||
|
|
||||||
|
|
||||||
if self.__pattern is not None:
|
|
||||||
yield Button("Add", id="button_add_track")
|
|
||||||
yield Button("Edit", id="button_edit_track")
|
|
||||||
yield Button("Delete", id="button_delete_track")
|
|
||||||
else:
|
|
||||||
yield Static(" ")
|
|
||||||
yield Static(" ")
|
|
||||||
yield Static(" ")
|
|
||||||
|
|
||||||
yield Static(" ")
|
|
||||||
yield Button("Up", id="button_track_up")
|
|
||||||
yield Button("Down", id="button_track_down")
|
|
||||||
|
|
||||||
# 9
|
|
||||||
yield self.tracksTable
|
|
||||||
|
|
||||||
# 10
|
# 10
|
||||||
yield Static(" ", classes="seven")
|
yield self.tagsTable
|
||||||
|
|
||||||
# 11
|
# 11
|
||||||
yield Static(" ", classes="seven")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
# 12
|
# 12
|
||||||
|
yield Static("Streams")
|
||||||
|
yield Button("Add", id="button_add_track")
|
||||||
|
yield Button("Edit", id="button_edit_track")
|
||||||
|
yield Button("Delete", id="button_delete_track")
|
||||||
|
|
||||||
|
yield Static(" ")
|
||||||
|
yield Button("Up", id="button_track_up")
|
||||||
|
yield Button("Down", id="button_track_down")
|
||||||
|
|
||||||
|
# 13
|
||||||
|
yield self.tracksTable
|
||||||
|
|
||||||
|
# 14
|
||||||
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
|
# 15
|
||||||
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
|
# 16
|
||||||
yield Button("Save", id="save_button")
|
yield Button("Save", id="save_button")
|
||||||
yield Button("Cancel", id="cancel_button")
|
yield Button("Cancel", id="cancel_button")
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 13
|
# 17
|
||||||
yield Static(" ", classes="seven")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
yield Footer()
|
yield Footer()
|
||||||
@@ -342,17 +393,20 @@ class PatternDetailsScreen(Screen):
|
|||||||
def getPatternFromInput(self):
|
def getPatternFromInput(self):
|
||||||
return str(self.query_one("#pattern_input", Input).value)
|
return str(self.query_one("#pattern_input", Input).value)
|
||||||
|
|
||||||
|
def getQualityFromInput(self):
|
||||||
|
try:
|
||||||
|
return int(self.query_one("#quality_input", Input).value)
|
||||||
|
except ValueError:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
def getNotesFromInput(self):
|
||||||
|
return str(self.query_one("#notes_textarea", TextArea).text)
|
||||||
|
|
||||||
|
|
||||||
def getSelectedTrackDescriptor(self):
|
def getSelectedTrackDescriptor(self):
|
||||||
|
|
||||||
if not self.__pattern:
|
|
||||||
return None
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|
||||||
# Fetch the currently selected row when 'Enter' is pressed
|
|
||||||
#selected_row_index = self.table.cursor_row
|
|
||||||
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
|
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
|
||||||
|
|
||||||
if row_key is not None:
|
if row_key is not None:
|
||||||
@@ -361,9 +415,11 @@ class PatternDetailsScreen(Screen):
|
|||||||
trackIndex = int(selected_track_data[0])
|
trackIndex = int(selected_track_data[0])
|
||||||
trackSubIndex = int(selected_track_data[2])
|
trackSubIndex = int(selected_track_data[2])
|
||||||
|
|
||||||
return self.__tc.getTrack(self.__pattern.getId(), trackIndex).getDescriptor(self.context, subIndex=trackSubIndex)
|
for trackDescriptor in self.getCurrentTrackDescriptors():
|
||||||
|
if (trackDescriptor.getIndex() == trackIndex
|
||||||
|
and trackDescriptor.getSubIndex() == trackSubIndex):
|
||||||
|
return trackDescriptor
|
||||||
|
|
||||||
else:
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
except CellDoesNotExist:
|
except CellDoesNotExist:
|
||||||
@@ -382,8 +438,8 @@ class PatternDetailsScreen(Screen):
|
|||||||
if row_key is not None:
|
if row_key is not None:
|
||||||
selected_tag_data = self.tagsTable.get_row(row_key)
|
selected_tag_data = self.tagsTable.get_row(row_key)
|
||||||
|
|
||||||
tagKey = str(selected_tag_data[0])
|
tagKey = removeRichColor(selected_tag_data[0])
|
||||||
tagValue = str(selected_tag_data[1])
|
tagValue = removeRichColor(selected_tag_data[1])
|
||||||
|
|
||||||
return tagKey, tagValue
|
return tagKey, tagValue
|
||||||
|
|
||||||
@@ -403,6 +459,8 @@ class PatternDetailsScreen(Screen):
|
|||||||
patternDescriptor = {}
|
patternDescriptor = {}
|
||||||
patternDescriptor['show_id'] = self.__showDescriptor.getId()
|
patternDescriptor['show_id'] = self.__showDescriptor.getId()
|
||||||
patternDescriptor['pattern'] = self.getPatternFromInput()
|
patternDescriptor['pattern'] = self.getPatternFromInput()
|
||||||
|
patternDescriptor['quality'] = self.getQualityFromInput()
|
||||||
|
patternDescriptor['notes'] = self.getNotesFromInput()
|
||||||
|
|
||||||
if self.__pattern is not None:
|
if self.__pattern is not None:
|
||||||
|
|
||||||
@@ -413,7 +471,11 @@ class PatternDetailsScreen(Screen):
|
|||||||
self.app.pop_screen()
|
self.app.pop_screen()
|
||||||
|
|
||||||
else:
|
else:
|
||||||
patternId = self.__pc.addPattern(patternDescriptor)
|
patternId = self.__pc.savePatternSchema(
|
||||||
|
patternDescriptor,
|
||||||
|
trackDescriptors=self.__draftTracks,
|
||||||
|
mediaTags=self.__draftTags,
|
||||||
|
)
|
||||||
if patternId:
|
if patternId:
|
||||||
self.dismiss(patternDescriptor)
|
self.dismiss(patternDescriptor)
|
||||||
else:
|
else:
|
||||||
@@ -425,32 +487,51 @@ class PatternDetailsScreen(Screen):
|
|||||||
self.app.pop_screen()
|
self.app.pop_screen()
|
||||||
|
|
||||||
|
|
||||||
# Save pattern when just created before adding streams
|
numTracks = len(self.getCurrentTrackDescriptors())
|
||||||
if self.__pattern is not None:
|
|
||||||
|
|
||||||
numTracks = len(self.tracksTable.rows)
|
|
||||||
|
|
||||||
if event.button.id == "button_add_track":
|
if event.button.id == "button_add_track":
|
||||||
self.app.push_screen(TrackDetailsScreen(patternId = self.__pattern.getId(), index = numTracks), self.handle_add_track)
|
self.app.push_screen(
|
||||||
|
TrackDetailsScreen(
|
||||||
|
patternId=self.__pattern.getId() if self.__pattern is not None else None,
|
||||||
|
patternLabel=self.getPatternFromInput(),
|
||||||
|
siblingTrackDescriptors=self.getCurrentTrackDescriptors(),
|
||||||
|
index=numTracks,
|
||||||
|
),
|
||||||
|
self.handle_add_track,
|
||||||
|
)
|
||||||
|
|
||||||
selectedTrack = self.getSelectedTrackDescriptor()
|
selectedTrack = self.getSelectedTrackDescriptor()
|
||||||
if selectedTrack is not None:
|
if selectedTrack is not None:
|
||||||
if event.button.id == "button_edit_track":
|
if event.button.id == "button_edit_track":
|
||||||
self.app.push_screen(TrackDetailsScreen(trackDescriptor = selectedTrack), self.handle_edit_track)
|
self.app.push_screen(
|
||||||
|
TrackDetailsScreen(
|
||||||
|
trackDescriptor=selectedTrack,
|
||||||
|
patternId=self.__pattern.getId() if self.__pattern is not None else None,
|
||||||
|
patternLabel=self.getPatternFromInput(),
|
||||||
|
siblingTrackDescriptors=self.getCurrentTrackDescriptors(),
|
||||||
|
),
|
||||||
|
self.handle_edit_track,
|
||||||
|
)
|
||||||
if event.button.id == "button_delete_track":
|
if event.button.id == "button_delete_track":
|
||||||
self.app.push_screen(TrackDeleteScreen(trackDescriptor = selectedTrack), self.handle_delete_track)
|
self.app.push_screen(
|
||||||
|
TrackDeleteScreen(trackDescriptor = selectedTrack),
|
||||||
|
self.handle_delete_track,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
if event.button.id == "button_add_tag":
|
if event.button.id == "button_add_tag":
|
||||||
if self.__pattern is not None:
|
|
||||||
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
|
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
|
||||||
|
|
||||||
if event.button.id == "button_edit_tag":
|
if event.button.id == "button_edit_tag":
|
||||||
tagKey, tagValue = self.getSelectedTag()
|
selectedTag = self.getSelectedTag()
|
||||||
|
if selectedTag is not None:
|
||||||
|
tagKey, tagValue = selectedTag
|
||||||
self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)
|
self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)
|
||||||
|
|
||||||
if event.button.id == "button_delete_tag":
|
if event.button.id == "button_delete_tag":
|
||||||
tagKey, tagValue = self.getSelectedTag()
|
selectedTag = self.getSelectedTag()
|
||||||
|
if selectedTag is not None:
|
||||||
|
tagKey, tagValue = selectedTag
|
||||||
self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)
|
self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)
|
||||||
|
|
||||||
|
|
||||||
@@ -468,6 +549,7 @@ class PatternDetailsScreen(Screen):
|
|||||||
if event.button.id == "button_track_up":
|
if event.button.id == "button_track_up":
|
||||||
|
|
||||||
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||||
|
if selectedTrackDescriptor is not None:
|
||||||
selectedTrackIndex = selectedTrackDescriptor.getIndex()
|
selectedTrackIndex = selectedTrackDescriptor.getIndex()
|
||||||
|
|
||||||
if selectedTrackIndex > 0 and selectedTrackIndex < self.tracksTable.row_count:
|
if selectedTrackIndex > 0 and selectedTrackIndex < self.tracksTable.row_count:
|
||||||
@@ -478,6 +560,7 @@ class PatternDetailsScreen(Screen):
|
|||||||
if event.button.id == "button_track_down":
|
if event.button.id == "button_track_down":
|
||||||
|
|
||||||
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||||
|
if selectedTrackDescriptor is not None:
|
||||||
selectedTrackIndex = selectedTrackDescriptor.getIndex()
|
selectedTrackIndex = selectedTrackDescriptor.getIndex()
|
||||||
|
|
||||||
if selectedTrackIndex >= 0 and selectedTrackIndex < (self.tracksTable.row_count - 1):
|
if selectedTrackIndex >= 0 and selectedTrackIndex < (self.tracksTable.row_count - 1):
|
||||||
@@ -486,63 +569,88 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
|
|
||||||
def handle_add_track(self, trackDescriptor : TrackDescriptor):
|
def handle_add_track(self, trackDescriptor : TrackDescriptor):
|
||||||
|
if trackDescriptor is None:
|
||||||
|
return
|
||||||
|
|
||||||
dispoSet = trackDescriptor.getDispositionSet()
|
if self.__pattern is not None:
|
||||||
trackType = trackDescriptor.getType()
|
self.__tc.addTrack(trackDescriptor, patternId=self.__pattern.getId())
|
||||||
index = trackDescriptor.getIndex()
|
else:
|
||||||
subIndex = trackDescriptor.getSubIndex()
|
self.__draftTracks.append(trackDescriptor)
|
||||||
codec = trackDescriptor.getCodec()
|
self.normalizeDraftTracks()
|
||||||
language = trackDescriptor.getLanguage()
|
|
||||||
title = trackDescriptor.getTitle()
|
|
||||||
|
|
||||||
row = (index,
|
self.updateTracks()
|
||||||
trackType.label(),
|
|
||||||
subIndex,
|
|
||||||
codec.label(),
|
|
||||||
language.label(),
|
|
||||||
title,
|
|
||||||
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
|
|
||||||
'Yes' if TrackDisposition.FORCED in dispoSet else 'No')
|
|
||||||
|
|
||||||
self.tracksTable.add_row(*map(str, row))
|
|
||||||
|
|
||||||
|
|
||||||
def handle_edit_track(self, trackDescriptor : TrackDescriptor):
|
def handle_edit_track(self, trackDescriptor : TrackDescriptor):
|
||||||
|
if trackDescriptor is None:
|
||||||
|
return
|
||||||
|
|
||||||
try:
|
if self.__pattern is not None:
|
||||||
|
if not self.__tc.updateTrack(trackDescriptor.getId(), trackDescriptor):
|
||||||
|
raise click.ClickException("PatternDetailsScreen.handle_edit_track(): track update failed")
|
||||||
|
else:
|
||||||
|
selectedTrack = self.getSelectedTrackDescriptor()
|
||||||
|
for index, currentTrack in enumerate(self.__draftTracks):
|
||||||
|
if (selectedTrack is not None
|
||||||
|
and currentTrack.getIndex() == selectedTrack.getIndex()
|
||||||
|
and currentTrack.getSubIndex() == selectedTrack.getSubIndex()):
|
||||||
|
self.__draftTracks[index] = trackDescriptor
|
||||||
|
break
|
||||||
|
self.normalizeDraftTracks()
|
||||||
|
|
||||||
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
|
self.updateTracks()
|
||||||
|
|
||||||
self.tracksTable.update_cell(row_key, self.column_key_track_audio_layout,
|
|
||||||
trackDescriptor.getAudioLayout().label()
|
|
||||||
if trackDescriptor.getType() == TrackType.AUDIO else ' ')
|
|
||||||
|
|
||||||
self.tracksTable.update_cell(row_key, self.column_key_track_language, trackDescriptor.getLanguage().label())
|
|
||||||
self.tracksTable.update_cell(row_key, self.column_key_track_title, trackDescriptor.getTitle())
|
|
||||||
self.tracksTable.update_cell(row_key, self.column_key_track_default, 'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
|
|
||||||
self.tracksTable.update_cell(row_key, self.column_key_track_forced, 'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')
|
|
||||||
|
|
||||||
except CellDoesNotExist:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def handle_delete_track(self, trackDescriptor : TrackDescriptor):
|
def handle_delete_track(self, trackDescriptor : TrackDescriptor):
|
||||||
|
if trackDescriptor is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
if self.__pattern is not None:
|
||||||
|
track = self.__tc.getTrack(trackDescriptor.getPatternId(), trackDescriptor.getIndex())
|
||||||
|
|
||||||
|
if track is None:
|
||||||
|
raise click.ClickException(
|
||||||
|
f"Track is none: patternId={trackDescriptor.getPatternId()} type={trackDescriptor.getType()} subIndex={trackDescriptor.getSubIndex()}"
|
||||||
|
)
|
||||||
|
|
||||||
|
self.__tc.deleteTrack(track.getId())
|
||||||
|
else:
|
||||||
|
self.__draftTracks = [
|
||||||
|
currentTrack
|
||||||
|
for currentTrack in self.__draftTracks
|
||||||
|
if not (
|
||||||
|
currentTrack.getIndex() == trackDescriptor.getIndex()
|
||||||
|
and currentTrack.getSubIndex() == trackDescriptor.getSubIndex()
|
||||||
|
)
|
||||||
|
]
|
||||||
|
self.normalizeDraftTracks()
|
||||||
|
|
||||||
self.updateTracks()
|
self.updateTracks()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def handle_update_tag(self, tag):
|
def handle_update_tag(self, tag):
|
||||||
|
if tag is None:
|
||||||
|
return
|
||||||
|
|
||||||
if self.__pattern is None:
|
if self.__pattern is None:
|
||||||
raise click.ClickException(f"PatternDetailsScreen.handle_update_tag: pattern not set")
|
self.__draftTags[str(tag[0])] = str(tag[1])
|
||||||
|
else:
|
||||||
|
if self.__tac.updateMediaTag(self.__pattern.getId(), tag[0], tag[1]) is None:
|
||||||
|
raise click.ClickException("PatternDetailsScreen.handle_update_tag(): tag update failed")
|
||||||
|
|
||||||
if self.__tac.updateMediaTag(self.__pattern.getId(), tag[0], tag[1]) is not None:
|
|
||||||
self.updateTags()
|
self.updateTags()
|
||||||
|
|
||||||
def handle_delete_tag(self, tag):
|
def handle_delete_tag(self, tag):
|
||||||
|
if tag is None:
|
||||||
|
return
|
||||||
|
|
||||||
if self.__pattern is None:
|
if self.__pattern is None:
|
||||||
raise click.ClickException(f"PatternDetailsScreen.handle_delete_tag: pattern not set")
|
self.__draftTags.pop(str(tag[0]), None)
|
||||||
|
self.updateTags()
|
||||||
|
return
|
||||||
|
|
||||||
if self.__tac.deleteMediaTagByKey(self.__pattern.getId(), tag[0]):
|
if self.__tac.deleteMediaTagByKey(self.__pattern.getId(), tag[0]):
|
||||||
self.updateTags()
|
self.updateTags()
|
||||||
|
else:
|
||||||
|
raise click.ClickException('tag delete failed')
|
||||||
|
|||||||
@@ -1,39 +1,169 @@
|
|||||||
import subprocess, logging
|
import os
|
||||||
from typing import List
|
import shlex
|
||||||
|
import subprocess
|
||||||
|
from typing import Iterable, List
|
||||||
|
|
||||||
def executeProcess(commandSequence: List[str], directory: str = None, context: dict = None):
|
from .logging_utils import get_ffx_logger
|
||||||
|
|
||||||
|
COMMAND_TIMED_OUT_RETURN_CODE = 124
|
||||||
|
COMMAND_NOT_FOUND_RETURN_CODE = 127
|
||||||
|
MIN_NICENESS = -20
|
||||||
|
MAX_NICENESS = 19
|
||||||
|
DISABLED_NICENESS_SENTINEL = 99
|
||||||
|
DISABLED_CPU_PERCENT_SENTINEL = 0
|
||||||
|
MIN_CPU_PERCENT = 1
|
||||||
|
MAX_CPU_PERCENT = 100
|
||||||
|
|
||||||
|
|
||||||
|
def formatCommandSequence(commandSequence: Iterable[str]) -> str:
|
||||||
|
return shlex.join([str(token) for token in commandSequence])
|
||||||
|
|
||||||
|
|
||||||
|
def normalizeNiceness(niceness) -> int | None:
|
||||||
|
if niceness is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
niceness = int(niceness)
|
||||||
|
if niceness == DISABLED_NICENESS_SENTINEL:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if niceness < MIN_NICENESS or niceness > MAX_NICENESS:
|
||||||
|
raise ValueError(
|
||||||
|
f"Niceness must be between {MIN_NICENESS} and {MAX_NICENESS}, "
|
||||||
|
+ f"or {DISABLED_NICENESS_SENTINEL} to disable."
|
||||||
|
)
|
||||||
|
|
||||||
|
return niceness
|
||||||
|
|
||||||
|
|
||||||
|
def getPresentCpuCount() -> int:
|
||||||
|
if hasattr(os, 'sched_getaffinity'):
|
||||||
|
affinity = os.sched_getaffinity(0)
|
||||||
|
if affinity:
|
||||||
|
return len(affinity)
|
||||||
|
|
||||||
|
cpuCount = os.cpu_count()
|
||||||
|
return cpuCount if cpuCount and cpuCount > 0 else 1
|
||||||
|
|
||||||
|
|
||||||
|
def normalizeCpuPercent(cpuPercent) -> int | None:
|
||||||
|
if cpuPercent is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
cpuPercent = str(cpuPercent).strip()
|
||||||
|
if cpuPercent.endswith('%'):
|
||||||
|
percentValue = int(cpuPercent[:-1].strip())
|
||||||
|
if percentValue == DISABLED_CPU_PERCENT_SENTINEL:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if percentValue < MIN_CPU_PERCENT or percentValue > MAX_CPU_PERCENT:
|
||||||
|
raise ValueError(
|
||||||
|
f"CPU percentage must be between {MIN_CPU_PERCENT}% and {MAX_CPU_PERCENT}%, "
|
||||||
|
+ f"or {DISABLED_CPU_PERCENT_SENTINEL} to disable."
|
||||||
|
)
|
||||||
|
|
||||||
|
return percentValue * getPresentCpuCount()
|
||||||
|
|
||||||
|
cpuPercent = int(cpuPercent)
|
||||||
|
if cpuPercent == DISABLED_CPU_PERCENT_SENTINEL:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if cpuPercent < MIN_CPU_PERCENT:
|
||||||
|
raise ValueError(
|
||||||
|
"CPU limit must be a positive absolute value such as 200, "
|
||||||
|
+ f"a percentage such as 25%, or {DISABLED_CPU_PERCENT_SENTINEL} to disable."
|
||||||
|
)
|
||||||
|
|
||||||
|
return cpuPercent
|
||||||
|
|
||||||
|
|
||||||
|
def getWrappedCommandSequence(commandSequence: List[str], context: dict = None) -> List[str]:
|
||||||
"""
|
"""
|
||||||
niceness -20 bis +19
|
niceness: -20 to 19, disabled when unset
|
||||||
cpu_percent: 1 bis 99
|
cpu limit: positive absolute cpulimit value, or a machine-wide percentage
|
||||||
|
|
||||||
|
When both limits are configured, cpulimit wraps a nice-adjusted command:
|
||||||
|
cpulimit -l <cpu> -- nice -n <niceness> <command>
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if context is None:
|
resourceLimits = (context or {}).get('resource_limits', {})
|
||||||
logger = logging.getLogger('FFX')
|
niceness = normalizeNiceness(resourceLimits.get('niceness'))
|
||||||
logger.addHandler(logging.NullHandler())
|
cpu_percent = normalizeCpuPercent(
|
||||||
else:
|
resourceLimits.get('cpu_limit', resourceLimits.get('cpu_percent'))
|
||||||
logger = context['logger']
|
)
|
||||||
|
wrappedCommandSequence = [str(token) for token in commandSequence]
|
||||||
|
|
||||||
niceSequence = []
|
if niceness is not None:
|
||||||
|
wrappedCommandSequence = ['nice', '-n', str(niceness)] + wrappedCommandSequence
|
||||||
|
if cpu_percent is not None:
|
||||||
|
wrappedCommandSequence = ['cpulimit', '-l', str(cpu_percent), '--'] + wrappedCommandSequence
|
||||||
|
|
||||||
niceness = (int(context['resource_limits']['niceness'])
|
return wrappedCommandSequence
|
||||||
if not context is None
|
|
||||||
and 'resource_limits' in context.keys()
|
|
||||||
and 'niceness' in context['resource_limits'].keys() else 99)
|
|
||||||
cpu_percent = (int(context['resource_limits']['cpu_percent'])
|
|
||||||
if not context is None
|
|
||||||
and 'resource_limits' in context.keys()
|
|
||||||
and 'cpu_percent' in context['resource_limits'].keys() else 0)
|
|
||||||
|
|
||||||
if niceness >= -20 and niceness <= 19:
|
|
||||||
niceSequence += ['nice', '-n', str(niceness)]
|
|
||||||
if cpu_percent >= 1:
|
|
||||||
niceSequence += ['cpulimit', '-l', str(cpu_percent), '--']
|
|
||||||
|
|
||||||
niceCommand = niceSequence + commandSequence
|
def getProcessTimeoutSeconds(context: dict = None, timeoutSeconds: float = None):
|
||||||
|
if timeoutSeconds is None:
|
||||||
|
timeoutSeconds = (context or {}).get('resource_limits', {}).get('timeout_seconds')
|
||||||
|
|
||||||
logger.debug(f"executeProcess() command sequence: {' '.join(niceCommand)}")
|
if timeoutSeconds is None:
|
||||||
|
return None
|
||||||
|
|
||||||
process = subprocess.Popen(niceCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8', cwd = directory)
|
timeoutSeconds = float(timeoutSeconds)
|
||||||
output, error = process.communicate()
|
|
||||||
|
|
||||||
return output, error, process.returncode
|
return timeoutSeconds if timeoutSeconds > 0 else None
|
||||||
|
|
||||||
|
|
||||||
|
def executeProcess(
|
||||||
|
commandSequence: List[str],
|
||||||
|
directory: str = None,
|
||||||
|
context: dict = None,
|
||||||
|
timeoutSeconds: float = None,
|
||||||
|
):
|
||||||
|
|
||||||
|
logger = context['logger'] if context is not None and 'logger' in context else get_ffx_logger()
|
||||||
|
wrappedCommandSequence = getWrappedCommandSequence(commandSequence, context=context)
|
||||||
|
timeoutSeconds = getProcessTimeoutSeconds(context=context, timeoutSeconds=timeoutSeconds)
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
"executeProcess() cwd=%s timeout=%s command=%s",
|
||||||
|
directory or '.',
|
||||||
|
timeoutSeconds if timeoutSeconds is not None else 'none',
|
||||||
|
formatCommandSequence(wrappedCommandSequence),
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
completed = subprocess.run(
|
||||||
|
wrappedCommandSequence,
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
cwd=directory,
|
||||||
|
timeout=timeoutSeconds,
|
||||||
|
check=False,
|
||||||
|
)
|
||||||
|
except FileNotFoundError as ex:
|
||||||
|
error = (
|
||||||
|
"Command not found while running "
|
||||||
|
+ f"{formatCommandSequence(wrappedCommandSequence)}: {ex.filename or ex}"
|
||||||
|
)
|
||||||
|
logger.error(error)
|
||||||
|
return '', error, COMMAND_NOT_FOUND_RETURN_CODE
|
||||||
|
except subprocess.TimeoutExpired as ex:
|
||||||
|
stdout = ex.stdout or ''
|
||||||
|
stderr = ex.stderr or ''
|
||||||
|
error = (
|
||||||
|
f"Command timed out after {timeoutSeconds} seconds while running "
|
||||||
|
+ formatCommandSequence(wrappedCommandSequence)
|
||||||
|
)
|
||||||
|
if stderr:
|
||||||
|
error = f"{error}\n{stderr}"
|
||||||
|
logger.error(error)
|
||||||
|
return stdout, error, COMMAND_TIMED_OUT_RETURN_CODE
|
||||||
|
|
||||||
|
if completed.returncode != 0:
|
||||||
|
logger.warning(
|
||||||
|
"executeProcess() rc=%s command=%s",
|
||||||
|
completed.returncode,
|
||||||
|
formatCommandSequence(wrappedCommandSequence),
|
||||||
|
)
|
||||||
|
|
||||||
|
return completed.stdout, completed.stderr, completed.returncode
|
||||||
|
|||||||
65
src/ffx/screen_support.py
Normal file
65
src/ffx/screen_support.py
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from .pattern_controller import PatternController
|
||||||
|
from .show_controller import ShowController
|
||||||
|
from .shifted_season_controller import ShiftedSeasonController
|
||||||
|
from .tag_controller import TagController
|
||||||
|
from .tmdb_controller import TmdbController
|
||||||
|
from .track_controller import TrackController
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
|
||||||
|
class ScreenBootstrap:
|
||||||
|
context: dict
|
||||||
|
configuration_data: dict
|
||||||
|
signature_tags: dict
|
||||||
|
remove_global_keys: list
|
||||||
|
ignore_global_keys: list
|
||||||
|
remove_track_keys: list
|
||||||
|
ignore_track_keys: list
|
||||||
|
|
||||||
|
|
||||||
|
def build_screen_bootstrap(context: dict) -> ScreenBootstrap:
|
||||||
|
configurationData = context['config'].getData()
|
||||||
|
metadataConfiguration = configurationData.get('metadata', {})
|
||||||
|
streamMetadataConfiguration = metadataConfiguration.get('streams', {})
|
||||||
|
|
||||||
|
return ScreenBootstrap(
|
||||||
|
context=context,
|
||||||
|
configuration_data=configurationData,
|
||||||
|
signature_tags=metadataConfiguration.get('signature', {}),
|
||||||
|
remove_global_keys=metadataConfiguration.get('remove', []),
|
||||||
|
ignore_global_keys=metadataConfiguration.get('ignore', []),
|
||||||
|
remove_track_keys=streamMetadataConfiguration.get('remove', []),
|
||||||
|
ignore_track_keys=streamMetadataConfiguration.get('ignore', []),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def build_screen_controllers(
|
||||||
|
context: dict,
|
||||||
|
*,
|
||||||
|
pattern: bool = False,
|
||||||
|
show: bool = False,
|
||||||
|
track: bool = False,
|
||||||
|
tag: bool = False,
|
||||||
|
tmdb: bool = False,
|
||||||
|
shifted_season: bool = False,
|
||||||
|
) -> dict[str, object]:
|
||||||
|
controllers = {}
|
||||||
|
|
||||||
|
if pattern:
|
||||||
|
controllers['pattern'] = PatternController(context=context)
|
||||||
|
if show:
|
||||||
|
controllers['show'] = ShowController(context=context)
|
||||||
|
if track:
|
||||||
|
controllers['track'] = TrackController(context=context)
|
||||||
|
if tag:
|
||||||
|
controllers['tag'] = TagController(context=context)
|
||||||
|
if tmdb:
|
||||||
|
controllers['tmdb'] = TmdbController()
|
||||||
|
if shifted_season:
|
||||||
|
controllers['shifted_season'] = ShiftedSeasonController(context=context)
|
||||||
|
|
||||||
|
return controllers
|
||||||
@@ -18,9 +18,16 @@ class ShiftedSeasonController():
|
|||||||
self.Session = self.context['database']['session'] # convenience
|
self.Session = self.context['database']['session'] # convenience
|
||||||
|
|
||||||
def checkShiftedSeason(self, showId: int, shiftedSeasonObj: dict, shiftedSeasonId: int = 0):
|
def checkShiftedSeason(self, showId: int, shiftedSeasonObj: dict, shiftedSeasonId: int = 0):
|
||||||
|
"""
|
||||||
|
Check if for a particula season
|
||||||
|
|
||||||
|
shiftedSeasonId
|
||||||
|
"""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
|
|
||||||
|
originalSeason = shiftedSeasonObj['original_season']
|
||||||
firstEpisode = int(shiftedSeasonObj['first_episode'])
|
firstEpisode = int(shiftedSeasonObj['first_episode'])
|
||||||
lastEpisode = int(shiftedSeasonObj['last_episode'])
|
lastEpisode = int(shiftedSeasonObj['last_episode'])
|
||||||
|
|
||||||
@@ -31,11 +38,14 @@ class ShiftedSeasonController():
|
|||||||
siblingShiftedSeason: ShiftedSeason
|
siblingShiftedSeason: ShiftedSeason
|
||||||
for siblingShiftedSeason in q.all():
|
for siblingShiftedSeason in q.all():
|
||||||
|
|
||||||
|
siblingOriginalSeason = siblingShiftedSeason.getOriginalSeason
|
||||||
siblingFirstEpisode = siblingShiftedSeason.getFirstEpisode()
|
siblingFirstEpisode = siblingShiftedSeason.getFirstEpisode()
|
||||||
siblingLastEpisode = siblingShiftedSeason.getLastEpisode()
|
siblingLastEpisode = siblingShiftedSeason.getLastEpisode()
|
||||||
|
|
||||||
if (lastEpisode >= siblingFirstEpisode
|
if (originalSeason == siblingOriginalSeason
|
||||||
|
and lastEpisode >= siblingFirstEpisode
|
||||||
and siblingLastEpisode >= firstEpisode):
|
and siblingLastEpisode >= firstEpisode):
|
||||||
|
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@@ -91,11 +101,9 @@ class ShiftedSeasonController():
|
|||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
|
|
||||||
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
|
shiftedSeason = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId)).first()
|
||||||
|
|
||||||
if q.count():
|
if shiftedSeason is not None:
|
||||||
|
|
||||||
shiftedSeason = q.first()
|
|
||||||
|
|
||||||
shiftedSeason.original_season = int(shiftedSeasonObj['original_season'])
|
shiftedSeason.original_season = int(shiftedSeasonObj['original_season'])
|
||||||
shiftedSeason.first_episode = int(shiftedSeasonObj['first_episode'])
|
shiftedSeason.first_episode = int(shiftedSeasonObj['first_episode'])
|
||||||
@@ -131,12 +139,14 @@ class ShiftedSeasonController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId),
|
shiftedSeason = s.query(ShiftedSeason).filter(
|
||||||
|
ShiftedSeason.show_id == int(showId),
|
||||||
ShiftedSeason.original_season == int(originalSeason),
|
ShiftedSeason.original_season == int(originalSeason),
|
||||||
ShiftedSeason.first_episode == int(firstEpisode),
|
ShiftedSeason.first_episode == int(firstEpisode),
|
||||||
ShiftedSeason.last_episode == int(lastEpisode))
|
ShiftedSeason.last_episode == int(lastEpisode),
|
||||||
|
).first()
|
||||||
|
|
||||||
return q.first().getId() if q.count() else None
|
return shiftedSeason.getId() if shiftedSeason is not None else None
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.findShiftedSeason(): {repr(ex)}")
|
raise click.ClickException(f"PatternController.findShiftedSeason(): {repr(ex)}")
|
||||||
@@ -167,9 +177,7 @@ class ShiftedSeasonController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
|
return s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId)).first()
|
||||||
|
|
||||||
return q.first() if q.count() else None
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"ShiftedSeasonController.getShiftedSeason(): {repr(ex)}")
|
raise click.ClickException(f"ShiftedSeasonController.getShiftedSeason(): {repr(ex)}")
|
||||||
@@ -184,13 +192,12 @@ class ShiftedSeasonController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
|
shiftedSeason = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId)).first()
|
||||||
|
|
||||||
if q.count():
|
if shiftedSeason is not None:
|
||||||
|
|
||||||
#DAFUQ: https://stackoverflow.com/a/19245058
|
#DAFUQ: https://stackoverflow.com/a/19245058
|
||||||
# q.delete()
|
# q.delete()
|
||||||
shiftedSeason = q.first()
|
|
||||||
s.delete(shiftedSeason)
|
s.delete(shiftedSeason)
|
||||||
|
|
||||||
s.commit()
|
s.commit()
|
||||||
|
|||||||
@@ -16,10 +16,9 @@ class ShowController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Show).filter(Show.id == showId)
|
show = s.query(Show).filter(Show.id == showId).first()
|
||||||
|
|
||||||
if q.count():
|
if show is not None:
|
||||||
show: Show = q.first()
|
|
||||||
return show.getDescriptor(self.context)
|
return show.getDescriptor(self.context)
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
@@ -31,9 +30,7 @@ class ShowController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Show).filter(Show.id == showId)
|
return s.query(Show).filter(Show.id == showId).first()
|
||||||
|
|
||||||
return q.first() if q.count() else None
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"ShowController.getShow(): {repr(ex)}")
|
raise click.ClickException(f"ShowController.getShow(): {repr(ex)}")
|
||||||
@@ -44,12 +41,7 @@ class ShowController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Show)
|
return s.query(Show).all()
|
||||||
|
|
||||||
if q.count():
|
|
||||||
return q.all()
|
|
||||||
else:
|
|
||||||
return []
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"ShowController.getAllShows(): {repr(ex)}")
|
raise click.ClickException(f"ShowController.getAllShows(): {repr(ex)}")
|
||||||
@@ -61,9 +53,9 @@ class ShowController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Show).filter(Show.id == showDescriptor.getId())
|
currentShow = s.query(Show).filter(Show.id == showDescriptor.getId()).first()
|
||||||
|
|
||||||
if not q.count():
|
if currentShow is None:
|
||||||
show = Show(id = int(showDescriptor.getId()),
|
show = Show(id = int(showDescriptor.getId()),
|
||||||
name = str(showDescriptor.getName()),
|
name = str(showDescriptor.getName()),
|
||||||
year = int(showDescriptor.getYear()),
|
year = int(showDescriptor.getYear()),
|
||||||
@@ -76,9 +68,6 @@ class ShowController():
|
|||||||
s.commit()
|
s.commit()
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
|
|
||||||
currentShow = q.first()
|
|
||||||
|
|
||||||
changed = False
|
changed = False
|
||||||
if currentShow.name != str(showDescriptor.getName()):
|
if currentShow.name != str(showDescriptor.getName()):
|
||||||
currentShow.name = str(showDescriptor.getName())
|
currentShow.name = str(showDescriptor.getName())
|
||||||
@@ -113,14 +102,12 @@ class ShowController():
|
|||||||
def deleteShow(self, show_id):
|
def deleteShow(self, show_id):
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Show).filter(Show.id == int(show_id))
|
show = s.query(Show).filter(Show.id == int(show_id)).first()
|
||||||
|
|
||||||
|
if show is not None:
|
||||||
if q.count():
|
|
||||||
|
|
||||||
#DAFUQ: https://stackoverflow.com/a/19245058
|
#DAFUQ: https://stackoverflow.com/a/19245058
|
||||||
# q.delete()
|
# q.delete()
|
||||||
show = q.first()
|
|
||||||
s.delete(show)
|
s.delete(show)
|
||||||
|
|
||||||
s.commit()
|
s.commit()
|
||||||
|
|||||||
@@ -1,4 +1,11 @@
|
|||||||
import logging
|
from .configuration_controller import ConfigurationController
|
||||||
|
from .constants import (
|
||||||
|
DEFAULT_SHOW_INDEX_EPISODE_DIGITS,
|
||||||
|
DEFAULT_SHOW_INDEX_SEASON_DIGITS,
|
||||||
|
DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS,
|
||||||
|
DEFAULT_SHOW_INDICATOR_SEASON_DIGITS,
|
||||||
|
)
|
||||||
|
from .logging_utils import get_ffx_logger
|
||||||
|
|
||||||
|
|
||||||
class ShowDescriptor():
|
class ShowDescriptor():
|
||||||
@@ -15,10 +22,42 @@ class ShowDescriptor():
|
|||||||
INDICATOR_SEASON_DIGITS_KEY = 'indicator_season_digits'
|
INDICATOR_SEASON_DIGITS_KEY = 'indicator_season_digits'
|
||||||
INDICATOR_EPISODE_DIGITS_KEY = 'indicator_episode_digits'
|
INDICATOR_EPISODE_DIGITS_KEY = 'indicator_episode_digits'
|
||||||
|
|
||||||
DEFAULT_INDEX_SEASON_DIGITS = 2
|
DEFAULT_INDEX_SEASON_DIGITS = DEFAULT_SHOW_INDEX_SEASON_DIGITS
|
||||||
DEFAULT_INDEX_EPISODE_DIGITS = 2
|
DEFAULT_INDEX_EPISODE_DIGITS = DEFAULT_SHOW_INDEX_EPISODE_DIGITS
|
||||||
DEFAULT_INDICATOR_SEASON_DIGITS = 2
|
DEFAULT_INDICATOR_SEASON_DIGITS = DEFAULT_SHOW_INDICATOR_SEASON_DIGITS
|
||||||
DEFAULT_INDICATOR_EPISODE_DIGITS = 2
|
DEFAULT_INDICATOR_EPISODE_DIGITS = DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def getDefaultDigitLengths(cls, context: dict | None = None) -> dict[str, int]:
|
||||||
|
configurationData = {}
|
||||||
|
|
||||||
|
if context is not None:
|
||||||
|
configController = context.get('config')
|
||||||
|
if configController is not None and hasattr(configController, 'getData'):
|
||||||
|
configurationData = configController.getData()
|
||||||
|
|
||||||
|
return {
|
||||||
|
cls.INDEX_SEASON_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDEX_SEASON_DIGITS_CONFIG_KEY,
|
||||||
|
cls.DEFAULT_INDEX_SEASON_DIGITS,
|
||||||
|
),
|
||||||
|
cls.INDEX_EPISODE_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDEX_EPISODE_DIGITS_CONFIG_KEY,
|
||||||
|
cls.DEFAULT_INDEX_EPISODE_DIGITS,
|
||||||
|
),
|
||||||
|
cls.INDICATOR_SEASON_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDICATOR_SEASON_DIGITS_CONFIG_KEY,
|
||||||
|
cls.DEFAULT_INDICATOR_SEASON_DIGITS,
|
||||||
|
),
|
||||||
|
cls.INDICATOR_EPISODE_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
|
||||||
|
configurationData,
|
||||||
|
ConfigurationController.DEFAULT_INDICATOR_EPISODE_DIGITS_CONFIG_KEY,
|
||||||
|
cls.DEFAULT_INDICATOR_EPISODE_DIGITS,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
@@ -32,8 +71,7 @@ class ShowDescriptor():
|
|||||||
self.__logger = self.__context['logger']
|
self.__logger = self.__context['logger']
|
||||||
else:
|
else:
|
||||||
self.__context = {}
|
self.__context = {}
|
||||||
self.__logger = logging.getLogger('FFX')
|
self.__logger = get_ffx_logger()
|
||||||
self.__logger.addHandler(logging.NullHandler())
|
|
||||||
|
|
||||||
if ShowDescriptor.ID_KEY in kwargs.keys():
|
if ShowDescriptor.ID_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.ID_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.ID_KEY]) is not int:
|
||||||
@@ -56,34 +94,35 @@ class ShowDescriptor():
|
|||||||
else:
|
else:
|
||||||
self.__showYear = -1
|
self.__showYear = -1
|
||||||
|
|
||||||
|
defaultDigitLengths = self.getDefaultDigitLengths(self.__context)
|
||||||
|
|
||||||
if ShowDescriptor.INDEX_SEASON_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDEX_SEASON_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_SEASON_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_SEASON_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indexSeasonDigits = kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
self.__indexSeasonDigits = kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indexSeasonDigits = ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
|
self.__indexSeasonDigits = defaultDigitLengths[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
||||||
|
|
||||||
if ShowDescriptor.INDEX_EPISODE_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDEX_EPISODE_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_EPISODE_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_EPISODE_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indexEpisodeDigits = kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
self.__indexEpisodeDigits = kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indexEpisodeDigits = ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
|
self.__indexEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
||||||
|
|
||||||
if ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indicatorSeasonDigits = kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
self.__indicatorSeasonDigits = kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indicatorSeasonDigits = ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
|
self.__indicatorSeasonDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
||||||
|
|
||||||
if ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indicatorEpisodeDigits = kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
self.__indicatorEpisodeDigits = kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indicatorEpisodeDigits = ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS
|
self.__indicatorEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
||||||
|
|
||||||
|
|
||||||
def getId(self):
|
def getId(self):
|
||||||
|
|||||||
@@ -5,16 +5,9 @@ from textual.widgets import Header, Footer, Static, Button, DataTable, Input
|
|||||||
from textual.containers import Grid
|
from textual.containers import Grid
|
||||||
from textual.widgets._data_table import CellDoesNotExist
|
from textual.widgets._data_table import CellDoesNotExist
|
||||||
|
|
||||||
from ffx.model.pattern import Pattern
|
|
||||||
|
|
||||||
from .pattern_details_screen import PatternDetailsScreen
|
from .pattern_details_screen import PatternDetailsScreen
|
||||||
from .pattern_delete_screen import PatternDeleteScreen
|
from .pattern_delete_screen import PatternDeleteScreen
|
||||||
|
|
||||||
from .show_controller import ShowController
|
|
||||||
from .pattern_controller import PatternController
|
|
||||||
from .tmdb_controller import TmdbController
|
|
||||||
from .shifted_season_controller import ShiftedSeasonController
|
|
||||||
|
|
||||||
from .show_descriptor import ShowDescriptor
|
from .show_descriptor import ShowDescriptor
|
||||||
|
|
||||||
from .shifted_season_details_screen import ShiftedSeasonDetailsScreen
|
from .shifted_season_details_screen import ShiftedSeasonDetailsScreen
|
||||||
@@ -23,6 +16,7 @@ from .shifted_season_delete_screen import ShiftedSeasonDeleteScreen
|
|||||||
from ffx.model.shifted_season import ShiftedSeason
|
from ffx.model.shifted_season import ShiftedSeason
|
||||||
|
|
||||||
from .helper import filterFilename
|
from .helper import filterFilename
|
||||||
|
from .screen_support import build_screen_bootstrap, build_screen_controllers
|
||||||
|
|
||||||
|
|
||||||
# Screen[dict[int, str, int]]
|
# Screen[dict[int, str, int]]
|
||||||
@@ -94,31 +88,24 @@ class ShowDetailsScreen(Screen):
|
|||||||
def __init__(self, showId = None):
|
def __init__(self, showId = None):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
self.context = self.app.getContext()
|
bootstrap = build_screen_bootstrap(self.app.getContext())
|
||||||
self.Session = self.context['database']['session'] # convenience
|
self.context = bootstrap.context
|
||||||
|
|
||||||
self.__sc = ShowController(context = self.context)
|
controllers = build_screen_controllers(
|
||||||
self.__pc = PatternController(context = self.context)
|
self.context,
|
||||||
self.__tc = TmdbController()
|
pattern=True,
|
||||||
self.__ssc = ShiftedSeasonController(context = self.context)
|
show=True,
|
||||||
|
tmdb=True,
|
||||||
|
shifted_season=True,
|
||||||
|
)
|
||||||
|
self.__sc = controllers['show']
|
||||||
|
self.__pc = controllers['pattern']
|
||||||
|
self.__tc = controllers['tmdb']
|
||||||
|
self.__ssc = controllers['shifted_season']
|
||||||
|
|
||||||
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
|
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
|
||||||
|
|
||||||
|
|
||||||
def loadPatterns(self, show_id : int):
|
|
||||||
|
|
||||||
try:
|
|
||||||
s = self.Session()
|
|
||||||
q = s.query(Pattern).filter(Pattern.show_id == int(show_id))
|
|
||||||
|
|
||||||
return [{'id': int(p.id), 'pattern': str(p.pattern)} for p in q.all()]
|
|
||||||
|
|
||||||
except Exception as ex:
|
|
||||||
raise click.ClickException(f"ShowDetailsScreen.loadPatterns(): {repr(ex)}")
|
|
||||||
finally:
|
|
||||||
s.close()
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def updateShiftedSeasons(self):
|
def updateShiftedSeasons(self):
|
||||||
|
|
||||||
@@ -166,20 +153,27 @@ class ShowDetailsScreen(Screen):
|
|||||||
|
|
||||||
|
|
||||||
#raise click.ClickException(f"show_id {showId}")
|
#raise click.ClickException(f"show_id {showId}")
|
||||||
patternList = self.loadPatterns(showId)
|
for pattern in self.__pc.getPatternsForShow(showId):
|
||||||
# raise click.ClickException(f"patternList {patternList}")
|
row = (pattern.getPattern(),)
|
||||||
for pattern in patternList:
|
|
||||||
row = (pattern['pattern'],)
|
|
||||||
self.patternTable.add_row(*map(str, row))
|
self.patternTable.add_row(*map(str, row))
|
||||||
|
|
||||||
self.updateShiftedSeasons()
|
self.updateShiftedSeasons()
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
defaultDigitLengths = ShowDescriptor.getDefaultDigitLengths(self.context)
|
||||||
|
|
||||||
self.query_one("#index_season_digits_input", Input).value = "2"
|
self.query_one("#index_season_digits_input", Input).value = str(
|
||||||
self.query_one("#index_episode_digits_input", Input).value = "2"
|
defaultDigitLengths[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
||||||
self.query_one("#indicator_season_digits_input", Input).value = "2"
|
)
|
||||||
self.query_one("#indicator_episode_digits_input", Input).value = "2"
|
self.query_one("#index_episode_digits_input", Input).value = str(
|
||||||
|
defaultDigitLengths[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
||||||
|
)
|
||||||
|
self.query_one("#indicator_season_digits_input", Input).value = str(
|
||||||
|
defaultDigitLengths[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
||||||
|
)
|
||||||
|
self.query_one("#indicator_episode_digits_input", Input).value = str(
|
||||||
|
defaultDigitLengths[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def getSelectedPatternDescriptor(self):
|
def getSelectedPatternDescriptor(self):
|
||||||
@@ -402,7 +396,7 @@ class ShowDetailsScreen(Screen):
|
|||||||
|
|
||||||
def getShowDescriptorFromInput(self) -> ShowDescriptor:
|
def getShowDescriptorFromInput(self) -> ShowDescriptor:
|
||||||
|
|
||||||
kwargs = {}
|
kwargs = {ShowDescriptor.CONTEXT_KEY: self.context}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if self.__showDescriptor:
|
if self.__showDescriptor:
|
||||||
@@ -444,7 +438,7 @@ class ShowDetailsScreen(Screen):
|
|||||||
|
|
||||||
# Event handler for button press
|
# Event handler for button press
|
||||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||||
# Check if the button pressed is the one we are interested in
|
|
||||||
if event.button.id == "save_button":
|
if event.button.id == "save_button":
|
||||||
|
|
||||||
showDescriptor = self.getShowDescriptorFromInput()
|
showDescriptor = self.getShowDescriptorFromInput()
|
||||||
|
|||||||
@@ -162,4 +162,7 @@ class ShowsScreen(Screen):
|
|||||||
|
|
||||||
yield self.table
|
yield self.table
|
||||||
|
|
||||||
yield Footer()
|
f = Footer()
|
||||||
|
f.description = "yolo"
|
||||||
|
|
||||||
|
yield f
|
||||||
|
|||||||
@@ -67,10 +67,11 @@ class TagController():
|
|||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
|
|
||||||
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId),
|
tag = s.query(MediaTag).filter(
|
||||||
MediaTag.key == str(tagKey))
|
MediaTag.pattern_id == int(patternId),
|
||||||
if q.count():
|
MediaTag.key == str(tagKey),
|
||||||
tag = q.first()
|
).first()
|
||||||
|
if tag is not None:
|
||||||
s.delete(tag)
|
s.delete(tag)
|
||||||
s.commit()
|
s.commit()
|
||||||
return True
|
return True
|
||||||
@@ -107,12 +108,8 @@ class TagController():
|
|||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
|
|
||||||
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId))
|
tags = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId)).all()
|
||||||
|
return {t.key:t.value for t in tags}
|
||||||
if q.count():
|
|
||||||
return {t.key:t.value for t in q.all()}
|
|
||||||
else:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"TagController.findAllMediaTags(): {repr(ex)}")
|
raise click.ClickException(f"TagController.findAllMediaTags(): {repr(ex)}")
|
||||||
@@ -125,12 +122,8 @@ class TagController():
|
|||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
|
|
||||||
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId))
|
tags = s.query(TrackTag).filter(TrackTag.track_id == int(trackId)).all()
|
||||||
|
return {t.key:t.value for t in tags}
|
||||||
if q.count():
|
|
||||||
return {t.key:t.value for t in q.all()}
|
|
||||||
else:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"TagController.findAllTracks(): {repr(ex)}")
|
raise click.ClickException(f"TagController.findAllTracks(): {repr(ex)}")
|
||||||
@@ -142,12 +135,7 @@ class TagController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Track).filter(MediaTag.track_id == int(trackId), MediaTag.key == str(trackKey))
|
return s.query(Track).filter(MediaTag.track_id == int(trackId), MediaTag.key == str(trackKey)).first()
|
||||||
|
|
||||||
if q.count():
|
|
||||||
return q.first()
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"TagController.findMediaTag(): {repr(ex)}")
|
raise click.ClickException(f"TagController.findMediaTag(): {repr(ex)}")
|
||||||
@@ -158,12 +146,10 @@ class TagController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId), TrackTag.key == str(tagKey))
|
return s.query(TrackTag).filter(
|
||||||
|
TrackTag.track_id == int(trackId),
|
||||||
if q.count():
|
TrackTag.key == str(tagKey),
|
||||||
return q.first()
|
).first()
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"TagController.findTrackTag(): {repr(ex)}")
|
raise click.ClickException(f"TagController.findTrackTag(): {repr(ex)}")
|
||||||
@@ -175,11 +161,9 @@ class TagController():
|
|||||||
def deleteMediaTag(self, tagId) -> bool:
|
def deleteMediaTag(self, tagId) -> bool:
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(MediaTag).filter(MediaTag.id == int(tagId))
|
tag = s.query(MediaTag).filter(MediaTag.id == int(tagId)).first()
|
||||||
|
|
||||||
if q.count():
|
if tag is not None:
|
||||||
|
|
||||||
tag = q.first()
|
|
||||||
|
|
||||||
s.delete(tag)
|
s.delete(tag)
|
||||||
|
|
||||||
@@ -201,11 +185,9 @@ class TagController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(TrackTag).filter(TrackTag.id == int(tagId))
|
tag = s.query(TrackTag).filter(TrackTag.id == int(tagId)).first()
|
||||||
|
|
||||||
if q.count():
|
if tag is not None:
|
||||||
|
|
||||||
tag = q.first()
|
|
||||||
|
|
||||||
s.delete(tag)
|
s.delete(tag)
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
import os, requests, time, logging
|
import os, requests, time
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
|
from .logging_utils import get_ffx_logger
|
||||||
|
|
||||||
|
|
||||||
class TMDB_REQUEST_EXCEPTION(Exception):
|
class TMDB_REQUEST_EXCEPTION(Exception):
|
||||||
def __init__(self, statusCode, statusMessage):
|
def __init__(self, statusCode, statusMessage):
|
||||||
@@ -27,8 +29,7 @@ class TmdbController():
|
|||||||
self.__context = context
|
self.__context = context
|
||||||
|
|
||||||
if context is None:
|
if context is None:
|
||||||
self.__logger = logging.getLogger('FFX')
|
self.__logger = get_ffx_logger()
|
||||||
self.__logger.addHandler(logging.NullHandler())
|
|
||||||
else:
|
else:
|
||||||
self.__logger = context['logger']
|
self.__logger = context['logger']
|
||||||
|
|
||||||
|
|||||||
@@ -5,11 +5,22 @@ class TrackCodec(Enum):
|
|||||||
|
|
||||||
H265 = {'identifier': 'hevc', 'format': 'h265', 'extension': 'h265' ,'label': 'H.265'}
|
H265 = {'identifier': 'hevc', 'format': 'h265', 'extension': 'h265' ,'label': 'H.265'}
|
||||||
H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
|
H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
|
||||||
|
MPEG4 = {'identifier': 'mpeg4', 'format': 'm4v', 'extension': 'm4v' ,'label': 'MPEG-4'}
|
||||||
|
MPEG2 = {'identifier': 'mpeg2video', 'format': 'mpeg2video', 'extension': 'mpg' ,'label': 'MPEG-2'}
|
||||||
|
|
||||||
AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
|
AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
|
||||||
AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
|
AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
|
||||||
|
EAC3 = {'identifier': 'eac3', 'format': 'eac3', 'extension': 'eac3' , 'label': 'EAC3'}
|
||||||
DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
|
DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
|
||||||
|
MP3 = {'identifier': 'mp3', 'format': 'mp3', 'extension': 'mp3' , 'label': 'MP3'}
|
||||||
|
|
||||||
|
SRT = {'identifier': 'subrip', 'format': 'srt', 'extension': 'srt' , 'label': 'SRT'}
|
||||||
ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
|
ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
|
||||||
|
TTF = {'identifier': 'ttf', 'format': None, 'extension': 'ttf' , 'label': 'TTF'}
|
||||||
PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
|
PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
|
||||||
|
VOBSUB = {'identifier': 'dvd_subtitle', 'format': None, 'extension': 'mkv' , 'label': 'VobSub'}
|
||||||
|
|
||||||
|
PNG = {'identifier': 'png', 'format': None, 'extension': 'png' , 'label': 'PNG'}
|
||||||
|
|
||||||
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
|
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
|
||||||
|
|
||||||
@@ -23,8 +34,8 @@ class TrackCodec(Enum):
|
|||||||
return str(self.value['label'])
|
return str(self.value['label'])
|
||||||
|
|
||||||
def format(self):
|
def format(self):
|
||||||
"""Returns the codec as single letter"""
|
"""Returns the codec """
|
||||||
return str(self.value['format'])
|
return self.value['format']
|
||||||
|
|
||||||
def extension(self):
|
def extension(self):
|
||||||
"""Returns the corresponding extension"""
|
"""Returns the corresponding extension"""
|
||||||
|
|||||||
@@ -19,6 +19,20 @@ class TrackController():
|
|||||||
self.context = context
|
self.context = context
|
||||||
self.Session = self.context['database']['session'] # convenience
|
self.Session = self.context['database']['session'] # convenience
|
||||||
|
|
||||||
|
self.__configurationData = self.context['config'].getData()
|
||||||
|
|
||||||
|
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
|
||||||
|
|
||||||
|
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
|
||||||
|
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
|
||||||
|
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
|
||||||
|
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
|
||||||
|
if 'streams' in metadataConfiguration.keys()
|
||||||
|
and 'remove' in metadataConfiguration['streams'].keys() else [])
|
||||||
|
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
|
||||||
|
if 'streams' in metadataConfiguration.keys()
|
||||||
|
and 'ignore' in metadataConfiguration['streams'].keys() else [])
|
||||||
|
|
||||||
|
|
||||||
def addTrack(self, trackDescriptor : TrackDescriptor, patternId = None):
|
def addTrack(self, trackDescriptor : TrackDescriptor, patternId = None):
|
||||||
|
|
||||||
@@ -40,6 +54,8 @@ class TrackController():
|
|||||||
|
|
||||||
for k,v in trackDescriptor.getTags().items():
|
for k,v in trackDescriptor.getTags().items():
|
||||||
|
|
||||||
|
# Filter tags that make no sense to preserve
|
||||||
|
if k not in self.__ignoreTrackKeys and k not in self.__removeTrackKeys:
|
||||||
tag = TrackTag(track_id = track.id,
|
tag = TrackTag(track_id = track.id,
|
||||||
key = k,
|
key = k,
|
||||||
value = v)
|
value = v)
|
||||||
@@ -59,11 +75,9 @@ class TrackController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Track).filter(Track.id == int(trackId))
|
track = s.query(Track).filter(Track.id == int(trackId)).first()
|
||||||
|
|
||||||
if q.count():
|
if track is not None:
|
||||||
|
|
||||||
track : Track = q.first()
|
|
||||||
|
|
||||||
track.index = int(trackDescriptor.getIndex())
|
track.index = int(trackDescriptor.getIndex())
|
||||||
|
|
||||||
@@ -177,12 +191,10 @@ class TrackController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Track).filter(Track.pattern_id == int(patternId), Track.index == int(index))
|
return s.query(Track).filter(
|
||||||
|
Track.pattern_id == int(patternId),
|
||||||
if q.count():
|
Track.index == int(index),
|
||||||
return q.first()
|
).first()
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"TrackController.getTrack(): {repr(ex)}")
|
raise click.ClickException(f"TrackController.getTrack(): {repr(ex)}")
|
||||||
@@ -202,11 +214,9 @@ class TrackController():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Track).filter(Track.pattern_id == patternId, Track.index == index)
|
track = s.query(Track).filter(Track.pattern_id == patternId, Track.index == index).first()
|
||||||
|
|
||||||
if q.count():
|
if track is not None:
|
||||||
|
|
||||||
track : Track = q.first()
|
|
||||||
|
|
||||||
if state:
|
if state:
|
||||||
track.setDisposition(disposition)
|
track.setDisposition(disposition)
|
||||||
@@ -228,15 +238,21 @@ class TrackController():
|
|||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
|
|
||||||
q = s.query(Track).filter(Track.id == int(trackId))
|
track = s.query(Track).filter(Track.id == int(trackId)).first()
|
||||||
|
|
||||||
if q.count():
|
if track is not None:
|
||||||
patternId = int(q.first().pattern_id)
|
patternId = int(track.pattern_id)
|
||||||
|
|
||||||
q_siblings = s.query(Track).filter(Track.pattern_id == patternId).order_by(Track.index)
|
q_siblings = s.query(Track).filter(Track.pattern_id == patternId).order_by(Track.index)
|
||||||
|
siblingTracks = q_siblings.all()
|
||||||
|
|
||||||
|
if len(siblingTracks) <= 1:
|
||||||
|
raise click.ClickException(
|
||||||
|
f"Cannot delete the last track from pattern #{patternId}. Patterns must define at least one track."
|
||||||
|
)
|
||||||
|
|
||||||
index = 0
|
index = 0
|
||||||
for track in q_siblings.all():
|
for track in siblingTracks:
|
||||||
|
|
||||||
if track.id == int(trackId):
|
if track.id == int(trackId):
|
||||||
s.delete(track)
|
s.delete(track)
|
||||||
|
|||||||
@@ -6,8 +6,6 @@ from textual.containers import Grid
|
|||||||
|
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
|
||||||
from .track_controller import TrackController
|
|
||||||
|
|
||||||
|
|
||||||
# Screen[dict[int, str, int]]
|
# Screen[dict[int, str, int]]
|
||||||
class TrackDeleteScreen(Screen):
|
class TrackDeleteScreen(Screen):
|
||||||
@@ -52,14 +50,9 @@ class TrackDeleteScreen(Screen):
|
|||||||
def __init__(self, trackDescriptor : TrackDescriptor):
|
def __init__(self, trackDescriptor : TrackDescriptor):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
self.context = self.app.getContext()
|
|
||||||
self.Session = self.context['database']['session'] # convenience
|
|
||||||
|
|
||||||
if type(trackDescriptor) is not TrackDescriptor:
|
if type(trackDescriptor) is not TrackDescriptor:
|
||||||
raise click.ClickException('TrackDeleteScreen.init(): trackDescriptor is required to be of type TrackDescriptor')
|
raise click.ClickException('TrackDeleteScreen.init(): trackDescriptor is required to be of type TrackDescriptor')
|
||||||
|
|
||||||
self.__tc = TrackController(context = self.context)
|
|
||||||
|
|
||||||
self.__trackDescriptor = trackDescriptor
|
self.__trackDescriptor = trackDescriptor
|
||||||
|
|
||||||
|
|
||||||
@@ -116,21 +109,7 @@ class TrackDeleteScreen(Screen):
|
|||||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||||
|
|
||||||
if event.button.id == "delete_button":
|
if event.button.id == "delete_button":
|
||||||
|
|
||||||
track = self.__tc.getTrack(self.__trackDescriptor.getPatternId(), self.__trackDescriptor.getIndex())
|
|
||||||
|
|
||||||
if track is None:
|
|
||||||
raise click.ClickException(f"Track is none: patternId={self.__trackDescriptor.getPatternId()} type={self.__trackDescriptor.getType()} subIndex={self.__trackDescriptor.getSubIndex()}")
|
|
||||||
|
|
||||||
if track is not None:
|
|
||||||
|
|
||||||
if self.__tc.deleteTrack(track.getId()):
|
|
||||||
self.dismiss(self.__trackDescriptor)
|
self.dismiss(self.__trackDescriptor)
|
||||||
|
|
||||||
else:
|
|
||||||
#TODO: Meldung
|
|
||||||
self.app.pop_screen()
|
|
||||||
|
|
||||||
if event.button.id == "cancel_button":
|
if event.button.id == "cancel_button":
|
||||||
self.app.pop_screen()
|
self.app.pop_screen()
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
import logging
|
|
||||||
from typing import Self
|
from typing import Self
|
||||||
|
|
||||||
from .iso_language import IsoLanguage
|
from .iso_language import IsoLanguage
|
||||||
@@ -6,8 +5,9 @@ from .track_type import TrackType
|
|||||||
from .audio_layout import AudioLayout
|
from .audio_layout import AudioLayout
|
||||||
from .track_disposition import TrackDisposition
|
from .track_disposition import TrackDisposition
|
||||||
from .track_codec import TrackCodec
|
from .track_codec import TrackCodec
|
||||||
|
from .logging_utils import get_ffx_logger
|
||||||
|
|
||||||
from .helper import dictDiff, setDiff
|
# from .helper import dictDiff, setDiff
|
||||||
|
|
||||||
|
|
||||||
class TrackDescriptor:
|
class TrackDescriptor:
|
||||||
@@ -34,7 +34,6 @@ class TrackDescriptor:
|
|||||||
FFPROBE_CODEC_TYPE_KEY = "codec_type"
|
FFPROBE_CODEC_TYPE_KEY = "codec_type"
|
||||||
FFPROBE_CODEC_KEY = "codec_name"
|
FFPROBE_CODEC_KEY = "codec_name"
|
||||||
|
|
||||||
CODEC_PGS = 'hdmv_pgs_subtitle'
|
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
@@ -47,8 +46,7 @@ class TrackDescriptor:
|
|||||||
self.__logger = self.__context['logger']
|
self.__logger = self.__context['logger']
|
||||||
else:
|
else:
|
||||||
self.__context = {}
|
self.__context = {}
|
||||||
self.__logger = logging.getLogger('FFX')
|
self.__logger = get_ffx_logger()
|
||||||
self.__logger.addHandler(logging.NullHandler())
|
|
||||||
|
|
||||||
if TrackDescriptor.ID_KEY in kwargs.keys():
|
if TrackDescriptor.ID_KEY in kwargs.keys():
|
||||||
if type(kwargs[TrackDescriptor.ID_KEY]) is not int:
|
if type(kwargs[TrackDescriptor.ID_KEY]) is not int:
|
||||||
@@ -321,24 +319,24 @@ class TrackDescriptor:
|
|||||||
else:
|
else:
|
||||||
self.__dispositionSet.discard(disposition)
|
self.__dispositionSet.discard(disposition)
|
||||||
|
|
||||||
def compare(self, vsTrackDescriptor: Self):
|
# def compare(self, vsTrackDescriptor: Self):
|
||||||
|
#
|
||||||
compareResult = {}
|
# compareResult = {}
|
||||||
|
#
|
||||||
tagsDiffResult = dictDiff(vsTrackDescriptor.getTags(), self.getTags())
|
# tagsDiffResult = dictKeysDiff(vsTrackDescriptor.getTags(), self.getTags())
|
||||||
|
#
|
||||||
if tagsDiffResult:
|
# if tagsDiffResult:
|
||||||
compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
|
# compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
|
||||||
|
#
|
||||||
vsDispositions = vsTrackDescriptor.getDispositionSet()
|
# vsDispositions = vsTrackDescriptor.getDispositionSet()
|
||||||
dispositions = self.getDispositionSet()
|
# dispositions = self.getDispositionSet()
|
||||||
|
#
|
||||||
dispositionDiffResult = setDiff(vsDispositions, dispositions)
|
# dispositionDiffResult = setDiff(vsDispositions, dispositions)
|
||||||
|
#
|
||||||
if dispositionDiffResult:
|
# if dispositionDiffResult:
|
||||||
compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
|
# compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
|
||||||
|
#
|
||||||
return compareResult
|
# return compareResult
|
||||||
|
|
||||||
def setExternalSourceFilePath(self, filePath: str):
|
def setExternalSourceFilePath(self, filePath: str):
|
||||||
self.__externalSourceFilePath = str(filePath)
|
self.__externalSourceFilePath = str(filePath)
|
||||||
|
|||||||
@@ -3,29 +3,20 @@ import click
|
|||||||
from textual.screen import Screen
|
from textual.screen import Screen
|
||||||
from textual.widgets import Header, Footer, Static, Button, SelectionList, Select, DataTable, Input
|
from textual.widgets import Header, Footer, Static, Button, SelectionList, Select, DataTable, Input
|
||||||
from textual.containers import Grid
|
from textual.containers import Grid
|
||||||
|
|
||||||
from ffx.model.pattern import Pattern
|
|
||||||
|
|
||||||
from .track_controller import TrackController
|
|
||||||
from .pattern_controller import PatternController
|
|
||||||
from .tag_controller import TagController
|
|
||||||
|
|
||||||
from .track_type import TrackType
|
|
||||||
from .track_codec import TrackCodec
|
|
||||||
|
|
||||||
from .iso_language import IsoLanguage
|
|
||||||
from .track_disposition import TrackDisposition
|
|
||||||
from .audio_layout import AudioLayout
|
|
||||||
|
|
||||||
from .track_descriptor import TrackDescriptor
|
|
||||||
|
|
||||||
from .tag_details_screen import TagDetailsScreen
|
|
||||||
from .tag_delete_screen import TagDeleteScreen
|
|
||||||
|
|
||||||
from textual.widgets._data_table import CellDoesNotExist
|
from textual.widgets._data_table import CellDoesNotExist
|
||||||
|
|
||||||
|
from .audio_layout import AudioLayout
|
||||||
|
from .iso_language import IsoLanguage
|
||||||
|
from .tag_delete_screen import TagDeleteScreen
|
||||||
|
from .tag_details_screen import TagDetailsScreen
|
||||||
|
from .track_codec import TrackCodec
|
||||||
|
from .track_descriptor import TrackDescriptor
|
||||||
|
from .track_disposition import TrackDisposition
|
||||||
|
from .track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.helper import formatRichColor, removeRichColor
|
||||||
|
|
||||||
|
|
||||||
# Screen[dict[int, str, int]]
|
|
||||||
class TrackDetailsScreen(Screen):
|
class TrackDetailsScreen(Screen):
|
||||||
|
|
||||||
CSS = """
|
CSS = """
|
||||||
@@ -95,339 +86,383 @@ class TrackDetailsScreen(Screen):
|
|||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, trackDescriptor : TrackDescriptor = None, patternId = None, trackType : TrackType = None, index = None, subIndex = None):
|
def __init__(
|
||||||
|
self,
|
||||||
|
trackDescriptor: TrackDescriptor = None,
|
||||||
|
patternId=None,
|
||||||
|
patternLabel: str = "",
|
||||||
|
siblingTrackDescriptors=None,
|
||||||
|
trackType: TrackType = None,
|
||||||
|
index=None,
|
||||||
|
subIndex=None,
|
||||||
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
self.context = self.app.getContext()
|
self.context = self.app.getContext()
|
||||||
self.Session = self.context['database']['session'] # convenience
|
|
||||||
|
|
||||||
self.__tc = TrackController(context = self.context)
|
self.__configurationData = self.context["config"].getData()
|
||||||
self.__pc = PatternController(context = self.context)
|
|
||||||
self.__tac = TagController(context = self.context)
|
metadataConfiguration = (
|
||||||
|
self.__configurationData["metadata"]
|
||||||
|
if "metadata" in self.__configurationData.keys()
|
||||||
|
else {}
|
||||||
|
)
|
||||||
|
|
||||||
|
self.__removeTrackKeys = (
|
||||||
|
metadataConfiguration["streams"]["remove"]
|
||||||
|
if "streams" in metadataConfiguration.keys()
|
||||||
|
and "remove" in metadataConfiguration["streams"].keys()
|
||||||
|
else []
|
||||||
|
)
|
||||||
|
self.__ignoreTrackKeys = (
|
||||||
|
metadataConfiguration["streams"]["ignore"]
|
||||||
|
if "streams" in metadataConfiguration.keys()
|
||||||
|
and "ignore" in metadataConfiguration["streams"].keys()
|
||||||
|
else []
|
||||||
|
)
|
||||||
|
|
||||||
self.__isNew = trackDescriptor is None
|
self.__isNew = trackDescriptor is None
|
||||||
|
self.__trackDescriptor = trackDescriptor
|
||||||
|
self.__patternId = (
|
||||||
|
int(patternId)
|
||||||
|
if patternId is not None
|
||||||
|
else (
|
||||||
|
int(trackDescriptor.getPatternId())
|
||||||
|
if trackDescriptor is not None and trackDescriptor.getPatternId() != -1
|
||||||
|
else -1
|
||||||
|
)
|
||||||
|
)
|
||||||
|
self.__patternLabel = str(patternLabel)
|
||||||
|
self.__siblingTrackDescriptors = list(siblingTrackDescriptors or [])
|
||||||
|
|
||||||
if self.__isNew:
|
if self.__isNew:
|
||||||
self.__trackType = trackType
|
self.__trackType = trackType
|
||||||
self.__trackCodec = TrackCodec.UNKNOWN
|
self.__trackCodec = TrackCodec.UNKNOWN
|
||||||
self.__audioLayout = AudioLayout.LAYOUT_UNDEFINED
|
self.__audioLayout = AudioLayout.LAYOUT_UNDEFINED
|
||||||
self.__index = index
|
self.__index = index
|
||||||
self.__subIndex = subIndex
|
self.__subIndex = subIndex
|
||||||
self.__trackDescriptor : TrackDescriptor = None
|
self.__draftTrackTags = {}
|
||||||
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else {}
|
|
||||||
else:
|
else:
|
||||||
self.__trackType = trackDescriptor.getType()
|
self.__trackType = trackDescriptor.getType()
|
||||||
self.__trackCodec = trackDescriptor.getCodec()
|
self.__trackCodec = trackDescriptor.getCodec()
|
||||||
self.__audioLayout = trackDescriptor.getAudioLayout()
|
self.__audioLayout = trackDescriptor.getAudioLayout()
|
||||||
self.__index = trackDescriptor.getIndex()
|
self.__index = trackDescriptor.getIndex()
|
||||||
self.__subIndex = trackDescriptor.getSubIndex()
|
self.__subIndex = trackDescriptor.getSubIndex()
|
||||||
self.__trackDescriptor : TrackDescriptor = trackDescriptor
|
self.__draftTrackTags = {
|
||||||
self.__pattern : Pattern = self.__pc.getPattern(self.__trackDescriptor.getPatternId())
|
key: value
|
||||||
|
for key, value in trackDescriptor.getTags().items()
|
||||||
|
if key not in ("language", "title")
|
||||||
|
}
|
||||||
|
|
||||||
|
def _descriptor_refs_same_track(self, descriptor: TrackDescriptor) -> bool:
|
||||||
|
if self.__trackDescriptor is None:
|
||||||
|
return False
|
||||||
|
if descriptor.getId() != -1 and self.__trackDescriptor.getId() != -1:
|
||||||
|
return descriptor.getId() == self.__trackDescriptor.getId()
|
||||||
|
return (
|
||||||
|
descriptor.getPatternId() == self.__trackDescriptor.getPatternId()
|
||||||
|
and descriptor.getIndex() == self.__trackDescriptor.getIndex()
|
||||||
|
and descriptor.getSubIndex() == self.__trackDescriptor.getSubIndex()
|
||||||
|
)
|
||||||
|
|
||||||
def updateTags(self):
|
def updateTags(self):
|
||||||
|
|
||||||
self.trackTagsTable.clear()
|
self.trackTagsTable.clear()
|
||||||
|
|
||||||
trackId = self.__trackDescriptor.getId()
|
for key, value in self.__draftTrackTags.items():
|
||||||
|
textColor = None
|
||||||
|
if key in self.__ignoreTrackKeys:
|
||||||
|
textColor = "blue"
|
||||||
|
if key in self.__removeTrackKeys:
|
||||||
|
textColor = "red"
|
||||||
|
|
||||||
if trackId != -1:
|
row = (formatRichColor(key, textColor), formatRichColor(value, textColor))
|
||||||
|
|
||||||
trackTags = self.__tac.findAllTrackTags(trackId)
|
|
||||||
|
|
||||||
for k,v in trackTags.items():
|
|
||||||
|
|
||||||
if k != 'language' and k != 'title':
|
|
||||||
row = (k,v)
|
|
||||||
self.trackTagsTable.add_row(*map(str, row))
|
self.trackTagsTable.add_row(*map(str, row))
|
||||||
|
|
||||||
|
|
||||||
def on_mount(self):
|
def on_mount(self):
|
||||||
|
|
||||||
self.query_one("#index_label", Static).update(str(self.__index) if self.__index is not None else '-')
|
self.query_one("#index_label", Static).update(
|
||||||
self.query_one("#subindex_label", Static).update(str(self.__subIndex)if self.__subIndex is not None else '-')
|
str(self.__index) if self.__index is not None else "-"
|
||||||
|
)
|
||||||
if self.__pattern is not None:
|
self.query_one("#subindex_label", Static).update(
|
||||||
self.query_one("#pattern_label", Static).update(self.__pattern.getPattern())
|
str(self.__subIndex) if self.__subIndex is not None else "-"
|
||||||
|
)
|
||||||
|
self.query_one("#pattern_label", Static).update(self.__patternLabel)
|
||||||
|
|
||||||
if self.__trackType is not None:
|
if self.__trackType is not None:
|
||||||
self.query_one("#type_select", Select).value = self.__trackType.label()
|
self.query_one("#type_select", Select).value = self.__trackType.label()
|
||||||
if self.__trackType == TrackType.AUDIO:
|
|
||||||
self.query_one("#audio_layout_select", Select).value = self.__audioLayout.label()
|
self.query_one("#audio_layout_select", Select).value = self.__audioLayout.label()
|
||||||
|
|
||||||
for d in TrackDisposition:
|
for disposition in TrackDisposition:
|
||||||
|
|
||||||
dispositionIsSet = (self.__trackDescriptor is not None
|
dispositionIsSet = (
|
||||||
and d in self.__trackDescriptor.getDispositionSet())
|
self.__trackDescriptor is not None
|
||||||
|
and disposition in self.__trackDescriptor.getDispositionSet()
|
||||||
|
)
|
||||||
|
|
||||||
dispositionOption = (d.label(), d.index(), dispositionIsSet)
|
dispositionOption = (
|
||||||
self.query_one("#dispositions_selection_list", SelectionList).add_option(dispositionOption)
|
disposition.label(),
|
||||||
|
disposition.index(),
|
||||||
|
dispositionIsSet,
|
||||||
|
)
|
||||||
|
self.query_one("#dispositions_selection_list", SelectionList).add_option(
|
||||||
|
dispositionOption
|
||||||
|
)
|
||||||
|
|
||||||
if self.__trackDescriptor is not None:
|
if self.__trackDescriptor is not None:
|
||||||
|
self.query_one("#language_select", Select).value = (
|
||||||
self.query_one("#language_select", Select).value = self.__trackDescriptor.getLanguage().label()
|
self.__trackDescriptor.getLanguage().label()
|
||||||
|
)
|
||||||
self.query_one("#title_input", Input).value = self.__trackDescriptor.getTitle()
|
self.query_one("#title_input", Input).value = self.__trackDescriptor.getTitle()
|
||||||
self.updateTags()
|
self.updateTags()
|
||||||
|
|
||||||
|
|
||||||
def compose(self):
|
def compose(self):
|
||||||
|
|
||||||
self.trackTagsTable = DataTable(classes="five")
|
self.trackTagsTable = DataTable(classes="five")
|
||||||
|
|
||||||
# Define the columns with headers
|
|
||||||
self.column_key_track_tag_key = self.trackTagsTable.add_column("Key", width=50)
|
self.column_key_track_tag_key = self.trackTagsTable.add_column("Key", width=50)
|
||||||
self.column_key_track_tag_value = self.trackTagsTable.add_column("Value", width=100)
|
self.column_key_track_tag_value = self.trackTagsTable.add_column("Value", width=100)
|
||||||
|
|
||||||
self.trackTagsTable.cursor_type = 'row'
|
self.trackTagsTable.cursor_type = "row"
|
||||||
|
|
||||||
|
languages = [language.label() for language in IsoLanguage]
|
||||||
languages = [l.label() for l in IsoLanguage]
|
|
||||||
|
|
||||||
yield Header()
|
yield Header()
|
||||||
|
|
||||||
with Grid():
|
with Grid():
|
||||||
|
|
||||||
# 1
|
yield Static(
|
||||||
yield Static(f"New stream" if self.__isNew else f"Edit stream", id="toplabel", classes="five")
|
"New stream" if self.__isNew else "Edit stream",
|
||||||
|
id="toplabel",
|
||||||
|
classes="five",
|
||||||
|
)
|
||||||
|
|
||||||
# 2
|
|
||||||
yield Static("for pattern")
|
yield Static("for pattern")
|
||||||
yield Static("", id="pattern_label", classes="four")
|
yield Static("", id="pattern_label", classes="four", markup=False)
|
||||||
|
|
||||||
# 3
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 4
|
|
||||||
yield Static("Index / Subindex")
|
yield Static("Index / Subindex")
|
||||||
yield Static("", id="index_label", classes="two")
|
yield Static("", id="index_label", classes="two")
|
||||||
yield Static("", id="subindex_label", classes="two")
|
yield Static("", id="subindex_label", classes="two")
|
||||||
|
|
||||||
# 5
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 6
|
|
||||||
yield Static("Type")
|
yield Static("Type")
|
||||||
yield Select.from_values([t.label() for t in TrackType], classes="four", id="type_select")
|
yield Select.from_values(
|
||||||
|
[trackType.label() for trackType in TrackType],
|
||||||
|
classes="four",
|
||||||
|
id="type_select",
|
||||||
|
)
|
||||||
|
|
||||||
# 7
|
|
||||||
if self.__trackType == TrackType.AUDIO:
|
|
||||||
yield Static("Audio Layout")
|
yield Static("Audio Layout")
|
||||||
yield Select.from_values([t.label() for t in AudioLayout], classes="four", id="audio_layout_select")
|
yield Select.from_values(
|
||||||
else:
|
[layout.label() for layout in AudioLayout],
|
||||||
|
classes="four",
|
||||||
|
id="audio_layout_select",
|
||||||
|
)
|
||||||
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 8
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 9
|
|
||||||
yield Static(" ", classes="five")
|
|
||||||
|
|
||||||
# 10
|
|
||||||
yield Static("Language")
|
yield Static("Language")
|
||||||
yield Select.from_values(languages, classes="four", id="language_select")
|
yield Select.from_values(languages, classes="four", id="language_select")
|
||||||
# 11
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 12
|
|
||||||
yield Static("Title")
|
yield Static("Title")
|
||||||
yield Input(id="title_input", classes="four")
|
yield Input(id="title_input", classes="four")
|
||||||
|
|
||||||
# 13
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 14
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 15
|
|
||||||
yield Static("Stream tags")
|
yield Static("Stream tags")
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
yield Button("Add", id="button_add_stream_tag")
|
yield Button("Add", id="button_add_stream_tag")
|
||||||
yield Button("Edit", id="button_edit_stream_tag")
|
yield Button("Edit", id="button_edit_stream_tag")
|
||||||
yield Button("Delete", id="button_delete_stream_tag")
|
yield Button("Delete", id="button_delete_stream_tag")
|
||||||
# 16
|
|
||||||
yield self.trackTagsTable
|
yield self.trackTagsTable
|
||||||
|
|
||||||
# 17
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 18
|
|
||||||
yield Static("Stream dispositions", classes="five")
|
yield Static("Stream dispositions", classes="five")
|
||||||
|
|
||||||
# 19
|
|
||||||
yield SelectionList[int](
|
yield SelectionList[int](
|
||||||
classes="five",
|
classes="five",
|
||||||
id = "dispositions_selection_list"
|
id="dispositions_selection_list",
|
||||||
)
|
)
|
||||||
|
|
||||||
# 20
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
# 21
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 22
|
|
||||||
yield Button("Save", id="save_button")
|
yield Button("Save", id="save_button")
|
||||||
yield Button("Cancel", id="cancel_button")
|
yield Button("Cancel", id="cancel_button")
|
||||||
|
|
||||||
# 23
|
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 24
|
|
||||||
yield Static(" ", classes="five", id="messagestatic")
|
yield Static(" ", classes="five", id="messagestatic")
|
||||||
|
|
||||||
|
|
||||||
yield Footer(id="footer")
|
yield Footer(id="footer")
|
||||||
|
|
||||||
|
|
||||||
def getTrackDescriptorFromInput(self):
|
def getTrackDescriptorFromInput(self):
|
||||||
|
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
|
|
||||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
|
||||||
|
|
||||||
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__pattern.getId())
|
if self.__trackDescriptor is not None and self.__trackDescriptor.getId() != -1:
|
||||||
|
kwargs[TrackDescriptor.ID_KEY] = self.__trackDescriptor.getId()
|
||||||
|
|
||||||
kwargs[TrackDescriptor.INDEX_KEY] = self.__index
|
if self.__patternId != -1:
|
||||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = self.__subIndex #!
|
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__patternId)
|
||||||
|
|
||||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(self.query_one("#type_select", Select).value)
|
kwargs[TrackDescriptor.INDEX_KEY] = int(self.__index)
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = (
|
||||||
|
int(self.__trackDescriptor.getSourceIndex())
|
||||||
|
if self.__trackDescriptor is not None
|
||||||
|
else int(self.__index)
|
||||||
|
)
|
||||||
|
if self.__subIndex is not None and int(self.__subIndex) >= 0:
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(self.__subIndex)
|
||||||
|
|
||||||
|
selectedTrackType = TrackType.fromLabel(
|
||||||
|
self.query_one("#type_select", Select).value
|
||||||
|
)
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = selectedTrackType
|
||||||
kwargs[TrackDescriptor.CODEC_KEY] = self.__trackCodec
|
kwargs[TrackDescriptor.CODEC_KEY] = self.__trackCodec
|
||||||
|
|
||||||
if self.__trackType == TrackType.AUDIO:
|
if selectedTrackType == TrackType.AUDIO:
|
||||||
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(self.query_one("#audio_layout_select", Select).value)
|
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(
|
||||||
|
self.query_one("#audio_layout_select", Select).value
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.LAYOUT_UNDEFINED
|
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.LAYOUT_UNDEFINED
|
||||||
|
|
||||||
trackTags = {}
|
trackTags = dict(self.__draftTrackTags)
|
||||||
|
|
||||||
language = self.query_one("#language_select", Select).value
|
language = self.query_one("#language_select", Select).value
|
||||||
if language:
|
if language:
|
||||||
trackTags['language'] = IsoLanguage.find(language).threeLetter()
|
trackTags["language"] = IsoLanguage.find(language).threeLetter()
|
||||||
|
|
||||||
title = self.query_one("#title_input", Input).value
|
title = self.query_one("#title_input", Input).value
|
||||||
if title:
|
if title:
|
||||||
trackTags['title'] = title
|
trackTags["title"] = title
|
||||||
|
|
||||||
tableTags = {row[0]:row[1] for r in self.trackTagsTable.rows if (row := self.trackTagsTable.get_row(r)) and row[0] != 'language' and row[0] != 'title'}
|
kwargs[TrackDescriptor.TAGS_KEY] = trackTags
|
||||||
|
|
||||||
kwargs[TrackDescriptor.TAGS_KEY] = trackTags | tableTags
|
dispositionFlags = sum(
|
||||||
|
[2 ** flag for flag in self.query_one("#dispositions_selection_list", SelectionList).selected]
|
||||||
dispositionFlags = sum([2**f for f in self.query_one("#dispositions_selection_list", SelectionList).selected])
|
)
|
||||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = TrackDisposition.toSet(dispositionFlags)
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = TrackDisposition.toSet(
|
||||||
|
dispositionFlags
|
||||||
|
)
|
||||||
|
|
||||||
return TrackDescriptor(**kwargs)
|
return TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def getSelectedTag(self):
|
def getSelectedTag(self):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
row_key, _ = self.trackTagsTable.coordinate_to_cell_key(
|
||||||
# Fetch the currently selected row when 'Enter' is pressed
|
self.trackTagsTable.cursor_coordinate
|
||||||
#selected_row_index = self.table.cursor_row
|
)
|
||||||
row_key, col_key = self.trackTagsTable.coordinate_to_cell_key(self.trackTagsTable.cursor_coordinate)
|
|
||||||
|
|
||||||
if row_key is not None:
|
if row_key is not None:
|
||||||
selected_tag_data = self.trackTagsTable.get_row(row_key)
|
selected_tag_data = self.trackTagsTable.get_row(row_key)
|
||||||
|
|
||||||
tagKey = str(selected_tag_data[0])
|
tagKey = removeRichColor(selected_tag_data[0])
|
||||||
tagValue = str(selected_tag_data[1])
|
tagValue = removeRichColor(selected_tag_data[1])
|
||||||
|
|
||||||
return tagKey, tagValue
|
return tagKey, tagValue
|
||||||
|
|
||||||
else:
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
except CellDoesNotExist:
|
except CellDoesNotExist:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Event handler for button press
|
|
||||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||||
|
|
||||||
# Check if the button pressed is the one we are interested in
|
|
||||||
if event.button.id == "save_button":
|
if event.button.id == "save_button":
|
||||||
|
|
||||||
# Check for multiple default/forced disposition flags
|
|
||||||
|
|
||||||
if self.__trackType == TrackType.VIDEO:
|
|
||||||
trackList = self.__tc.findVideoTracks(self.__pattern.getId())
|
|
||||||
if self.__trackType == TrackType.AUDIO:
|
|
||||||
trackList = self.__tc.findAudioTracks(self.__pattern.getId())
|
|
||||||
elif self.__trackType == TrackType.SUBTITLE:
|
|
||||||
trackList = self.__tc.findSubtitleTracks(self.__pattern.getId())
|
|
||||||
else:
|
|
||||||
trackList = []
|
|
||||||
|
|
||||||
siblingTrackList = [t for t in trackList if t.getType() == self.__trackType and t.getIndex() != self.__index]
|
|
||||||
|
|
||||||
numDefaultTracks = len([t for t in siblingTrackList if TrackDisposition.DEFAULT in t.getDispositionSet()])
|
|
||||||
numForcedTracks = len([t for t in siblingTrackList if TrackDisposition.FORCED in t.getDispositionSet()])
|
|
||||||
|
|
||||||
self.__subIndex = len(trackList)
|
|
||||||
trackDescriptor = self.getTrackDescriptorFromInput()
|
trackDescriptor = self.getTrackDescriptorFromInput()
|
||||||
|
|
||||||
if ((TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() and numDefaultTracks)
|
siblingTrackList = [
|
||||||
or (TrackDisposition.FORCED in trackDescriptor.getDispositionSet() and numForcedTracks)):
|
descriptor
|
||||||
|
for descriptor in self.__siblingTrackDescriptors
|
||||||
|
if not self._descriptor_refs_same_track(descriptor)
|
||||||
|
]
|
||||||
|
siblingTrackList = [
|
||||||
|
descriptor
|
||||||
|
for descriptor in siblingTrackList
|
||||||
|
if descriptor.getType() == trackDescriptor.getType()
|
||||||
|
]
|
||||||
|
|
||||||
self.query_one("#messagestatic", Static).update("Cannot add another stream with disposition flag 'debug' or 'forced' set")
|
numDefaultTracks = len(
|
||||||
|
[
|
||||||
else:
|
descriptor
|
||||||
|
for descriptor in siblingTrackList
|
||||||
self.query_one("#messagestatic", Static).update(" ")
|
if TrackDisposition.DEFAULT in descriptor.getDispositionSet()
|
||||||
|
]
|
||||||
|
)
|
||||||
|
numForcedTracks = len(
|
||||||
|
[
|
||||||
|
descriptor
|
||||||
|
for descriptor in siblingTrackList
|
||||||
|
if TrackDisposition.FORCED in descriptor.getDispositionSet()
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
if self.__isNew:
|
if self.__isNew:
|
||||||
|
trackDescriptor.setSubIndex(len(siblingTrackList))
|
||||||
|
elif self.__subIndex is not None and int(self.__subIndex) >= 0:
|
||||||
|
trackDescriptor.setSubIndex(int(self.__subIndex))
|
||||||
|
|
||||||
# Track per Screen hinzufügen
|
if (
|
||||||
self.__tc.addTrack(trackDescriptor)
|
TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet()
|
||||||
self.dismiss(trackDescriptor)
|
and numDefaultTracks
|
||||||
|
) or (
|
||||||
|
TrackDisposition.FORCED in trackDescriptor.getDispositionSet()
|
||||||
|
and numForcedTracks
|
||||||
|
):
|
||||||
|
|
||||||
|
self.query_one("#messagestatic", Static).update(
|
||||||
|
"Cannot add another stream with disposition flag 'default' or 'forced' set"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
|
self.query_one("#messagestatic", Static).update(" ")
|
||||||
track = self.__tc.getTrack(self.__pattern.getId(), self.__index)
|
|
||||||
|
|
||||||
# Track per details screen updaten
|
|
||||||
if self.__tc.updateTrack(track.getId(), trackDescriptor):
|
|
||||||
self.dismiss(trackDescriptor)
|
self.dismiss(trackDescriptor)
|
||||||
|
|
||||||
else:
|
|
||||||
self.app.pop_screen()
|
|
||||||
|
|
||||||
if event.button.id == "cancel_button":
|
if event.button.id == "cancel_button":
|
||||||
self.app.pop_screen()
|
self.app.pop_screen()
|
||||||
|
|
||||||
|
|
||||||
if event.button.id == "button_add_stream_tag":
|
if event.button.id == "button_add_stream_tag":
|
||||||
if not self.__isNew:
|
|
||||||
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
|
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
|
||||||
|
|
||||||
if event.button.id == "button_edit_stream_tag":
|
if event.button.id == "button_edit_stream_tag":
|
||||||
tagKey, tagValue = self.getSelectedTag()
|
selectedTag = self.getSelectedTag()
|
||||||
self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)
|
if selectedTag is not None:
|
||||||
|
self.app.push_screen(
|
||||||
|
TagDetailsScreen(key=selectedTag[0], value=selectedTag[1]),
|
||||||
|
self.handle_update_tag,
|
||||||
|
)
|
||||||
|
|
||||||
if event.button.id == "button_delete_stream_tag":
|
if event.button.id == "button_delete_stream_tag":
|
||||||
tagKey, tagValue = self.getSelectedTag()
|
selectedTag = self.getSelectedTag()
|
||||||
self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)
|
if selectedTag is not None:
|
||||||
|
self.app.push_screen(
|
||||||
|
TagDeleteScreen(key=selectedTag[0], value=selectedTag[1]),
|
||||||
|
self.handle_delete_tag,
|
||||||
|
)
|
||||||
|
|
||||||
def handle_update_tag(self, tag):
|
def handle_update_tag(self, tag):
|
||||||
|
if tag is None:
|
||||||
trackId = self.__trackDescriptor.getId()
|
return
|
||||||
|
self.__draftTrackTags[str(tag[0])] = str(tag[1])
|
||||||
if trackId == -1:
|
|
||||||
raise click.ClickException(f"TrackDetailsScreen.handle_update_tag: trackId not set (-1) trackDescriptor={self.__trackDescriptor}")
|
|
||||||
|
|
||||||
if self.__tac.updateTrackTag(trackId, tag[0], tag[1]) is not None:
|
|
||||||
self.updateTags()
|
self.updateTags()
|
||||||
|
|
||||||
def handle_delete_tag(self, trackTag):
|
def handle_delete_tag(self, trackTag):
|
||||||
|
if trackTag is None:
|
||||||
trackId = self.__trackDescriptor.getId()
|
return
|
||||||
|
self.__draftTrackTags.pop(str(trackTag[0]), None)
|
||||||
if trackId == -1:
|
|
||||||
raise click.ClickException(f"TrackDetailsScreen.handle_delete_tag: trackId not set (-1) trackDescriptor={self.__trackDescriptor}")
|
|
||||||
|
|
||||||
tag = self.__tac.findTrackTag(trackId, trackTag[0])
|
|
||||||
|
|
||||||
if tag is not None:
|
|
||||||
if self.__tac.deleteTrackTag(tag.id):
|
|
||||||
self.updateTags()
|
self.updateTags()
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ class TrackType(Enum):
|
|||||||
VIDEO = {'label': 'video', 'index': 1}
|
VIDEO = {'label': 'video', 'index': 1}
|
||||||
AUDIO = {'label': 'audio', 'index': 2}
|
AUDIO = {'label': 'audio', 'index': 2}
|
||||||
SUBTITLE = {'label': 'subtitle', 'index': 3}
|
SUBTITLE = {'label': 'subtitle', 'index': 3}
|
||||||
|
ATTACHMENT = {'label': 'attachment', 'index': 4}
|
||||||
|
|
||||||
UNKNOWN = {'label': 'unknown', 'index': 0}
|
UNKNOWN = {'label': 'unknown', 'index': 0}
|
||||||
|
|
||||||
|
|||||||
@@ -4,6 +4,8 @@ class VideoEncoder(Enum):
|
|||||||
|
|
||||||
AV1 = {'label': 'av1', 'index': 1}
|
AV1 = {'label': 'av1', 'index': 1}
|
||||||
VP9 = {'label': 'vp9', 'index': 2}
|
VP9 = {'label': 'vp9', 'index': 2}
|
||||||
|
H264 = {'label': 'h264', 'index': 3}
|
||||||
|
COPY = {'label': 'copy', 'index': 4}
|
||||||
|
|
||||||
UNDEFINED = {'label': 'undefined', 'index': 0}
|
UNDEFINED = {'label': 'undefined', 'index': 0}
|
||||||
|
|
||||||
|
|||||||
1
tests/__init__.py
Normal file
1
tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# Repo-root tests package for legacy and future test code.
|
||||||
1
tests/integration/__init__.py
Normal file
1
tests/integration/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
|
||||||
1
tests/integration/pattern_management/__init__.py
Normal file
1
tests/integration/pattern_management/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
|
||||||
@@ -0,0 +1,138 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
|
||||||
|
from tests.support.ffx_bundle import (
|
||||||
|
PatternTrackSpec,
|
||||||
|
SourceTrackSpec,
|
||||||
|
add_show,
|
||||||
|
build_controller_context,
|
||||||
|
create_source_fixture,
|
||||||
|
dispose_controller_context,
|
||||||
|
expected_output_path,
|
||||||
|
run_ffx_convert,
|
||||||
|
)
|
||||||
|
|
||||||
|
from ffx.pattern_controller import PatternController
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pytest
|
||||||
|
except ImportError: # pragma: no cover - unittest-only environments
|
||||||
|
pytest = None
|
||||||
|
|
||||||
|
if pytest is not None:
|
||||||
|
pytestmark = [pytest.mark.integration, pytest.mark.pattern_management]
|
||||||
|
|
||||||
|
|
||||||
|
class PatternManagementCliTests(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.tempdir = tempfile.TemporaryDirectory()
|
||||||
|
self.workdir = Path(self.tempdir.name)
|
||||||
|
self.home_dir = self.workdir / "home"
|
||||||
|
self.home_dir.mkdir()
|
||||||
|
self.database_path = self.workdir / "test.db"
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
self.tempdir.cleanup()
|
||||||
|
|
||||||
|
def prepare_duplicate_matching_patterns(self):
|
||||||
|
context = build_controller_context(self.database_path)
|
||||||
|
try:
|
||||||
|
add_show(context, show_id=1)
|
||||||
|
add_show(context, show_id=2)
|
||||||
|
|
||||||
|
controller = PatternController(context)
|
||||||
|
track_descriptors = [
|
||||||
|
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO)
|
||||||
|
]
|
||||||
|
|
||||||
|
def to_track_descriptor(spec: PatternTrackSpec):
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
|
||||||
|
kwargs = {
|
||||||
|
TrackDescriptor.INDEX_KEY: spec.index,
|
||||||
|
TrackDescriptor.SOURCE_INDEX_KEY: spec.source_index,
|
||||||
|
TrackDescriptor.TRACK_TYPE_KEY: spec.track_type,
|
||||||
|
TrackDescriptor.TAGS_KEY: dict(spec.tags),
|
||||||
|
TrackDescriptor.DISPOSITION_SET_KEY: set(spec.dispositions),
|
||||||
|
}
|
||||||
|
return TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
controller.savePatternSchema(
|
||||||
|
{"show_id": 1, "pattern": r"^dup_(s[0-9]+e[0-9]+)\.mkv$"},
|
||||||
|
[to_track_descriptor(track_descriptors[0])],
|
||||||
|
)
|
||||||
|
controller.savePatternSchema(
|
||||||
|
{"show_id": 2, "pattern": r"^dup_.*$"},
|
||||||
|
[to_track_descriptor(track_descriptors[0])],
|
||||||
|
)
|
||||||
|
finally:
|
||||||
|
dispose_controller_context(context)
|
||||||
|
|
||||||
|
def test_convert_fails_when_filename_matches_more_than_one_pattern(self):
|
||||||
|
self.prepare_duplicate_matching_patterns()
|
||||||
|
source_filename = "dup_s01e01.mkv"
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assertNotEqual(completed.returncode, 0)
|
||||||
|
error_output = f"{completed.stdout}\n{completed.stderr}"
|
||||||
|
self.assertIn("matched more than one pattern", error_output)
|
||||||
|
self.assertFalse(expected_output_path(self.workdir, source_filename).exists())
|
||||||
|
|
||||||
|
def test_convert_can_ignore_duplicate_matches_when_no_pattern_is_requested(self):
|
||||||
|
self.prepare_duplicate_matching_patterns()
|
||||||
|
source_filename = "dup_s01e01.mkv"
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-pattern",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
0,
|
||||||
|
completed.returncode,
|
||||||
|
f"STDOUT:\n{completed.stdout}\nSTDERR:\n{completed.stderr}",
|
||||||
|
)
|
||||||
|
self.assertTrue(expected_output_path(self.workdir, source_filename).exists())
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
1
tests/integration/subtrack_mapping/__init__.py
Normal file
1
tests/integration/subtrack_mapping/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
|
||||||
436
tests/integration/subtrack_mapping/test_cli_bundle.py
Normal file
436
tests/integration/subtrack_mapping/test_cli_bundle.py
Normal file
@@ -0,0 +1,436 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
|
||||||
|
from tests.support.ffx_bundle import (
|
||||||
|
PatternTrackSpec,
|
||||||
|
SourceTrackSpec,
|
||||||
|
create_source_fixture,
|
||||||
|
expected_output_path,
|
||||||
|
extract_first_subtitle_text,
|
||||||
|
ffprobe_json,
|
||||||
|
get_tag,
|
||||||
|
prepare_pattern_database,
|
||||||
|
run_ffx_convert,
|
||||||
|
write_vtt,
|
||||||
|
)
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pytest
|
||||||
|
except ImportError: # pragma: no cover - unittest-only environments
|
||||||
|
pytest = None
|
||||||
|
|
||||||
|
if pytest is not None:
|
||||||
|
pytestmark = [pytest.mark.integration, pytest.mark.subtrack_mapping]
|
||||||
|
|
||||||
|
|
||||||
|
class SubtrackMappingBundleTests(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.tempdir = tempfile.TemporaryDirectory()
|
||||||
|
self.workdir = Path(self.tempdir.name)
|
||||||
|
self.home_dir = self.workdir / "home"
|
||||||
|
self.home_dir.mkdir()
|
||||||
|
self.database_path = self.workdir / "test.db"
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
self.tempdir.cleanup()
|
||||||
|
|
||||||
|
def write_config(self, data: dict) -> None:
|
||||||
|
config_dir = self.home_dir / ".local" / "etc"
|
||||||
|
config_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
(config_dir / "ffx.json").write_text(json.dumps(data), encoding="utf-8")
|
||||||
|
|
||||||
|
def assertCompleted(self, completed):
|
||||||
|
if completed.returncode != 0:
|
||||||
|
self.fail(
|
||||||
|
"FFX convert failed\n"
|
||||||
|
f"STDOUT:\n{completed.stdout}\n"
|
||||||
|
f"STDERR:\n{completed.stderr}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_pattern_reorders_and_omits_tracks_preserving_metadata_and_group_order(self):
|
||||||
|
source_filename = "reorder_s01e01.mkv"
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0", title="Video Zero"),
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.SUBTITLE,
|
||||||
|
identity="subtitle-1",
|
||||||
|
language="eng",
|
||||||
|
title="First Subtitle",
|
||||||
|
subtitle_lines=("first embedded subtitle",),
|
||||||
|
),
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.AUDIO,
|
||||||
|
identity="audio-2",
|
||||||
|
language="deu",
|
||||||
|
title="German Audio",
|
||||||
|
),
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.SUBTITLE,
|
||||||
|
identity="subtitle-3",
|
||||||
|
language="fra",
|
||||||
|
title="Second Subtitle",
|
||||||
|
subtitle_lines=("second embedded subtitle",),
|
||||||
|
),
|
||||||
|
SourceTrackSpec(TrackType.ATTACHMENT, attachment_name="ordered.ttf"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
prepare_pattern_database(
|
||||||
|
self.database_path,
|
||||||
|
r"^reorder_(s[0-9]+e[0-9]+)\.mkv$",
|
||||||
|
[
|
||||||
|
PatternTrackSpec(
|
||||||
|
index=0,
|
||||||
|
source_index=0,
|
||||||
|
track_type=TrackType.VIDEO,
|
||||||
|
tags={"THIS_IS": "video-0", "title": "Video Zero"},
|
||||||
|
),
|
||||||
|
PatternTrackSpec(
|
||||||
|
index=1,
|
||||||
|
source_index=2,
|
||||||
|
track_type=TrackType.AUDIO,
|
||||||
|
tags={"THIS_IS": "audio-2", "language": "deu", "title": "German Audio"},
|
||||||
|
),
|
||||||
|
PatternTrackSpec(
|
||||||
|
index=2,
|
||||||
|
source_index=1,
|
||||||
|
track_type=TrackType.SUBTITLE,
|
||||||
|
tags={"THIS_IS": "subtitle-1", "language": "eng", "title": "First Subtitle"},
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
output_path = expected_output_path(self.workdir, source_filename)
|
||||||
|
self.assertTrue(output_path.is_file(), output_path)
|
||||||
|
|
||||||
|
streams = ffprobe_json(output_path)["streams"]
|
||||||
|
self.assertEqual(
|
||||||
|
[stream["codec_type"] for stream in streams],
|
||||||
|
["video", "audio", "subtitle", "attachment"],
|
||||||
|
)
|
||||||
|
self.assertEqual(
|
||||||
|
[get_tag(streams[index], "THIS_IS") for index in range(3)],
|
||||||
|
["video-0", "audio-2", "subtitle-1"],
|
||||||
|
)
|
||||||
|
self.assertNotIn(
|
||||||
|
"subtitle-3",
|
||||||
|
[get_tag(stream, "THIS_IS") for stream in streams if stream["codec_type"] != "attachment"],
|
||||||
|
)
|
||||||
|
self.assertEqual(streams[-1]["codec_name"], "ttf")
|
||||||
|
extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
|
||||||
|
self.assertIn("first embedded subtitle", extracted_subtitle)
|
||||||
|
self.assertNotIn("second embedded subtitle", extracted_subtitle)
|
||||||
|
|
||||||
|
def test_cli_rearrange_streams_reorders_tracks_without_database_pattern(self):
|
||||||
|
source_filename = "cli_s01e01.mkv"
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="First Audio"),
|
||||||
|
SourceTrackSpec(TrackType.AUDIO, identity="audio-2", language="deu", title="Second Audio"),
|
||||||
|
SourceTrackSpec(TrackType.SUBTITLE, identity="subtitle-3", language="eng", title="Subtitle"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-pattern",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
"--rearrange-streams",
|
||||||
|
"0,2,1,3",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
output_path = expected_output_path(self.workdir, source_filename)
|
||||||
|
streams = ffprobe_json(output_path)["streams"]
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
[stream["codec_type"] for stream in streams],
|
||||||
|
["video", "audio", "audio", "subtitle"],
|
||||||
|
)
|
||||||
|
self.assertEqual(
|
||||||
|
[get_tag(stream, "THIS_IS") for stream in streams],
|
||||||
|
["video-0", "audio-2", "audio-1", "subtitle-3"],
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_no_pattern_stream_remove_list_clears_copied_stream_metadata(self):
|
||||||
|
source_filename = "remove_tags_s01e01.mkv"
|
||||||
|
self.write_config(
|
||||||
|
{
|
||||||
|
"metadata": {
|
||||||
|
"streams": {
|
||||||
|
"remove": ["BPS"],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.VIDEO,
|
||||||
|
identity="video-0",
|
||||||
|
extra_tags={"BPS": "remove-me", "KEEP_ME": "video-keep"},
|
||||||
|
),
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.AUDIO,
|
||||||
|
identity="audio-1",
|
||||||
|
language="eng",
|
||||||
|
title="Main Audio",
|
||||||
|
extra_tags={"BPS": "remove-me", "KEEP_ME": "audio-keep"},
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-pattern",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
output_path = expected_output_path(self.workdir, source_filename)
|
||||||
|
streams = ffprobe_json(output_path)["streams"]
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
[stream["codec_type"] for stream in streams],
|
||||||
|
["video", "audio"],
|
||||||
|
)
|
||||||
|
self.assertEqual(get_tag(streams[0], "THIS_IS"), "video-0")
|
||||||
|
self.assertEqual(get_tag(streams[0], "KEEP_ME"), "video-keep")
|
||||||
|
self.assertIsNone(get_tag(streams[0], "BPS"))
|
||||||
|
self.assertEqual(get_tag(streams[1], "THIS_IS"), "audio-1")
|
||||||
|
self.assertEqual(get_tag(streams[1], "KEEP_ME"), "audio-keep")
|
||||||
|
self.assertIsNone(get_tag(streams[1], "BPS"))
|
||||||
|
|
||||||
|
def test_pattern_validation_fails_for_nonexistent_source_track_reference(self):
|
||||||
|
source_filename = "invalid_s01e01.mkv"
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
SourceTrackSpec(TrackType.AUDIO, identity="audio-1"),
|
||||||
|
SourceTrackSpec(TrackType.SUBTITLE, identity="subtitle-2"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
prepare_pattern_database(
|
||||||
|
self.database_path,
|
||||||
|
r"^invalid_(s[0-9]+e[0-9]+)\.mkv$",
|
||||||
|
[
|
||||||
|
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
|
||||||
|
PatternTrackSpec(index=1, source_index=99, track_type=TrackType.SUBTITLE),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assertNotEqual(completed.returncode, 0)
|
||||||
|
error_output = f"{completed.stdout}\n{completed.stderr}"
|
||||||
|
self.assertIn("non-existent source track #99", error_output)
|
||||||
|
self.assertFalse(expected_output_path(self.workdir, source_filename).exists())
|
||||||
|
|
||||||
|
def test_external_subtitle_file_replaces_payload_and_overrides_metadata(self):
|
||||||
|
source_filename = "substitute_s01e01.mkv"
|
||||||
|
self.write_config(
|
||||||
|
{
|
||||||
|
"metadata": {
|
||||||
|
"streams": {
|
||||||
|
"remove": ["BPS"],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="Main Audio"),
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.SUBTITLE,
|
||||||
|
identity="embedded-subtitle",
|
||||||
|
language="eng",
|
||||||
|
title="Embedded Title",
|
||||||
|
extra_tags={"BPS": "remove-me", "EXTERNAL_KEEP": "keep-me"},
|
||||||
|
subtitle_lines=("embedded subtitle payload",),
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
write_vtt(
|
||||||
|
self.workdir / "substitute_s01e01_2_deu.vtt",
|
||||||
|
("external subtitle payload",),
|
||||||
|
)
|
||||||
|
|
||||||
|
prepare_pattern_database(
|
||||||
|
self.database_path,
|
||||||
|
r"^substitute_(s[0-9]+e[0-9]+)\.mkv$",
|
||||||
|
[
|
||||||
|
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
|
||||||
|
PatternTrackSpec(index=1, source_index=1, track_type=TrackType.AUDIO),
|
||||||
|
PatternTrackSpec(index=2, source_index=2, track_type=TrackType.SUBTITLE),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
"--subtitle-directory",
|
||||||
|
str(self.workdir),
|
||||||
|
"--subtitle-prefix",
|
||||||
|
"substitute",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
output_path = expected_output_path(self.workdir, source_filename)
|
||||||
|
streams = ffprobe_json(output_path)["streams"]
|
||||||
|
subtitle_stream = [stream for stream in streams if stream["codec_type"] == "subtitle"][0]
|
||||||
|
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "language"), "deu")
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "title"), "Embedded Title")
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "THIS_IS"), "embedded-subtitle")
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "EXTERNAL_KEEP"), "keep-me")
|
||||||
|
self.assertIsNone(get_tag(subtitle_stream, "BPS"))
|
||||||
|
|
||||||
|
extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
|
||||||
|
self.assertIn("external subtitle payload", extracted_subtitle)
|
||||||
|
self.assertNotIn("embedded subtitle payload", extracted_subtitle)
|
||||||
|
|
||||||
|
def test_subtitle_prefix_uses_configured_base_directory_when_directory_is_omitted(self):
|
||||||
|
source_filename = "substitute_default_s01e01.mkv"
|
||||||
|
subtitle_prefix = "substitute_default"
|
||||||
|
subtitles_base_dir = self.home_dir / ".local" / "var" / "sync" / "subtitles"
|
||||||
|
resolved_subtitle_dir = subtitles_base_dir / subtitle_prefix
|
||||||
|
resolved_subtitle_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
self.write_config(
|
||||||
|
{
|
||||||
|
"subtitlesDirectory": "~/.local/var/sync/subtitles",
|
||||||
|
"metadata": {
|
||||||
|
"streams": {
|
||||||
|
"remove": ["BPS"],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="Main Audio"),
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.SUBTITLE,
|
||||||
|
identity="embedded-subtitle",
|
||||||
|
language="eng",
|
||||||
|
title="Embedded Title",
|
||||||
|
extra_tags={"BPS": "remove-me", "EXTERNAL_KEEP": "keep-me"},
|
||||||
|
subtitle_lines=("embedded subtitle payload",),
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
write_vtt(
|
||||||
|
resolved_subtitle_dir / f"{subtitle_prefix}_s01e01_2_deu.vtt",
|
||||||
|
("external subtitle payload",),
|
||||||
|
)
|
||||||
|
|
||||||
|
prepare_pattern_database(
|
||||||
|
self.database_path,
|
||||||
|
r"^substitute_default_(s[0-9]+e[0-9]+)\.mkv$",
|
||||||
|
[
|
||||||
|
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
|
||||||
|
PatternTrackSpec(index=1, source_index=1, track_type=TrackType.AUDIO),
|
||||||
|
PatternTrackSpec(index=2, source_index=2, track_type=TrackType.SUBTITLE),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_convert(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--video-encoder",
|
||||||
|
"copy",
|
||||||
|
"--no-tmdb",
|
||||||
|
"--no-prompt",
|
||||||
|
"--no-signature",
|
||||||
|
"--subtitle-prefix",
|
||||||
|
subtitle_prefix,
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
output_path = expected_output_path(self.workdir, source_filename)
|
||||||
|
streams = ffprobe_json(output_path)["streams"]
|
||||||
|
subtitle_stream = [stream for stream in streams if stream["codec_type"] == "subtitle"][0]
|
||||||
|
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "language"), "deu")
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "title"), "Embedded Title")
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "THIS_IS"), "embedded-subtitle")
|
||||||
|
self.assertEqual(get_tag(subtitle_stream, "EXTERNAL_KEEP"), "keep-me")
|
||||||
|
self.assertIsNone(get_tag(subtitle_stream, "BPS"))
|
||||||
|
|
||||||
|
extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
|
||||||
|
self.assertIn("external subtitle payload", extracted_subtitle)
|
||||||
|
self.assertNotIn("embedded subtitle payload", extracted_subtitle)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
228
tests/integration/test_cli_unmux.py
Normal file
228
tests/integration/test_cli_unmux.py
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
|
||||||
|
from tests.support.ffx_bundle import (
|
||||||
|
SourceTrackSpec,
|
||||||
|
build_controller_context,
|
||||||
|
create_source_fixture,
|
||||||
|
dispose_controller_context,
|
||||||
|
)
|
||||||
|
|
||||||
|
from ffx.pattern_controller import PatternController
|
||||||
|
from ffx.show_controller import ShowController
|
||||||
|
from ffx.show_descriptor import ShowDescriptor
|
||||||
|
from ffx.track_codec import TrackCodec
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pytest
|
||||||
|
except ImportError: # pragma: no cover - unittest-only environments
|
||||||
|
pytest = None
|
||||||
|
|
||||||
|
if pytest is not None:
|
||||||
|
pytestmark = [pytest.mark.integration]
|
||||||
|
|
||||||
|
|
||||||
|
SRC_ROOT = Path(__file__).resolve().parents[2] / "src"
|
||||||
|
|
||||||
|
|
||||||
|
def run_ffx_unmux(workdir: Path, home_dir: Path, database_path: Path, *args: str) -> subprocess.CompletedProcess[str]:
|
||||||
|
env = os.environ.copy()
|
||||||
|
env["HOME"] = str(home_dir)
|
||||||
|
existing_pythonpath = env.get("PYTHONPATH", "")
|
||||||
|
env["PYTHONPATH"] = str(SRC_ROOT) if not existing_pythonpath else f"{SRC_ROOT}{os.pathsep}{existing_pythonpath}"
|
||||||
|
|
||||||
|
command = [
|
||||||
|
sys.executable,
|
||||||
|
"-m",
|
||||||
|
"ffx",
|
||||||
|
"--database-file",
|
||||||
|
str(database_path),
|
||||||
|
"unmux",
|
||||||
|
*args,
|
||||||
|
]
|
||||||
|
return subprocess.run(command, cwd=workdir, env=env, capture_output=True, text=True)
|
||||||
|
|
||||||
|
|
||||||
|
class UnmuxCliTests(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.tempdir = tempfile.TemporaryDirectory()
|
||||||
|
self.workdir = Path(self.tempdir.name)
|
||||||
|
self.home_dir = self.workdir / "home"
|
||||||
|
self.home_dir.mkdir()
|
||||||
|
self.database_path = self.workdir / "test.db"
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
self.tempdir.cleanup()
|
||||||
|
|
||||||
|
def write_config(self, data: dict) -> None:
|
||||||
|
config_dir = self.home_dir / ".local" / "etc"
|
||||||
|
config_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
(config_dir / "ffx.json").write_text(json.dumps(data), encoding="utf-8")
|
||||||
|
|
||||||
|
def assertCompleted(self, completed):
|
||||||
|
if completed.returncode != 0:
|
||||||
|
self.fail(
|
||||||
|
"FFX unmux failed\n"
|
||||||
|
f"STDOUT:\n{completed.stdout}\n"
|
||||||
|
f"STDERR:\n{completed.stderr}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def seed_matching_show(self, pattern_expression: str, *, indicator_season_digits: int, indicator_episode_digits: int) -> None:
|
||||||
|
context = build_controller_context(self.database_path)
|
||||||
|
try:
|
||||||
|
ShowController(context).updateShow(
|
||||||
|
ShowDescriptor(
|
||||||
|
id=1,
|
||||||
|
name="Unmux Test Show",
|
||||||
|
year=2000,
|
||||||
|
indicator_season_digits=indicator_season_digits,
|
||||||
|
indicator_episode_digits=indicator_episode_digits,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
PatternController(context).savePatternSchema(
|
||||||
|
{
|
||||||
|
"show_id": 1,
|
||||||
|
"pattern": pattern_expression,
|
||||||
|
"quality": 0,
|
||||||
|
"notes": "",
|
||||||
|
},
|
||||||
|
trackDescriptors=[
|
||||||
|
TrackDescriptor(
|
||||||
|
index=0,
|
||||||
|
source_index=0,
|
||||||
|
track_type=TrackType.VIDEO,
|
||||||
|
codec_name=TrackCodec.H264,
|
||||||
|
tags={},
|
||||||
|
disposition_set=set(),
|
||||||
|
)
|
||||||
|
],
|
||||||
|
)
|
||||||
|
finally:
|
||||||
|
dispose_controller_context(context)
|
||||||
|
|
||||||
|
def test_subtitles_only_without_output_directory_uses_configured_base_plus_label(self):
|
||||||
|
self.write_config(
|
||||||
|
{
|
||||||
|
"subtitlesDirectory": "~/.local/var/sync/subtitles",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
source_filename = "unmux_s01e01.mkv"
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
SourceTrackSpec(
|
||||||
|
TrackType.SUBTITLE,
|
||||||
|
identity="subtitle-1",
|
||||||
|
language="eng",
|
||||||
|
subtitle_lines=("subtitle payload",),
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_unmux(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--subtitles-only",
|
||||||
|
"--label",
|
||||||
|
"dball",
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
expected_directory = self.home_dir / ".local" / "var" / "sync" / "subtitles" / "dball"
|
||||||
|
self.assertTrue(expected_directory.is_dir(), expected_directory)
|
||||||
|
|
||||||
|
def test_unmux_uses_configured_indicator_digits_in_output_filenames(self):
|
||||||
|
self.write_config(
|
||||||
|
{
|
||||||
|
"defaultIndicatorSeasonDigits": 3,
|
||||||
|
"defaultIndicatorEpisodeDigits": 4,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
source_filename = "unmux_s01e01.mkv"
|
||||||
|
output_directory = self.workdir / "unmux-output"
|
||||||
|
output_directory.mkdir()
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_unmux(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--label",
|
||||||
|
"dball",
|
||||||
|
"--output-directory",
|
||||||
|
str(output_directory),
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
output_filenames = sorted(path.name for path in output_directory.iterdir())
|
||||||
|
self.assertEqual(1, len(output_filenames), output_filenames)
|
||||||
|
self.assertTrue(
|
||||||
|
output_filenames[0].startswith("dball_S001E0001_"),
|
||||||
|
output_filenames,
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_unmux_prefers_matched_show_indicator_digits_over_config_defaults(self):
|
||||||
|
self.write_config(
|
||||||
|
{
|
||||||
|
"defaultIndicatorSeasonDigits": 4,
|
||||||
|
"defaultIndicatorEpisodeDigits": 4,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
self.seed_matching_show(
|
||||||
|
r"^unmux_([sS][0-9]+[eE][0-9]+)\.mkv$",
|
||||||
|
indicator_season_digits=1,
|
||||||
|
indicator_episode_digits=3,
|
||||||
|
)
|
||||||
|
source_filename = "unmux_s01e01.mkv"
|
||||||
|
output_directory = self.workdir / "unmux-output"
|
||||||
|
output_directory.mkdir()
|
||||||
|
source_path = create_source_fixture(
|
||||||
|
self.workdir,
|
||||||
|
source_filename,
|
||||||
|
[
|
||||||
|
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
completed = run_ffx_unmux(
|
||||||
|
self.workdir,
|
||||||
|
self.home_dir,
|
||||||
|
self.database_path,
|
||||||
|
"--label",
|
||||||
|
"dball",
|
||||||
|
"--output-directory",
|
||||||
|
str(output_directory),
|
||||||
|
str(source_path),
|
||||||
|
)
|
||||||
|
self.assertCompleted(completed)
|
||||||
|
|
||||||
|
output_filenames = sorted(path.name for path in output_directory.iterdir())
|
||||||
|
self.assertEqual(1, len(output_filenames), output_filenames)
|
||||||
|
self.assertTrue(
|
||||||
|
output_filenames[0].startswith("dball_S1E001_"),
|
||||||
|
output_filenames,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
1
tests/legacy/__init__.py
Normal file
1
tests/legacy/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# Legacy custom FFX test harness modules.
|
||||||
@@ -24,8 +24,9 @@ class BasenameCombinator():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.basename_combinator_{ identifier }")
|
module_name = f"tests.legacy.basename_combinator_{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.basename_combinator_{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'BasenameCombinator' and name.startswith('BasenameCombinator'):
|
if inspect.isclass(obj) and name != 'BasenameCombinator' and name.startswith('BasenameCombinator'):
|
||||||
return obj
|
return obj
|
||||||
@@ -24,8 +24,9 @@ class DispositionCombinator2():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.disposition_combinator_2_{ identifier }")
|
module_name = f"tests.legacy.disposition_combinator_2_{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.disposition_combinator_2_{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'DispositionCombinator2' and name.startswith('DispositionCombinator2'):
|
if inspect.isclass(obj) and name != 'DispositionCombinator2' and name.startswith('DispositionCombinator2'):
|
||||||
return obj
|
return obj
|
||||||
@@ -23,8 +23,9 @@ class DispositionCombinator3():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.disposition_combinator_3_{ identifier }")
|
module_name = f"tests.legacy.disposition_combinator_3_{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.disposition_combinator_3_{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'DispositionCombinator3' and name.startswith('DispositionCombinator3'):
|
if inspect.isclass(obj) and name != 'DispositionCombinator3' and name.startswith('DispositionCombinator3'):
|
||||||
return obj
|
return obj
|
||||||
@@ -1,11 +1,9 @@
|
|||||||
import os, math, tempfile, click
|
import os, math, tempfile, click
|
||||||
|
|
||||||
|
|
||||||
from ffx.ffx_controller import FfxController
|
|
||||||
|
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
|
|
||||||
from ffx.media_descriptor import MediaDescriptor
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
|
||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
from ffx.helper import dictCache
|
from ffx.helper import dictCache
|
||||||
@@ -149,7 +147,6 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,
|
|||||||
|
|
||||||
# subtitleFilePath = createVttFile(SHORT_SUBTITLE_SEQUENCE)
|
# subtitleFilePath = createVttFile(SHORT_SUBTITLE_SEQUENCE)
|
||||||
|
|
||||||
# commandTokens = FfxController.COMMAND_TOKENS
|
|
||||||
commandTokens = ['ffmpeg', '-y']
|
commandTokens = ['ffmpeg', '-y']
|
||||||
|
|
||||||
generatorCache = []
|
generatorCache = []
|
||||||
@@ -164,7 +161,8 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,
|
|||||||
|
|
||||||
subIndexCounter = {}
|
subIndexCounter = {}
|
||||||
|
|
||||||
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
|
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
|
||||||
|
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
|
||||||
|
|
||||||
trackType = trackDescriptor.getType()
|
trackType = trackDescriptor.getType()
|
||||||
|
|
||||||
@@ -231,15 +229,14 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,
|
|||||||
f"{mediaTagKey}={mediaTagValue}"]
|
f"{mediaTagKey}={mediaTagValue}"]
|
||||||
subIndexCounter[trackType] += 1
|
subIndexCounter[trackType] += 1
|
||||||
|
|
||||||
#TODO: Optimize too many runs
|
|
||||||
ffxContext = {'config': ConfigurationController(), 'logger': logger}
|
ffxContext = {'config': ConfigurationController(), 'logger': logger}
|
||||||
fc = FfxController(ffxContext, mediaDescriptor)
|
mdcs = MediaDescriptorChangeSet(ffxContext, mediaDescriptor)
|
||||||
|
|
||||||
commandTokens += (generatorTokens
|
commandTokens += (generatorTokens
|
||||||
+ importTokens
|
+ importTokens
|
||||||
+ mappingTokens
|
+ mappingTokens
|
||||||
+ metadataTokens
|
+ metadataTokens
|
||||||
+ fc.generateDispositionTokens())
|
+ mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
|
|
||||||
commandTokens += ['-t', str(length)]
|
commandTokens += ['-t', str(length)]
|
||||||
@@ -25,8 +25,9 @@ class LabelCombinator():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.{LabelCombinator.PREFIX}{ identifier }")
|
module_name = f"tests.legacy.{LabelCombinator.PREFIX}{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.{LabelCombinator.PREFIX}{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'LabelCombinator' and name.startswith('LabelCombinator'):
|
if inspect.isclass(obj) and name != 'LabelCombinator' and name.startswith('LabelCombinator'):
|
||||||
return obj
|
return obj
|
||||||
@@ -22,8 +22,9 @@ class MediaCombinator():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.media_combinator_{ identifier }")
|
module_name = f"tests.legacy.media_combinator_{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.media_combinator_{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'MediaCombinator' and name.startswith('MediaCombinator'):
|
if inspect.isclass(obj) and name != 'MediaCombinator' and name.startswith('MediaCombinator'):
|
||||||
return obj
|
return obj
|
||||||
@@ -22,8 +22,9 @@ class MediaTagCombinator():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.media_tag_combinator_{ identifier }")
|
module_name = f"tests.legacy.media_tag_combinator_{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.media_tag_combinator_{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'MediaTagCombinator' and name.startswith('MediaTagCombinator'):
|
if inspect.isclass(obj) and name != 'MediaTagCombinator' and name.startswith('MediaTagCombinator'):
|
||||||
return obj
|
return obj
|
||||||
@@ -4,7 +4,7 @@ from ffx.show_controller import ShowController
|
|||||||
from ffx.pattern_controller import PatternController
|
from ffx.pattern_controller import PatternController
|
||||||
from ffx.media_controller import MediaController
|
from ffx.media_controller import MediaController
|
||||||
|
|
||||||
from ffx.test.helper import createEmptyDirectory
|
from .helper import createEmptyDirectory
|
||||||
from ffx.database import databaseContext
|
from ffx.database import databaseContext
|
||||||
|
|
||||||
class Scenario():
|
class Scenario():
|
||||||
@@ -90,11 +90,7 @@ class Scenario():
|
|||||||
def __init__(self, context = None):
|
def __init__(self, context = None):
|
||||||
self._context = context
|
self._context = context
|
||||||
self._testDirectory = createEmptyDirectory()
|
self._testDirectory = createEmptyDirectory()
|
||||||
self._ffxExecutablePath = os.path.join(
|
self._ffxModuleName = 'ffx'
|
||||||
os.path.dirname(
|
|
||||||
os.path.dirname(
|
|
||||||
os.path.dirname(__file__))),
|
|
||||||
'ffx.py')
|
|
||||||
|
|
||||||
self._logger = context['logger']
|
self._logger = context['logger']
|
||||||
self._reportLogger = context['report_logger']
|
self._reportLogger = context['report_logger']
|
||||||
@@ -146,8 +142,9 @@ class Scenario():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.scenario_{ identifier }")
|
module_name = f"tests.legacy.scenario_{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.scenario_{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding Scenario as it seems to be included by import (?)
|
#HINT: Excluding Scenario as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'Scenario' and name.startswith('Scenario'):
|
if inspect.isclass(obj) and name != 'Scenario' and name.startswith('Scenario'):
|
||||||
return obj
|
return obj
|
||||||
@@ -2,7 +2,7 @@ import os, sys, click, glob
|
|||||||
|
|
||||||
from .scenario import Scenario
|
from .scenario import Scenario
|
||||||
|
|
||||||
from ffx.test.helper import createMediaTestFile
|
from .helper import createMediaTestFile
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
|
|
||||||
from ffx.file_properties import FileProperties
|
from ffx.file_properties import FileProperties
|
||||||
@@ -13,9 +13,9 @@ from ffx.track_descriptor import TrackDescriptor
|
|||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
from ffx.track_disposition import TrackDisposition
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
from ffx.test.media_combinator_0 import MediaCombinator0
|
from .media_combinator_0 import MediaCombinator0
|
||||||
|
|
||||||
from ffx.test.basename_combinator import BasenameCombinator
|
from .basename_combinator import BasenameCombinator
|
||||||
|
|
||||||
|
|
||||||
class Scenario1(Scenario):
|
class Scenario1(Scenario):
|
||||||
@@ -92,8 +92,7 @@ class Scenario1(Scenario):
|
|||||||
|
|
||||||
# Phase 2: Run ffx
|
# Phase 2: Run ffx
|
||||||
|
|
||||||
commandSequence = [sys.executable,
|
commandSequence = [sys.executable, '-m', self._ffxModuleName]
|
||||||
self._ffxExecutablePath]
|
|
||||||
|
|
||||||
if self._context['verbosity']:
|
if self._context['verbosity']:
|
||||||
commandSequence += ['--verbose',
|
commandSequence += ['--verbose',
|
||||||
@@ -2,7 +2,7 @@ import os, sys, click
|
|||||||
|
|
||||||
from .scenario import Scenario
|
from .scenario import Scenario
|
||||||
|
|
||||||
from ffx.test.helper import createMediaTestFile
|
from .helper import createMediaTestFile
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
|
|
||||||
from ffx.file_properties import FileProperties
|
from ffx.file_properties import FileProperties
|
||||||
@@ -13,7 +13,7 @@ from ffx.track_descriptor import TrackDescriptor
|
|||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
from ffx.track_disposition import TrackDisposition
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
from ffx.test.media_combinator import MediaCombinator
|
from .media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
|
||||||
class Scenario2(Scenario):
|
class Scenario2(Scenario):
|
||||||
@@ -77,8 +77,7 @@ class Scenario2(Scenario):
|
|||||||
|
|
||||||
# Phase 2: Run ffx
|
# Phase 2: Run ffx
|
||||||
|
|
||||||
commandSequence = [sys.executable,
|
commandSequence = [sys.executable, '-m', self._ffxModuleName]
|
||||||
self._ffxExecutablePath]
|
|
||||||
|
|
||||||
if self._context['verbosity']:
|
if self._context['verbosity']:
|
||||||
commandSequence += ['--verbose',
|
commandSequence += ['--verbose',
|
||||||
@@ -122,7 +121,8 @@ class Scenario2(Scenario):
|
|||||||
resultFileProperties = FileProperties(testContext, resultFile)
|
resultFileProperties = FileProperties(testContext, resultFile)
|
||||||
resultMediaDescriptor = resultFileProperties.getMediaDescriptor()
|
resultMediaDescriptor = resultFileProperties.getMediaDescriptor()
|
||||||
|
|
||||||
resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
|
# resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
|
||||||
|
resultMediaTracks = resultMediaDescriptor.getTrackDescriptors()
|
||||||
|
|
||||||
for assertIndex in range(len(assertSelectorList)):
|
for assertIndex in range(len(assertSelectorList)):
|
||||||
|
|
||||||
@@ -2,11 +2,11 @@ import os, sys, click
|
|||||||
|
|
||||||
from .scenario import Scenario
|
from .scenario import Scenario
|
||||||
|
|
||||||
from ffx.test.helper import createMediaTestFile
|
from .helper import createMediaTestFile
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
from ffx.database import databaseContext
|
from ffx.database import databaseContext
|
||||||
|
|
||||||
from ffx.test.helper import createEmptyDirectory
|
from .helper import createEmptyDirectory
|
||||||
from ffx.helper import getEpisodeFileBasename
|
from ffx.helper import getEpisodeFileBasename
|
||||||
|
|
||||||
from ffx.file_properties import FileProperties
|
from ffx.file_properties import FileProperties
|
||||||
@@ -17,8 +17,8 @@ from ffx.track_descriptor import TrackDescriptor
|
|||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
from ffx.track_disposition import TrackDisposition
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
from ffx.test.media_combinator import MediaCombinator
|
from .media_combinator import MediaCombinator
|
||||||
from ffx.test.indicator_combinator import IndicatorCombinator
|
from .indicator_combinator import IndicatorCombinator
|
||||||
|
|
||||||
from ffx.show_descriptor import ShowDescriptor
|
from ffx.show_descriptor import ShowDescriptor
|
||||||
|
|
||||||
@@ -163,8 +163,7 @@ class Scenario4(Scenario):
|
|||||||
|
|
||||||
# Phase 3: Run ffx
|
# Phase 3: Run ffx
|
||||||
|
|
||||||
commandSequence = [sys.executable,
|
commandSequence = [sys.executable, '-m', self._ffxModuleName]
|
||||||
self._ffxExecutablePath]
|
|
||||||
|
|
||||||
if self._context['verbosity']:
|
if self._context['verbosity']:
|
||||||
commandSequence += ['--verbose',
|
commandSequence += ['--verbose',
|
||||||
@@ -223,7 +222,8 @@ class Scenario4(Scenario):
|
|||||||
self._logger.debug(f"{variantLabel}: Result file properties: {rfp.getFilename()} season={rfp.getSeason()} episode={rfp.getEpisode()}")
|
self._logger.debug(f"{variantLabel}: Result file properties: {rfp.getFilename()} season={rfp.getSeason()} episode={rfp.getEpisode()}")
|
||||||
|
|
||||||
rmd = rfp.getMediaDescriptor()
|
rmd = rfp.getMediaDescriptor()
|
||||||
rmt = rmd.getAllTrackDescriptors()
|
# rmt = rmd.getAllTrackDescriptors()
|
||||||
|
rmt = rmd.getTrackDescriptors()
|
||||||
|
|
||||||
for l in rmd.getConfiguration(label = 'resultMediaDescriptor'):
|
for l in rmd.getConfiguration(label = 'resultMediaDescriptor'):
|
||||||
self._logger.debug(l)
|
self._logger.debug(l)
|
||||||
@@ -22,8 +22,9 @@ class TrackTagCombinator2():
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def getClassReference(identifier):
|
def getClassReference(identifier):
|
||||||
importlib.import_module(f"ffx.test.track_tag_combinator_2_{ identifier }")
|
module_name = f"tests.legacy.track_tag_combinator_2_{ identifier }"
|
||||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.track_tag_combinator_2_{ identifier }"]):
|
importlib.import_module(module_name)
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[module_name]):
|
||||||
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
||||||
if inspect.isclass(obj) and name != 'TrackTagCombinator2' and name.startswith('TrackTagCombinator2'):
|
if inspect.isclass(obj) and name != 'TrackTagCombinator2' and name.startswith('TrackTagCombinator2'):
|
||||||
return obj
|
return obj
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user