190 Commits

Author SHA1 Message Date
Javanaut
14c956b6fa Release v0.4.1 2026-04-23 22:10:04 +02:00
Javanaut
502a822bb4 prep 0.4.1 2026-04-23 22:09:36 +02:00
Javanaut
6cc21b5f36 Adds diagnostics/remedy system 2026-04-23 20:32:49 +02:00
Javanaut
0034f8ca97 ff 2026-04-23 16:37:47 +02:00
Javanaut
eedcbaed0a Merge branch 'dev' of gitea.maveno.de:Javanaut/ffx into dev 2026-04-23 16:31:19 +02:00
Javanaut
653ce7b417 Copy audio and video flags 2026-04-23 16:30:15 +02:00
Javanaut
b80c055826 fix table 2026-04-17 13:17:15 +02:00
Javanaut
c5fc6ac13d fix styled ASS and refactor att format 2026-04-17 11:41:13 +02:00
Javanaut
fea8ea4b70 Release v0.3.1 2026-04-16 19:44:07 +02:00
Javanaut
1bead05d19 ff 2026-04-16 19:36:40 +02:00
Javanaut
9fe2a842e9 ff 2026-04-16 19:32:41 +02:00
Javanaut
849d03d054 v0.3.1 2026-04-16 19:26:17 +02:00
Javanaut
3a87bbbba6 Anpassung --cut flag 2026-04-16 19:02:57 +02:00
Javanaut
ab5e8e53e1 Fix debug title 2026-04-16 18:32:07 +02:00
Javanaut
0ab2408444 Fix h265 format 2026-04-16 18:20:17 +02:00
Javanaut
bc1e0889e7 Fix inspect details screen 2026-04-16 18:10:39 +02:00
Javanaut
6dfbe1022a Merge branch 'dev' of gitea.maveno.de:Javanaut/ffx into dev 2026-04-15 15:50:27 +02:00
Javanaut
d3d2de8a0d adds scratchpad points 2026-04-15 15:50:24 +02:00
Javanaut
0728ece4b8 Fix h265 subtrack unmux 2026-04-15 00:03:17 +02:00
Javanaut
02e375fbf2 nnn 2026-04-14 19:08:29 +02:00
Javanaut
14e6ce8458 Fix logging 2026-04-14 10:04:39 +02:00
Javanaut
d314b6024d Release v0.3.0 2026-04-14 00:56:21 +02:00
Javanaut
d921629947 v0.3.0 2026-04-14 00:55:42 +02:00
Javanaut
65490e2a7f ff 2026-04-14 00:44:43 +02:00
Javanaut
6c5b518e4d ffn 2026-04-14 00:26:16 +02:00
Javanaut
e3c18f22d4 Adds UI tweaks nightly 2026-04-13 23:11:14 +02:00
Javanaut
57185c7f10 Adds missing codecs 2026-04-13 20:15:10 +02:00
Javanaut
1ff9ecd4b6 Release v0.2.6 2026-04-13 20:06:34 +02:00
Javanaut
037388886e prep 0.2.6 2026-04-13 20:04:25 +02:00
Javanaut
e614ca5d75 Splits screen classes 2026-04-13 14:57:13 +02:00
Javanaut
c0b3977ea6 iteration1 2026-04-13 13:16:33 +02:00
Javanaut
d9639561ce Fix TUI widgets color bleedthru 2026-04-13 12:00:38 +02:00
Javanaut
cbf43e5d6c adapt shift output 2026-04-12 20:41:31 +02:00
Javanaut
d6e885517d Adds inspect --shift option 2026-04-12 20:34:33 +02:00
Javanaut
2593c95b5c Release v0.2.5 2026-04-12 19:58:30 +02:00
Javanaut
8a8c43ecdf v0.2.5 2026-04-12 19:57:46 +02:00
Javanaut
6170ac641c ff 2026-04-12 19:35:03 +02:00
Javanaut
497c0e500b ff 2026-04-12 19:34:51 +02:00
Javanaut
008c643272 change disposition order for sidecar files 2026-04-12 19:31:49 +02:00
Javanaut
c302b30e63 ff 2026-04-12 19:19:08 +02:00
Javanaut
7926407534 ff 2026-04-12 19:09:26 +02:00
Javanaut
0894ac2fab ff 2026-04-12 18:50:41 +02:00
Javanaut
353759b983 ff 2026-04-12 18:47:54 +02:00
Javanaut
454f5f0656 ff 2026-04-12 18:46:54 +02:00
Javanaut
0e51d6337f ff 2026-04-12 18:35:13 +02:00
Javanaut
a24b6dedaa ff 2026-04-12 18:26:39 +02:00
Javanaut
8361fc536b ff 2026-04-12 17:53:56 +02:00
Javanaut
4d4272e5e8 ff 2026-04-12 17:47:06 +02:00
Javanaut
559869ca68 iteration1 2026-04-12 17:12:32 +02:00
Javanaut
0e4fae538b prep season shift 2026-04-12 16:52:12 +02:00
Javanaut
12509cd4e2 Release v0.2.4 2026-04-12 12:28:37 +02:00
Javanaut
2595bfe4f4 prep 0.2.4 2026-04-12 12:28:23 +02:00
Javanaut
3df11be5e9 upd .gitignore 2026-04-12 12:24:19 +02:00
Javanaut
fc9d94aeee prep 0.2.4 2026-04-12 12:21:26 +02:00
Javanaut
111df11199 ff 2026-04-12 12:20:01 +02:00
Javanaut
f0d4c36bc3 Adds release script and bumps 0.2.4 2026-04-12 12:12:41 +02:00
Javanaut
ef0d6e9274 Extd rename/unmux to pad with zeroes 2026-04-12 11:44:32 +02:00
Javanaut
d05b01cfb2 Adds rename command 2026-04-12 10:38:36 +02:00
Javanaut
9dc08d48e9 ff 2026-04-12 10:06:19 +02:00
Javanaut
20bdfc0dd7 Fix pri lang for rename mode 2026-04-12 10:06:01 +02:00
Javanaut
4365e083dc Adapt unmux command to changes in convert command 2026-04-11 22:31:04 +02:00
Javanaut
528915a235 Adds subtitle default dir 2026-04-11 21:17:21 +02:00
Javanaut
9a980b5766 Fix streamtags remove list 2026-04-11 20:50:09 +02:00
Javanaut
5eee7e1161 Extd cut parameter 2026-04-11 20:27:58 +02:00
Javanaut
0a41998e29 Adds Q/P values to output file metadata 2026-04-11 17:46:16 +02:00
Javanaut
ebdc23c3ce Fixes remove stream tags per list 2026-04-11 17:31:10 +02:00
Javanaut
9611930949 Misc Opts 2026-04-11 16:52:58 +02:00
Javanaut
609f93b783 Fix cpu percentage interpretations 2026-04-11 16:30:41 +02:00
Javanaut
52c6462fa8 Optimizes niceness and cpulimit usage 2026-04-11 16:21:17 +02:00
Javanaut
358ef18f77 Fix regex issues 2026-04-11 16:10:41 +02:00
Javanaut
fc729a2414 Opt database bootstrapping 2026-04-11 16:04:54 +02:00
Javanaut
0939a0c6c2 Optimizes ffprobe usage 2026-04-11 16:00:01 +02:00
Javanaut
c384d54c12 Impr upgrade 2026-04-11 15:08:08 +02:00
Javanaut
71553aad32 Streamlines imports and app start 2026-04-11 14:57:01 +02:00
Javanaut
d19e69990a Opt pattern matching 2026-04-09 16:11:51 +02:00
Javanaut
be0f4b4c4e Optimize database queries 2026-04-09 13:49:14 +02:00
Javanaut
01b5fdb289 Refine tests, CLI 2026-04-09 13:34:38 +02:00
Javanaut
60ae58500a Tidy up logging and rework tests from scratch 2026-04-09 12:46:24 +02:00
Javanaut
f9c8b8ac5e ffn2 2026-04-09 01:13:06 +02:00
Javanaut
72c735c3ee ffn 2026-04-09 01:06:32 +02:00
Javanaut
5871ae30ad ffn 2026-04-09 01:06:09 +02:00
Javanaut
381a62046b nightly 2026-04-09 01:04:47 +02:00
Javanaut
52724ecc5b ff 2026-04-09 01:03:41 +02:00
Javanaut
f288d445e4 Adds requirements, streamlines CLI helper procedures 2026-04-09 00:59:37 +02:00
Javanaut
d9db6da191 tf 2026-01-31 17:30:35 +01:00
Javanaut
5443881ea1 tf 2026-01-31 17:12:27 +01:00
Javanaut
8946b57456 fix attachement descriptor handling 2026-01-31 12:06:24 +01:00
Javanaut
686239491b Adapt for .ssa subtitles with attached fonts 2026-01-31 10:00:14 +01:00
Javanaut
126ba4487c fixes TextArea 2025-11-07 15:54:14 +01:00
Javanaut
447cda19ef ff 2025-11-07 15:49:52 +01:00
Javanaut
f1ba913a98 ff 2025-11-07 15:45:47 +01:00
Javanaut
59336aafb7 dd 2025-11-07 15:43:31 +01:00
Javanaut
fd5ad3ed56 Removes build artifacts from branch 2025-11-07 15:38:07 +01:00
Javanaut
2d03a3bb10 ff 2025-11-07 15:17:13 +01:00
Javanaut
4dc02d52a2 Adds notes field for patterns 2025-11-07 15:14:55 +01:00
Javanaut
ed0cea9c26 Adapts Q Message 2025-11-07 14:11:50 +01:00
Javanaut
15bfbdbe88 Adds setting quality accoeding to pattern default 2025-11-06 14:08:00 +01:00
Javanaut
c354ba09ba Adds pattern quality UI field 2025-11-05 21:24:53 +01:00
2eeea08be0 Merge branch 'dev' of gitea.maveno.de:Javanaut/ffx into dev 2025-10-08 11:01:15 +02:00
fbfc8ea965 rfc niceness/cpulimit 2025-10-08 10:59:04 +02:00
Javanaut
6ec5db2ea2 ff 2025-10-07 10:10:27 +02:00
Javanaut
8feced6f1c Lang/Codec changes 2025-10-07 10:10:09 +02:00
Javanaut
285649c30a Fix chinese iso code 2025-09-09 08:26:27 +02:00
Javanaut
558da817f1 Fügt hinzu Ländercodes Bokmal und Filipino 2025-09-02 23:46:56 +02:00
Javanaut
2a84327f69 Fügt hinzu Ländercodes Filipinisch und Bokmal 2025-09-02 23:38:07 +02:00
535b11dca5 fix pattern markup 2025-03-27 08:04:41 +01:00
8edc715795 typo 2025-03-04 22:58:14 +01:00
cd203703e8 adding languages 2025-03-04 22:56:39 +01:00
8f2367b71e ff 2025-02-17 00:11:38 +01:00
101c7605d2 deint 2025-02-16 23:54:32 +01:00
a5b58e34e4 ff 2025-02-11 20:19:55 +01:00
a32e86550c ff 2025-02-11 19:55:12 +01:00
5de3778ae5 ff 2025-02-11 19:52:05 +01:00
81aab0657e ff 2025-02-11 19:48:38 +01:00
8514a0c152 ff 2025-02-02 17:01:22 +01:00
c846147c64 ff 2025-02-02 17:00:33 +01:00
e52297b2ba ff 2025-02-02 16:58:24 +01:00
655833f13e ff 2025-02-02 16:36:36 +01:00
03dd02ed87 ff 2025-02-02 16:25:19 +01:00
b6ee197536 ff 2025-02-02 16:14:16 +01:00
d8374ae9f2 ff 2025-02-02 16:12:11 +01:00
f262eaa120 ff 2025-02-02 16:10:59 +01:00
d940a6e92a ff 2025-02-02 16:03:09 +01:00
e1395aeca0 tf auto_crop 2025-02-02 16:00:57 +01:00
48841c5750 ff 2025-02-02 13:35:11 +01:00
d558bbf6bd ff 2025-02-02 13:34:06 +01:00
b05d989581 ff 2025-02-02 13:29:50 +01:00
bc8af53525 ff 2025-02-02 13:29:08 +01:00
6bd1587947 ff 2025-02-02 13:27:19 +01:00
7d6531b40e ff 2025-02-02 13:26:28 +01:00
ab435a4c76 ff 2025-02-02 13:24:06 +01:00
0a88e366b1 ff 2025-02-02 13:22:30 +01:00
1c80cd7d7d ff 2025-02-02 13:19:21 +01:00
a45c180aaa ff 2025-02-02 13:17:28 +01:00
0b204ff19c ff 2025-02-02 13:09:59 +01:00
d7ec5f7620 ff 2025-02-02 13:07:35 +01:00
3f64304374 ff 2025-02-02 13:05:28 +01:00
b459272149 ff 2025-02-02 13:02:08 +01:00
4b05fc194b ff 2025-02-02 12:53:55 +01:00
9d088819ab ff 2025-02-02 12:50:56 +01:00
e20f7a1f67 ff 2025-02-02 12:48:07 +01:00
9d683dfa84 tf h264/mkv 2025-02-02 12:46:45 +01:00
867756c661 ff 2025-02-02 12:22:34 +01:00
f81a6edb07 ff 2025-02-02 12:21:38 +01:00
ec4bce473c ff 2025-02-02 12:20:25 +01:00
bf882b741f ff 2025-02-02 12:19:36 +01:00
a4e25b5ec8 ff 2025-02-02 12:16:38 +01:00
ff6bacb0d5 ff 2025-02-02 12:11:26 +01:00
f32b7a06c0 ff 2025-02-02 12:10:56 +01:00
7ceed58e7b add cropdetect stub 2025-02-02 12:10:04 +01:00
153f401dd3 ff 2025-01-17 18:27:29 +01:00
7f1f34fb9f multiplicity iso languages 2025-01-17 18:25:25 +01:00
21fe7cb1eb ff 2025-01-14 22:10:20 +01:00
9e63184524 tf import dubtiles for movies 2025-01-14 22:02:19 +01:00
3742221189 5.0 channel layout 2025-01-14 21:30:04 +01:00
478ac15ab8 ff 2025-01-14 08:06:14 +01:00
ef0a01bc9b ff 2025-01-14 08:04:55 +01:00
802c11be44 ff 2025-01-14 08:00:41 +01:00
4cbb135772 ff 2025-01-14 08:00:14 +01:00
3d52442471 ff 2025-01-14 07:56:35 +01:00
81640192ab ff 2025-01-14 00:45:33 +01:00
81d760aabe ff 2025-01-14 00:44:10 +01:00
c0eff679f7 ff 2025-01-14 00:41:28 +01:00
07097058d7 add mpeg format 2025-01-14 00:30:57 +01:00
cd7a338541 ff 2024-12-31 11:12:11 +01:00
be652f8efb copy only mode 2024-12-31 10:56:17 +01:00
dd51b14d49 ff 2024-12-27 09:10:08 +01:00
a471808392 add mp3 codec 2024-12-27 09:08:59 +01:00
b3da8ce738 add mpeg-4 format 2024-12-18 22:00:57 +01:00
fe0c078c3f ff 2024-12-15 17:16:57 +01:00
962522b974 ff 2024-12-15 17:13:39 +01:00
24367ea08a ff 2024-12-15 17:13:13 +01:00
f0eebd0bea ff 2024-12-15 17:12:45 +01:00
c8e21b9260 modipy ansible role for pypi packaing 2024-12-15 17:02:03 +01:00
cdc1664779 adapt for manjaro 2024-12-15 16:53:36 +01:00
Maveno
2849eda05a perm filter out png thumbnails 2024-11-29 07:08:59 +01:00
Maveno
cfb2df8d66 pf png tracks 2024-11-27 23:40:22 +01:00
Maveno
12c8ad3782 add codec eac3 png 2024-11-25 08:13:02 +01:00
Maveno
74a39a8f9a #433 Descriptor Pattern Checks 2024-11-24 14:01:55 +01:00
Maveno
5eacb0d0cb #411 Input/Output Pfade 2024-11-24 13:21:46 +01:00
Maveno
e8c0c3d646 fix unchanged tracks for external files 2024-11-24 12:56:31 +01:00
Maveno
6b2671a1f5 ff 2024-11-23 18:08:24 +01:00
Maveno
2d8622506e Rework Descriptor Diff 2024-11-23 13:26:44 +01:00
Maveno
86cc7dfc6f nighl 2024-11-20 22:12:40 +01:00
Maveno
d84bee74c4 ff 2024-11-20 19:01:23 +01:00
Maveno
488caa7a08 ff 2024-11-19 07:56:37 +01:00
Maveno
62877dfed6 ff 2024-11-19 07:54:39 +01:00
Maveno
87ff94e204 add codec srt 2024-11-19 07:54:06 +01:00
Maveno
0c78ed7cf7 ff 2024-11-18 21:15:29 +01:00
Maveno
4db9bfd103 ff 2024-11-18 20:57:04 +01:00
193 changed files with 21738 additions and 3151 deletions

25
.gitignore vendored
View File

@@ -1,10 +1,27 @@
__pycache__
__pycache__/
*.py[cod]
junk/
.vscode
.ipynb_checkpoints/
ansible/inventory/hawaii.yml
ansible/inventory/peppermint.yml
tools/ansible/inventory/hawaii.yml
tools/ansible/inventory/peppermint.yml
tools/ansible/inventory/cappuccino.yml
tools/ansible/inventory/group_vars/all.yml
ffx_test_report.log
bin/conversiontest.py
*.egg-info/
tests/assets/
build/
dist/
*.egg-info/
.venv/
venv/
.codex
*.mkv
*.webm
*.mp4
ffmpeg2pass-0.log
*.sup

181
README.md
View File

@@ -1,48 +1,187 @@
# FFX
FFX is a local CLI and Textual TUI for inspecting TV episode files, storing normalization rules in SQLite, and converting outputs into a predictable stream, metadata, and filename layout.
## Requirements
- Linux-like environment
- `python3`
- `ffmpeg`
- `ffprobe`
- `cpulimit`
## Installation
per https:
FFX uses a two-step local setup flow.
### 1. Install The Bundle
This step creates or reuses the persistent bundle virtualenv in `~/.local/share/ffx.venv`, installs FFX into it, and ensures `ffx` is exposed through a shell alias.
```sh
pip install https://<URL>/<Releaser>/ffx.git@<Branch>
bash tools/setup.sh
```
per git:
If you also want the Python packages needed for the modern test suite:
```sh
pip install git+ssh://<Username>@<URL>/<Releaser>/ffx.git@<Branch>
bash tools/setup.sh --with-tests
```
## Version history
You can verify the bundle state without changing anything:
### 0.1.1
```sh
bash tools/setup.sh --check
```
Bugfixes, TMDB identify shows
### 2. Prepare System Dependencies And Local User Files
### 0.1.2
This step installs or verifies workstation dependencies and seeds local config and data directories. It is the step wrapped by the CLI command `ffx configure_workstation`.
Bugfixes
Run it directly:
### 0.1.3
```sh
bash tools/configure_workstation.sh
```
Subtitle file imports
Or through the installed CLI:
### 0.2.0
```sh
ffx configure_workstation
```
Tests, Config-File
Check-only mode is available in both forms:
### 0.2.1
```sh
bash tools/configure_workstation.sh --check
ffx configure_workstation --check
```
Signature, Tags cleaning, Bugfixes, Refactoring
`tools/configure_workstation.sh` does not manage the bundle virtualenv. Python-side test packages belong to `tools/setup.sh --with-tests`.
### 0.2.2
## Basic Usage
CLI-Overrides
Examples:
```sh
ffx version
ffx inspect /path/to/episode.mkv
ffx convert /path/to/episode.mkv
ffx shows
```
## Modern Tests
Install Python test packages first:
```sh
bash tools/setup.sh --with-tests
```
Then run the modern automatically discovered test suite:
```sh
./tools/test.sh
```
This runner uses `pytest` and intentionally excludes the legacy harness under `tests/legacy/`.
## Default Local Paths
- Config: `~/.local/etc/ffx.json`
- Database: `~/.local/var/ffx/ffx.db`
- Log file: `~/.local/var/log/ffx.log`
- Bundle venv: `~/.local/share/ffx.venv`
## TMDB
TMDB-backed metadata enrichment requires `TMDB_API_KEY` to be set in the environment.
## Version History
### 0.4.1
- `convert` now supports `--copy-video` and `--copy-audio` to keep the selected stream type in copy mode without applying the corresponding reencode flags, filters, or formatting options
- ffmpeg conversions now monitor diagnostics while the process is running, retry unset AVI packet timestamps once with `-fflags +genpts`, and stop early when a file should be skipped instead of waiting for the full job to finish
- end-of-run convert summaries now list only ffmpeg findings that still require review, including named remedy identifiers such as `warn-corrupt-mpeg-audio`
- `upgrade` now finishes by reporting the installed FFX version together with the active bundle branch
### 0.3.1
- debug mode screen titles now append the active Textual screen class name, making screen-specific troubleshooting easier during inspect and edit flows
- `--cut` again works as a combined flag/option: omitted disables cutting, bare `--cut` applies the default `60,180`, and explicit duration or `START,DURATION` values stay supported
- H.265 unmux commands no longer force an invalid `-f h265` output format, keeping ffmpeg copy extraction aligned with the required Annex B bitstream filter
- H.264 encoding now falls back from `libx264` to `libopenh264` with a warning when needed, and the test fixtures use the same encoder fallback so the suite remains portable across ffmpeg builds
### 0.3.0
- inspect and edit screens now refresh nested track and pattern changes more reliably, with inspect-mode tables aligned to the target pattern view shown in the differences pane
- metadata editing got a follow-up polish pass with clearer ffmpeg notifications, a shared in-screen log pane, safer apply/reload handling, and expanded cleanup and normalization coverage
- track and asset probing recognize additional codecs, and the modern test suite now covers more metadata-editor, change-set, screen-state, and asset-probe behavior
- Textual now requires version `8.0` or newer to match the UI APIs used by the current screens
### 0.2.6
- DB-free `ffx edit` workflow for in-place metadata editing via temporary-file rewrite
- inspect and edit workflows split into dedicated Textual screens with shared media-workflow support
- Textual tables and row actions now separate raw data from rendered labels to avoid markup leaking into stored metadata
- responsive screen layout pass, `Esc` back handling, sortable show/inspect tables, and improved edit-screen notifications/toggles
- application-wide UTF-8 i18n catalogs with language precedence from CLI over config over system over German default
- metadata normalization extended for localized subtitle titles, ISO language cleanup, and smarter track editor language/title helpers
### 0.2.5
- show-level quality and notes fields
- pattern-over-show-over-default season-shift resolution with dynamic DB migration loading
- migration prompt now reports the upgrade path and creates an in-place DB backup before applying schema changes
- `upgrade --branch <name>` now fetches remote-only branches before switching
- `unmux` now applies season shifting to subtitle output filenames
- convert now keeps DB-defined target subtitle dispositions authoritative over sidecar filename disposition flags when a pattern definition exists
- focused modern tests added around migrations, unmux, upgrade, and subtitle-disposition import precedence
### 0.2.4
- lightweight CLI commands now stay import-light via lazy runtime loading
- setup/config templating moved to `assets/ffx.json.j2`
- aligned two-step local setup wrappers: `ffx setup` and `ffx configure_workstation`
- combined `ffprobe` payload reuse in `FileProperties`
- configurable crop-detect sampling plus per-process crop result caching
- single-query controller accessors and conditional DB schema bootstrap
- shared screen bootstrap/controller wiring for large detail screens
- configurable default season/episode digit lengths
- digit-aware `rename` and padded `unmux` filename markers
### 0.2.3
PyPi packaging
Templating output filename
Season shifting
DB-Versionierung
- PyPI packaging
- output filename templating
- season shifting
- DB versioning
### 0.2.2
- CLI overrides
### 0.2.1
- signature handling
- tag cleanup
- bugfixes and refactoring
### 0.2.0
- tests
- config file
### 0.1.3
- subtitle file imports
### 0.1.2
- bugfixes
### 0.1.1
- bugfixes
- TMDB show identification

37
assets/ffx.json.j2 Normal file
View File

@@ -0,0 +1,37 @@
{
"language": {{ language_json }},
"databasePath": {{ database_path_json }},
"logDirectory": {{ log_directory_json }},
"subtitlesDirectory": {{ subtitles_directory_json }},
"defaultIndexSeasonDigits": {{ default_index_season_digits }},
"defaultIndexEpisodeDigits": {{ default_index_episode_digits }},
"defaultIndicatorSeasonDigits": {{ default_indicator_season_digits }},
"defaultIndicatorEpisodeDigits": {{ default_indicator_episode_digits }},
"metadata": {
"signature": {
"RECODED_WITH": "FFX"
},
"remove": [
"VERSION-eng",
"creation_time",
"NAME"
],
"streams": {
"remove": [
"BPS",
"NUMBER_OF_FRAMES",
"NUMBER_OF_BYTES",
"_STATISTICS_WRITING_APP",
"_STATISTICS_WRITING_DATE_UTC",
"_STATISTICS_TAGS",
"BPS-eng",
"DURATION-eng",
"NUMBER_OF_FRAMES-eng",
"NUMBER_OF_BYTES-eng",
"_STATISTICS_WRITING_APP-eng",
"_STATISTICS_WRITING_DATE_UTC-eng",
"_STATISTICS_TAGS-eng"
]
}
}
}

361
assets/i18n/de.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "Abchasisch",
"AFAR": "Afar",
"AFRIKAANS": "Afrikaans",
"AKAN": "Akan",
"ALBANIAN": "Albanisch",
"AMHARIC": "Amharisch",
"ARABIC": "Arabisch",
"ARAGONESE": "Aragonesisch",
"ARMENIAN": "Armenisch",
"ASSAMESE": "Assamesisch",
"AVARIC": "Awarisch",
"AVESTAN": "Avestisch",
"AYMARA": "Aymara",
"AZERBAIJANI": "Aserbaidschanisch",
"BAMBARA": "Bambara",
"BASHKIR": "Baschkirisch",
"BASQUE": "Baskisch",
"BELARUSIAN": "Weißrussisch",
"BENGALI": "Bengalisch",
"BISLAMA": "Bislama",
"BOKMAL": "Bokmål",
"BOSNIAN": "Bosnisch",
"BRETON": "Bretonisch",
"BULGARIAN": "Bulgarisch",
"BURMESE": "Burmesisch",
"CATALAN": "Katalanisch",
"CHAMORRO": "Chamorro",
"CHECHEN": "Tschetschenisch",
"CHICHEWA": "Chichewa",
"CHINESE": "Chinesisch",
"CHURCH_SLAVIC": "Kirchenslawisch",
"CHUVASH": "Tschuwaschisch",
"CORNISH": "Kornisch",
"CORSICAN": "Korsisch",
"CREE": "Cree",
"CROATIAN": "Kroatisch",
"CZECH": "Tschechisch",
"DANISH": "Dänisch",
"DIVEHI": "Divehi",
"DUTCH": "Niederländisch",
"DZONGKHA": "Dzongkha",
"ENGLISH": "Englisch",
"ESPERANTO": "Esperanto",
"ESTONIAN": "Estnisch",
"EWE": "Ewe-Sprache",
"FAROESE": "Färöisch",
"FIJIAN": "Fidschianisch",
"FILIPINO": "Filipino",
"FINNISH": "Finnisch",
"FRENCH": "Französisch",
"FULAH": "Ful",
"GALICIAN": "Galizisch",
"GANDA": "Ganda",
"GEORGIAN": "Georgisch",
"GERMAN": "Deutsch",
"GREEK": "Griechisch",
"GUARANI": "Guaraní",
"GUJARATI": "Gujarati",
"HAITIAN": "Haitianisch",
"HAUSA": "Haussa",
"HEBREW": "Hebräisch",
"HERERO": "Herero",
"HINDI": "Hindi",
"HIRI_MOTU": "Hiri-Motu",
"HUNGARIAN": "Ungarisch",
"ICELANDIC": "Isländisch",
"IDO": "Ido",
"IGBO": "Ibo",
"INDONESIAN": "Indonesisch",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "Inuktitut",
"INUPIAQ": "Inupiaq",
"IRISH": "Irisch",
"ITALIAN": "Italienisch",
"JAPANESE": "Japanisch",
"JAVANESE": "Javanisch",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "Kannada",
"KANURI": "Kanuri",
"KASHMIRI": "Kaschmirisch",
"KAZAKH": "Kasachisch",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "Kinyarwanda",
"KIRGHIZ": "Kirgisisch",
"KOMI": "Komi",
"KONGO": "Kongo",
"KOREAN": "Koreanisch",
"KUANYAMA": "Kuanyama",
"KURDISH": "Kurdisch",
"LAO": "Laotisch",
"LATIN": "Lateinisch",
"LATVIAN": "Lettisch",
"LIMBURGAN": "Limburgisch",
"LINGALA": "Lingala",
"LITHUANIAN": "Litauisch",
"LUBA_KATANGA": "Luba-Katanga",
"LUXEMBOURGISH": "Luxemburgisch",
"MACEDONIAN": "Makedonisch",
"MALAGASY": "Malagasi",
"MALAY": "Malaiisch",
"MALAYALAM": "Malayalam",
"MALTESE": "Maltesisch",
"MANX": "Manx",
"MAORI": "Maori",
"MARATHI": "Marathi",
"MARSHALLESE": "Marschallesisch",
"MONGOLIAN": "Mongolisch",
"NAURU": "Nauru",
"NAVAJO": "Navajo",
"NDONGA": "Ndonga",
"NEPALI": "Nepali",
"NORTHERN_SAMI": "Nord-Samisch",
"NORTH_NDEBELE": "Nord-Ndebele",
"NORWEGIAN": "Norwegisch",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Okzitanisch",
"OJIBWA": "Ojibwa",
"ORIYA": "Oriya",
"OROMO": "Oromo",
"OSSETIAN": "Ossetisch",
"PALI": "Pali",
"PANJABI": "Panjabi",
"PERSIAN": "Persisch",
"POLISH": "Polnisch",
"PORTUGUESE": "Portugiesisch",
"PUSHTO": "Paschtu",
"QUECHUA": "Quechua",
"ROMANIAN": "Rumänisch",
"ROMANSH": "Bündnerromanisch",
"RUNDI": "Kirundi",
"RUSSIAN": "Russisch",
"SAMOAN": "Samoanisch",
"SANGO": "Sango",
"SANSKRIT": "Sanskrit",
"SARDINIAN": "Sardisch",
"SCOTTISH_GAELIC": "Schottisch-Gälisch",
"SERBIAN": "Serbisch",
"SHONA": "Schona",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "Sindhi",
"SINHALA": "Singhalesisch",
"SLOVAK": "Slowakisch",
"SLOVENIAN": "Slowenisch",
"SOMALI": "Somali",
"SOUTHERN_SOTHO": "Süd-Sotho",
"SOUTH_NDEBELE": "Süd-Ndebele",
"SPANISH": "Spanisch",
"SUNDANESE": "Sundanesisch",
"SWAHILI": "Suaheli; Swaheli",
"SWATI": "Swazi",
"SWEDISH": "Schwedisch",
"TAGALOG": "Tagalog",
"TAHITIAN": "Tahitisch",
"TAJIK": "Tadschikisch",
"TAMIL": "Tamilisch",
"TATAR": "Tatarisch",
"TELUGU": "Telugu",
"THAI": "Thai",
"TIBETAN": "Tibetisch",
"TIGRINYA": "Tigrinja",
"TONGA": "Tonga",
"TSONGA": "Tsonga",
"TSWANA": "Tswana",
"TURKISH": "Türkisch",
"TURKMEN": "Turkmenisch",
"TWI": "Twi",
"UIGHUR": "Uigurisch",
"UKRAINIAN": "Ukrainisch",
"UNDEFINED": "undefiniert",
"URDU": "Urdu",
"UZBEK": "Usbekisch",
"VENDA": "Venda",
"VIETNAMESE": "Vietnamesisch",
"VOLAPUK": "Volapük",
"WALLOON": "Wallonisch",
"WELSH": "Walisisch",
"WESTERN_FRISIAN": "Westfriesisch",
"WOLOF": "Wolof",
"XHOSA": "Xhosa",
"YIDDISH": "Jiddisch",
"YORUBA": "Joruba",
"ZHUANG": "Zhuang",
"ZULU": "Zulu"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<Neue Serie>",
"Add": "Hinzufügen",
"Add Pattern": "Muster hinzufügen",
"Apply": "Anwenden",
"Apply failed: {error}": "Anwenden fehlgeschlagen: {error}",
"Are you sure to delete the following filename pattern?": "Möchtest du das folgende Dateinamensmuster wirklich löschen?",
"Are you sure to delete the following shifted season?": "Möchtest du die folgende verschobene Staffel wirklich löschen?",
"Are you sure to delete the following show?": "Möchtest du die folgende Serie wirklich löschen?",
"Are you sure to delete the following {track_type} track?": "Möchtest du den folgenden {track_type}-Stream wirklich löschen?",
"Are you sure to delete this tag?": "Möchtest du dieses Tag wirklich löschen?",
"Audio Layout": "Audiolayout",
"Back": "Zurück",
"Cancel": "Abbrechen",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "Es kann kein weiterer Stream mit gesetztem Dispositions-Flag 'default' oder 'forced' hinzugefügt werden",
"Changes applied and file reloaded.": "Änderungen angewendet und Datei neu geladen.",
"Cleanup": "Bereinigen",
"Cleanup disabled.": "Bereinigung deaktiviert.",
"Cleanup enabled.": "Bereinigung aktiviert.",
"Codec": "Codec",
"Continuing edit session.": "Bearbeitung wird fortgesetzt.",
"Default": "Standard",
"Delete": "Löschen",
"Delete Show": "Serie löschen",
"Deleted media tag {tag!r}.": "Medien-Tag {tag!r} gelöscht.",
"Differences": "Unterschiede",
"Differences (file->db/output)": "Unterschiede (Datei->DB/Ausgabe)",
"Discard": "Verwerfen",
"Discard pending metadata changes and quit?": "Ausstehende Metadatenänderungen verwerfen und beenden?",
"Discard pending metadata changes and reload the file state?": "Ausstehende Metadatenänderungen verwerfen und Dateistand neu laden?",
"Down": "Runter",
"Dry-run: would rewrite via temporary file {target_path}": "Trockenlauf: würde über temporäre Datei {target_path} neu schreiben",
"Edit": "Bearbeiten",
"Edit Pattern": "Muster bearbeiten",
"Edit Show": "Serie bearbeiten",
"Edit filename pattern": "Dateinamensmuster bearbeiten",
"Edit shifted season": "Verschobene Staffel bearbeiten",
"Edit stream": "Stream bearbeiten",
"Episode Offset": "Episodenoffset",
"Episode offset": "Episodenoffset",
"File": "Datei",
"File patterns": "Datei-Namensmuster",
"First Episode": "Erste Episode",
"First episode": "Erste Episode",
"Forced": "Erzwungen",
"Help": "Hilfe",
"Help Screen": "Hilfe-Bildschirm",
"ID": "ID",
"Identify": "Identifizieren",
"Index": "Index",
"Index / Subindex": "Index / Unterindex",
"Index Episode Digits": "Ep. Index Stellen",
"Index Season Digits": "Sta. Index Stellen",
"Indicator Edisode Digits": "Ep. Indikator Stellen",
"Indicator Season Digits": "Sta. Indikator Stellen",
"Keep Editing": "Weiter bearbeiten",
"Keeping pending changes.": "Ausstehende Änderungen bleiben erhalten.",
"Key": "Schlüssel",
"Language": "Sprache",
"Last Episode": "Letzte Episode",
"Last episode": "Letzte Episode",
"Layout": "Layout",
"Media Tags": "Medien-Tags",
"More than one default audio stream detected and no prompt set": "Mehr als ein Standard-Audiostream erkannt und keine Abfrage aktiviert",
"More than one default audio stream detected! Please select stream": "Mehr als ein Standard-Audiostream erkannt! Bitte Stream auswählen",
"More than one default subtitle stream detected and no prompt set": "Mehr als ein Standard-Untertitelstream erkannt und keine Abfrage aktiviert",
"More than one default subtitle stream detected! Please select stream": "Mehr als ein Standard-Untertitelstream erkannt! Bitte Stream auswählen",
"More than one default video stream detected and no prompt set": "Mehr als ein Standard-Videostream erkannt und keine Abfrage aktiviert",
"More than one default video stream detected! Please select stream": "Mehr als ein Standard-Videostream erkannt! Bitte Stream auswählen",
"More than one forced audio stream detected and no prompt set": "Mehr als ein erzwungener Audiostream erkannt und keine Abfrage aktiviert",
"More than one forced audio stream detected! Please select stream": "Mehr als ein erzwungener Audiostream erkannt! Bitte Stream auswählen",
"More than one forced subtitle stream detected and no prompt set": "Mehr als ein erzwungener Untertitelstream erkannt und keine Abfrage aktiviert",
"More than one forced subtitle stream detected! Please select stream": "Mehr als ein erzwungener Untertitelstream erkannt! Bitte Stream auswählen",
"More than one forced video stream detected and no prompt set": "Mehr als ein erzwungener Videostream erkannt und keine Abfrage aktiviert",
"More than one forced video stream detected! Please select stream": "Mehr als ein erzwungener Videostream erkannt! Bitte Stream auswählen",
"Name": "Name",
"New Pattern": "Neues Muster",
"New Show": "Neue Serie",
"New filename pattern": "Neues Dateinamensmuster",
"New shifted season": "Neue verschobene Staffel",
"New stream": "Neuer Stream",
"No": "Nein",
"No changes to apply.": "Keine Änderungen zum Anwenden.",
"No changes to revert.": "Keine Änderungen zum Zurücksetzen.",
"Normalization disabled.": "Normalisierung deaktiviert.",
"Normalization enabled.": "Normalisierung aktiviert.",
"Normalize": "Normalisieren",
"Notes": "Notizen",
"Pattern": "Muster",
"Planned Changes (file->edited output)": "Geplante Änderungen (Datei->bearbeitete Ausgabe)",
"Quality": "Qualität",
"Quit": "Beenden",
"Remove Pattern": "Muster entfernen",
"Revert": "Zurücksetzen",
"Reverted pending changes.": "Ausstehende Änderungen verworfen.",
"Save": "Speichern",
"Season Offset": "Staffeloffset",
"Select a stream first.": "Bitte zuerst einen Stream auswählen.",
"Set Default": "Als Standard setzen",
"Set Forced": "Als erzwungen setzen",
"Settings Screen": "Einstellungsbildschirm",
"Numbering Mapping": "Abbildung Nummerierung",
"Show": "Serie",
"Shows": "Serien",
"Source Season": "Quellstaffel",
"SrcIndex": "QuellIndex",
"Status": "Status",
"Stay": "Bleiben",
"Stream dispositions": "Stream-Dispositionen",
"Stream tags": "Stream-Tags",
"Streams": "Streams",
"SubIndex": "Unterindex",
"Substitute": "Ersetzen",
"Substitute pattern": "Muster ersetzen",
"Title": "Titel",
"Type": "Typ",
"Unable to update selected stream.": "Ausgewählter Stream konnte nicht aktualisiert werden.",
"Up": "Hoch",
"Update Pattern": "Muster aktualisieren",
"Updated media tag {tag!r}.": "Medien-Tag {tag!r} aktualisiert.",
"Updated stream #{index} ({track_type}).": "Stream #{index} ({track_type}) aktualisiert.",
"Value": "Wert",
"Year": "Jahr",
"Yes": "Ja",
"add media tag: key='{key}' value='{value}'": "Medien-Tag hinzufügen: Schlüssel='{key}' Wert='{value}'",
"add {track_type} track: index={index} lang={language}": "{track_type}-Stream hinzufügen: Index={index} Sprache={language}",
"attached_pic": "attached_pic",
"attachment": "Anhang",
"audio": "Audio",
"captions": "Untertitel",
"change media tag: key='{key}' value='{value}'": "Medien-Tag ändern: Schlüssel='{key}' Wert='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "Stream #{index} ({track_type}:{sub_index}) Disposition hinzufügen={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "Stream #{index} ({track_type}:{sub_index}) Schlüssel hinzufügen={key} Wert={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "Stream #{index} ({track_type}:{sub_index}) Schlüssel ändern={key} Wert={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "Stream #{index} ({track_type}:{sub_index}) Disposition entfernen={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "Stream #{index} ({track_type}:{sub_index}) Schlüssel entfernen={key} Wert={value}",
"clean_effects": "Nur Effekte",
"comment": "Kommentar",
"default": "Standard",
"dependent": "abhängig",
"descriptions": "Beschreibungen",
"dub": "Synchronisiert",
"for pattern": "für Muster",
"forced": "erzwungen",
"from": "von",
"from pattern": "aus Muster",
"from show": "aus Serie",
"hearing_impaired": "hörgeschädigt",
"karaoke": "Karaoke",
"lyrics": "Liedtext",
"metadata": "Metadaten",
"non_diegetic": "nicht-diegetisch",
"original": "Original",
"pattern #{id}": "Muster #{id}",
"remove media tag: key='{key}' value='{value}'": "Medien-Tag entfernen: Schlüssel='{key}' Wert='{value}'",
"remove stream #{index}": "Stream #{index} entfernen",
"show #{id}": "Serie #{id}",
"stereo": "Stereo",
"still_image": "Standbild",
"sub index": "Unterindex",
"subtitle": "Untertitel",
"timed_thumbnails": "zeitgesteuerte Vorschaubilder",
"undefined": "undefiniert",
"unknown": "unbekannt",
"video": "Video",
"visual_impaired": "sehgeschädigt"
}
}

360
assets/i18n/en.json Normal file
View File

@@ -0,0 +1,360 @@
{
"iso_languages": {
"ABKHAZIAN": "Abkhazian",
"AFAR": "Afar",
"AFRIKAANS": "Afrikaans",
"AKAN": "Akan",
"ALBANIAN": "Albanian",
"AMHARIC": "Amharic",
"ARABIC": "Arabic",
"ARAGONESE": "Aragonese",
"ARMENIAN": "Armenian",
"ASSAMESE": "Assamese",
"AVARIC": "Avaric",
"AVESTAN": "Avestan",
"AYMARA": "Aymara",
"AZERBAIJANI": "Azerbaijani",
"BAMBARA": "Bambara",
"BASHKIR": "Bashkir",
"BASQUE": "Basque",
"BELARUSIAN": "Belarusian",
"BENGALI": "Bengali",
"BISLAMA": "Bislama",
"BOKMAL": "Bokmål",
"BOSNIAN": "Bosnian",
"BRETON": "Breton",
"BULGARIAN": "Bulgarian",
"BURMESE": "Burmese",
"CATALAN": "Catalan",
"CHAMORRO": "Chamorro",
"CHECHEN": "Chechen",
"CHICHEWA": "Chichewa",
"CHINESE": "Chinese",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "Chuvash",
"CORNISH": "Cornish",
"CORSICAN": "Corsican",
"CREE": "Cree",
"CROATIAN": "Croatian",
"CZECH": "Czech",
"DANISH": "Danish",
"DIVEHI": "Divehi",
"DUTCH": "Dutch",
"DZONGKHA": "Dzongkha",
"ENGLISH": "English",
"ESPERANTO": "Esperanto",
"ESTONIAN": "Estonian",
"EWE": "Ewe",
"FAROESE": "Faroese",
"FIJIAN": "Fijian",
"FILIPINO": "Filipino",
"FINNISH": "Finnish",
"FRENCH": "French",
"FULAH": "Fulah",
"GALICIAN": "Galician",
"GANDA": "Ganda",
"GEORGIAN": "Georgian",
"GERMAN": "German",
"GREEK": "Greek",
"GUARANI": "Guarani",
"GUJARATI": "Gujarati",
"HAITIAN": "Haitian",
"HAUSA": "Hausa",
"HEBREW": "Hebrew",
"HERERO": "Herero",
"HINDI": "Hindi",
"HIRI_MOTU": "Hiri Motu",
"HUNGARIAN": "Hungarian",
"ICELANDIC": "Icelandic",
"IDO": "Ido",
"IGBO": "Igbo",
"INDONESIAN": "Indonesian",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "Inuktitut",
"INUPIAQ": "Inupiaq",
"IRISH": "Irish",
"ITALIAN": "Italian",
"JAPANESE": "Japanese",
"JAVANESE": "Javanese",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "Kannada",
"KANURI": "Kanuri",
"KASHMIRI": "Kashmiri",
"KAZAKH": "Kazakh",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "Kinyarwanda",
"KIRGHIZ": "Kirghiz",
"KOMI": "Komi",
"KONGO": "Kongo",
"KOREAN": "Korean",
"KUANYAMA": "Kuanyama",
"KURDISH": "Kurdish",
"LAO": "Lao",
"LATIN": "Latin",
"LATVIAN": "Latvian",
"LIMBURGAN": "Limburgan",
"LINGALA": "Lingala",
"LITHUANIAN": "Lithuanian",
"LUBA_KATANGA": "Luba-Katanga",
"LUXEMBOURGISH": "Luxembourgish",
"MACEDONIAN": "Macedonian",
"MALAGASY": "Malagasy",
"MALAY": "Malay",
"MALAYALAM": "Malayalam",
"MALTESE": "Maltese",
"MANX": "Manx",
"MAORI": "Maori",
"MARATHI": "Marathi",
"MARSHALLESE": "Marshallese",
"MONGOLIAN": "Mongolian",
"NAURU": "Nauru",
"NAVAJO": "Navajo",
"NDONGA": "Ndonga",
"NEPALI": "Nepali",
"NORTHERN_SAMI": "Northern Sami",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "Norwegian",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "Ojibwa",
"ORIYA": "Oriya",
"OROMO": "Oromo",
"OSSETIAN": "Ossetian",
"PALI": "Pali",
"PANJABI": "Panjabi",
"PERSIAN": "Persian",
"POLISH": "Polish",
"PORTUGUESE": "Portuguese",
"PUSHTO": "Pushto",
"QUECHUA": "Quechua",
"ROMANIAN": "Romanian",
"ROMANSH": "Romansh",
"RUNDI": "Rundi",
"RUSSIAN": "Russian",
"SAMOAN": "Samoan",
"SANGO": "Sango",
"SANSKRIT": "Sanskrit",
"SARDINIAN": "Sardinian",
"SCOTTISH_GAELIC": "Scottish Gaelic",
"SERBIAN": "Serbian",
"SHONA": "Shona",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "Sindhi",
"SINHALA": "Sinhala",
"SLOVAK": "Slovak",
"SLOVENIAN": "Slovenian",
"SOMALI": "Somali",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "Spanish",
"SUNDANESE": "Sundanese",
"SWAHILI": "Swahili",
"SWATI": "Swati",
"SWEDISH": "Swedish",
"TAGALOG": "Tagalog",
"TAHITIAN": "Tahitian",
"TAJIK": "Tajik",
"TAMIL": "Tamil",
"TATAR": "Tatar",
"TELUGU": "Telugu",
"THAI": "Thai",
"TIBETAN": "Tibetan",
"TIGRINYA": "Tigrinya",
"TONGA": "Tonga",
"TSONGA": "Tsonga",
"TSWANA": "Tswana",
"TURKISH": "Turkish",
"TURKMEN": "Turkmen",
"TWI": "Twi",
"UIGHUR": "Uighur",
"UKRAINIAN": "Ukrainian",
"UNDEFINED": "undefined",
"URDU": "Urdu",
"UZBEK": "Uzbek",
"VENDA": "Venda",
"VIETNAMESE": "Vietnamese",
"VOLAPUK": "Volapük",
"WALLOON": "Walloon",
"WELSH": "Welsh",
"WESTERN_FRISIAN": "Western Frisian",
"WOLOF": "Wolof",
"XHOSA": "Xhosa",
"YIDDISH": "Yiddish",
"YORUBA": "Yoruba",
"ZHUANG": "Zhuang",
"ZULU": "Zulu"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<New show>",
"Add": "Add",
"Add Pattern": "Add Pattern",
"Apply": "Apply",
"Apply failed: {error}": "Apply failed: {error}",
"Are you sure to delete the following filename pattern?": "Are you sure to delete the following filename pattern?",
"Are you sure to delete the following shifted season?": "Are you sure to delete the following shifted season?",
"Are you sure to delete the following show?": "Are you sure to delete the following show?",
"Are you sure to delete the following {track_type} track?": "Are you sure to delete the following {track_type} track?",
"Are you sure to delete this tag?": "Are you sure to delete this tag?",
"Audio Layout": "Audio Layout",
"Back": "Back",
"Cancel": "Cancel",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "Cannot add another stream with disposition flag 'default' or 'forced' set",
"Changes applied and file reloaded.": "Changes applied and file reloaded.",
"Cleanup": "Cleanup",
"Cleanup disabled.": "Cleanup disabled.",
"Cleanup enabled.": "Cleanup enabled.",
"Codec": "Codec",
"Continuing edit session.": "Continuing edit session.",
"Default": "Default",
"Delete": "Delete",
"Delete Show": "Delete Show",
"Deleted media tag {tag!r}.": "Deleted media tag {tag!r}.",
"Differences": "Differences",
"Differences (file->db/output)": "Differences (file->db/output)",
"Discard": "Discard",
"Discard pending metadata changes and quit?": "Discard pending metadata changes and quit?",
"Discard pending metadata changes and reload the file state?": "Discard pending metadata changes and reload the file state?",
"Down": "Down",
"Dry-run: would rewrite via temporary file {target_path}": "Dry-run: would rewrite via temporary file {target_path}",
"Edit": "Edit",
"Edit Pattern": "Edit Pattern",
"Edit Show": "Edit Show",
"Edit filename pattern": "Edit filename pattern",
"Edit shifted season": "Edit shifted season",
"Edit stream": "Edit stream",
"Episode Offset": "Episode Offset",
"Episode offset": "Episode offset",
"File": "File",
"File patterns": "File patterns",
"First Episode": "First Episode",
"First episode": "First episode",
"Forced": "Forced",
"Help": "Help",
"Help Screen": "Help Screen",
"ID": "ID",
"Identify": "Identify",
"Index": "Index",
"Index / Subindex": "Index / Subindex",
"Index Episode Digits": "Index Episode Digits",
"Index Season Digits": "Index Season Digits",
"Indicator Edisode Digits": "Indicator Episode Digits",
"Indicator Season Digits": "Indicator Season Digits",
"Keep Editing": "Keep Editing",
"Keeping pending changes.": "Keeping pending changes.",
"Key": "Key",
"Language": "Language",
"Last Episode": "Last Episode",
"Last episode": "Last episode",
"Layout": "Layout",
"Media Tags": "Media Tags",
"More than one default audio stream detected and no prompt set": "More than one default audio stream detected and no prompt set",
"More than one default audio stream detected! Please select stream": "More than one default audio stream detected! Please select stream",
"More than one default subtitle stream detected and no prompt set": "More than one default subtitle stream detected and no prompt set",
"More than one default subtitle stream detected! Please select stream": "More than one default subtitle stream detected! Please select stream",
"More than one default video stream detected and no prompt set": "More than one default video stream detected and no prompt set",
"More than one default video stream detected! Please select stream": "More than one default video stream detected! Please select stream",
"More than one forced audio stream detected and no prompt set": "More than one forced audio stream detected and no prompt set",
"More than one forced audio stream detected! Please select stream": "More than one forced audio stream detected! Please select stream",
"More than one forced subtitle stream detected and no prompt set": "More than one forced subtitle stream detected and no prompt set",
"More than one forced subtitle stream detected! Please select stream": "More than one forced subtitle stream detected! Please select stream",
"More than one forced video stream detected and no prompt set": "More than one forced video stream detected and no prompt set",
"More than one forced video stream detected! Please select stream": "More than one forced video stream detected! Please select stream",
"Name": "Name",
"New Pattern": "New Pattern",
"New Show": "New Show",
"New filename pattern": "New filename pattern",
"New shifted season": "New shifted season",
"New stream": "New stream",
"No": "No",
"No changes to apply.": "No changes to apply.",
"No changes to revert.": "No changes to revert.",
"Normalization disabled.": "Normalization disabled.",
"Normalization enabled.": "Normalization enabled.",
"Normalize": "Normalize",
"Notes": "Notes",
"Pattern": "Pattern",
"Planned Changes (file->edited output)": "Planned Changes (file->edited output)",
"Quality": "Quality",
"Quit": "Quit",
"Remove Pattern": "Remove Pattern",
"Revert": "Revert",
"Reverted pending changes.": "Reverted pending changes.",
"Save": "Save",
"Season Offset": "Season Offset",
"Select a stream first.": "Select a stream first.",
"Set Default": "Set Default",
"Set Forced": "Set Forced",
"Settings Screen": "Settings Screen",
"Numbering Mapping": "Numbering Mapping",
"Show": "Show",
"Shows": "Shows",
"SrcIndex": "SrcIndex",
"Status": "Status",
"Stay": "Stay",
"Stream dispositions": "Stream dispositions",
"Stream tags": "Stream tags",
"Streams": "Streams",
"SubIndex": "SubIndex",
"Substitute": "Substitute",
"Substitute pattern": "Substitute pattern",
"Title": "Title",
"Type": "Type",
"Unable to update selected stream.": "Unable to update selected stream.",
"Up": "Up",
"Update Pattern": "Update Pattern",
"Updated media tag {tag!r}.": "Updated media tag {tag!r}.",
"Updated stream #{index} ({track_type}).": "Updated stream #{index} ({track_type}).",
"Value": "Value",
"Year": "Year",
"Yes": "Yes",
"add media tag: key='{key}' value='{value}'": "add media tag: key='{key}' value='{value}'",
"add {track_type} track: index={index} lang={language}": "add {track_type} track: index={index} lang={language}",
"attached_pic": "attached_pic",
"attachment": "attachment",
"audio": "audio",
"captions": "captions",
"change media tag: key='{key}' value='{value}'": "change media tag: key='{key}' value='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}",
"clean_effects": "clean_effects",
"comment": "comment",
"default": "default",
"dependent": "dependent",
"descriptions": "descriptions",
"dub": "dub",
"for pattern": "for pattern",
"forced": "forced",
"from": "from",
"from pattern": "from pattern",
"from show": "from show",
"hearing_impaired": "hearing_impaired",
"karaoke": "karaoke",
"lyrics": "lyrics",
"metadata": "metadata",
"non_diegetic": "non_diegetic",
"original": "original",
"pattern #{id}": "pattern #{id}",
"remove media tag: key='{key}' value='{value}'": "remove media tag: key='{key}' value='{value}'",
"remove stream #{index}": "remove stream #{index}",
"show #{id}": "show #{id}",
"stereo": "stereo",
"still_image": "still_image",
"sub index": "sub index",
"subtitle": "subtitle",
"timed_thumbnails": "timed_thumbnails",
"undefined": "undefined",
"unknown": "unknown",
"video": "video",
"visual_impaired": "visual_impaired"
}
}

361
assets/i18n/eo.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "Abĥaza",
"AFAR": "Afara",
"AFRIKAANS": "Afrikansa",
"AKAN": "Akana",
"ALBANIAN": "Albana",
"AMHARIC": "Amhara",
"ARABIC": "Araba",
"ARAGONESE": "Aragona",
"ARMENIAN": "Armena",
"ASSAMESE": "Asama",
"AVARIC": "Avara",
"AVESTAN": "Avesta",
"AYMARA": "Ajmara",
"AZERBAIJANI": "Azerbajĝana",
"BAMBARA": "Bambara",
"BASHKIR": "Baŝkira",
"BASQUE": "Eŭska",
"BELARUSIAN": "Belorusa",
"BENGALI": "Bengala",
"BISLAMA": "Bislamo",
"BOKMAL": "Bokmål",
"BOSNIAN": "Bosna",
"BRETON": "Bretona",
"BULGARIAN": "Bulgara",
"BURMESE": "Birma",
"CATALAN": "Kataluna",
"CHAMORRO": "Ĉamora",
"CHECHEN": "Ĉeĉena",
"CHICHEWA": "Chichewa",
"CHINESE": "Ĉina",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "Ĉuvaŝa",
"CORNISH": "Kornvala",
"CORSICAN": "Korsika",
"CREE": "Kria",
"CROATIAN": "Kroata",
"CZECH": "Ĉeĥa",
"DANISH": "Dana",
"DIVEHI": "Divehi",
"DUTCH": "Nederlanda",
"DZONGKHA": "Dzonka",
"ENGLISH": "Angla",
"ESPERANTO": "Esperanto",
"ESTONIAN": "Estona",
"EWE": "Evea",
"FAROESE": "Feroa",
"FIJIAN": "Fiĝia",
"FILIPINO": "Filipino",
"FINNISH": "Finna",
"FRENCH": "Franca",
"FULAH": "Fula",
"GALICIAN": "Galega",
"GANDA": "Ganda",
"GEORGIAN": "Kartvela",
"GERMAN": "Germana",
"GREEK": "Greka",
"GUARANI": "Gvarania",
"GUJARATI": "Guĝarata",
"HAITIAN": "Haitian",
"HAUSA": "Haŭsa",
"HEBREW": "Hebrea",
"HERERO": "Herera",
"HINDI": "Hindia",
"HIRI_MOTU": "Hirimotua",
"HUNGARIAN": "Hungara",
"ICELANDIC": "Islanda",
"IDO": "Ido",
"IGBO": "Igba",
"INDONESIAN": "Indonezia",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "Inuktituta",
"INUPIAQ": "Inupiaka",
"IRISH": "Irlanda",
"ITALIAN": "Itala",
"JAPANESE": "Japana",
"JAVANESE": "Java",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "Kanara",
"KANURI": "Kanura",
"KASHMIRI": "Kaŝmira",
"KAZAKH": "Kazaĥa",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "Ruanda",
"KIRGHIZ": "Kirghiz",
"KOMI": "Komia",
"KONGO": "Konga",
"KOREAN": "Korea",
"KUANYAMA": "Kuanyama",
"KURDISH": "Kurda",
"LAO": "Laosa",
"LATIN": "Latina",
"LATVIAN": "Latva",
"LIMBURGAN": "Limburgan",
"LINGALA": "Lingala",
"LITHUANIAN": "Litova",
"LUBA_KATANGA": "Luba-katanga",
"LUXEMBOURGISH": "Luxembourgish",
"MACEDONIAN": "Makedona",
"MALAGASY": "Malagasa",
"MALAY": "Malaja",
"MALAYALAM": "Malajala",
"MALTESE": "Malta",
"MANX": "Manksa",
"MAORI": "Maoria",
"MARATHI": "Marata",
"MARSHALLESE": "Marŝala",
"MONGOLIAN": "Mongola",
"NAURU": "Naura",
"NAVAJO": "Navajo",
"NDONGA": "Ndonga",
"NEPALI": "Nepala",
"NORTHERN_SAMI": "Norda samea",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "Norvega",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "Oĝibva",
"ORIYA": "Orija",
"OROMO": "Oroma",
"OSSETIAN": "Ossetian",
"PALI": "Palia",
"PANJABI": "Panjabi",
"PERSIAN": "Persa",
"POLISH": "Pola",
"PORTUGUESE": "Portugala",
"PUSHTO": "Pushto",
"QUECHUA": "Keĉua",
"ROMANIAN": "Rumana",
"ROMANSH": "Romanĉa",
"RUNDI": "Burunda",
"RUSSIAN": "Rusa",
"SAMOAN": "Samoa",
"SANGO": "Sangoa",
"SANSKRIT": "Sanskrito",
"SARDINIAN": "Sarda",
"SCOTTISH_GAELIC": "Scottish Gaelic",
"SERBIAN": "Serba",
"SHONA": "Ŝona",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "Sinda",
"SINHALA": "Sinhala",
"SLOVAK": "Slovaka",
"SLOVENIAN": "Slovena",
"SOMALI": "Somalia",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "Hispana",
"SUNDANESE": "Sunda",
"SWAHILI": "Svahila",
"SWATI": "Svazia",
"SWEDISH": "Sveda",
"TAGALOG": "Tagaloga",
"TAHITIAN": "Tahitia",
"TAJIK": "Taĝika",
"TAMIL": "Tamila",
"TATAR": "Tatara",
"TELUGU": "Telugua",
"THAI": "Taja",
"TIBETAN": "Tibeta",
"TIGRINYA": "Tigraja",
"TONGA": "Tonga",
"TSONGA": "Conga",
"TSWANA": "Cvana",
"TURKISH": "Turka",
"TURKMEN": "Turkmena",
"TWI": "Tvia",
"UIGHUR": "Uighur",
"UKRAINIAN": "Ukraina",
"UNDEFINED": "undefined",
"URDU": "Urdua",
"UZBEK": "Uzbeka",
"VENDA": "Vendaa",
"VIETNAMESE": "Vjetnama",
"VOLAPUK": "Volapuko",
"WALLOON": "Valona",
"WELSH": "Kimra",
"WESTERN_FRISIAN": "Okcidenta frisa",
"WOLOF": "Volofa",
"XHOSA": "Kosa",
"YIDDISH": "Jida",
"YORUBA": "Joruba",
"ZHUANG": "Zhuang",
"ZULU": "Zulua"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<Nova serio>",
"Add": "Aldoni",
"Add Pattern": "Aldoni ŝablonon",
"Apply": "Apliki",
"Apply failed: {error}": "Apliko malsukcesis: {error}",
"Are you sure to delete the following filename pattern?": "Ĉu vi certe volas forigi la jenan dosiernoman ŝablonon?",
"Are you sure to delete the following shifted season?": "Ĉu vi certe volas forigi la jenan ŝovitan sezonon?",
"Are you sure to delete the following show?": "Ĉu vi certe volas forigi la jenan serion?",
"Are you sure to delete the following {track_type} track?": "Ĉu vi certe volas forigi la jenan {track_type}-trakon?",
"Are you sure to delete this tag?": "Ĉu vi certe volas forigi ĉi tiun etikedon?",
"Audio Layout": "Aŭda aranĝo",
"Back": "Reen",
"Cancel": "Nuligi",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "Ne eblas aldoni alian fluon kun la dispozicia flago 'default' aŭ 'forced' aktiva",
"Changes applied and file reloaded.": "Ŝanĝoj aplikitaj kaj dosiero reŝargita.",
"Cleanup": "Purigado",
"Cleanup disabled.": "Purigado malŝaltita.",
"Cleanup enabled.": "Purigado ŝaltita.",
"Codec": "Kodeko",
"Continuing edit session.": "Daŭrigante la redaktan seancon.",
"Default": "Defaŭlta",
"Delete": "Forigi",
"Delete Show": "Forigi serion",
"Deleted media tag {tag!r}.": "Forigis la aŭdvidan etikedon {tag!r}.",
"Differences": "Diferencoj",
"Differences (file->db/output)": "Diferencoj (dosiero->DB/eligo)",
"Discard": "Forĵeti",
"Discard pending metadata changes and quit?": "Ĉu forĵeti atendatajn metadatumajn ŝanĝojn kaj eliri?",
"Discard pending metadata changes and reload the file state?": "Ĉu forĵeti atendatajn metadatumajn ŝanĝojn kaj reŝargi la dosieran staton?",
"Down": "Malsupren",
"Dry-run: would rewrite via temporary file {target_path}": "Seka provo: reskribus per provizora dosiero {target_path}",
"Edit": "Redakti",
"Edit Pattern": "Redakti ŝablonon",
"Edit Show": "Redakti serion",
"Edit filename pattern": "Redakti dosiernoman ŝablonon",
"Edit shifted season": "Redakti ŝovitan sezonon",
"Edit stream": "Redakti fluon",
"Episode Offset": "Epizoda deŝovo",
"Episode offset": "Epizoda deŝovo",
"File": "Dosiero",
"File patterns": "Dosieraj ŝablonoj",
"First Episode": "Unua epizodo",
"First episode": "Unua epizodo",
"Forced": "Devigita",
"Help": "Helpo",
"Help Screen": "Helpa ekrano",
"ID": "ID",
"Identify": "Identigi",
"Index": "Indekso",
"Index / Subindex": "Indekso / Subindekso",
"Index Episode Digits": "Ciferoj de epizoda indekso",
"Index Season Digits": "Ciferoj de sezona indekso",
"Indicator Edisode Digits": "Ciferoj de epizoda indikilo",
"Indicator Season Digits": "Ciferoj de sezona indikilo",
"Keep Editing": "Daŭrigi redaktadon",
"Keeping pending changes.": "Konservas atendatajn ŝanĝojn.",
"Key": "Ŝlosilo",
"Language": "Lingvo",
"Last Episode": "Lasta epizodo",
"Last episode": "Lasta epizodo",
"Layout": "Aranĝo",
"Media Tags": "Aŭdvidaj etikedoj",
"More than one default audio stream detected and no prompt set": "Pli ol unu defaŭlta sonfluo detektita kaj neniu instigo agordita",
"More than one default audio stream detected! Please select stream": "Pli ol unu defaŭlta sonfluo detektita! Bonvolu elekti fluon",
"More than one default subtitle stream detected and no prompt set": "Pli ol unu defaŭlta subtitola fluo detektita kaj neniu instigo agordita",
"More than one default subtitle stream detected! Please select stream": "Pli ol unu defaŭlta subtitola fluo detektita! Bonvolu elekti fluon",
"More than one default video stream detected and no prompt set": "Pli ol unu defaŭlta videofluo detektita kaj neniu instigo agordita",
"More than one default video stream detected! Please select stream": "Pli ol unu defaŭlta videofluo detektita! Bonvolu elekti fluon",
"More than one forced audio stream detected and no prompt set": "Pli ol unu devigita sonfluo detektita kaj neniu instigo agordita",
"More than one forced audio stream detected! Please select stream": "Pli ol unu devigita sonfluo detektita! Bonvolu elekti fluon",
"More than one forced subtitle stream detected and no prompt set": "Pli ol unu devigita subtitola fluo detektita kaj neniu instigo agordita",
"More than one forced subtitle stream detected! Please select stream": "Pli ol unu devigita subtitola fluo detektita! Bonvolu elekti fluon",
"More than one forced video stream detected and no prompt set": "Pli ol unu devigita videofluo detektita kaj neniu instigo agordita",
"More than one forced video stream detected! Please select stream": "Pli ol unu devigita videofluo detektita! Bonvolu elekti fluon",
"Name": "Nomo",
"New Pattern": "Nova ŝablono",
"New Show": "Nova serio",
"New filename pattern": "Nova dosiernoma ŝablono",
"New shifted season": "Nova ŝovita sezono",
"New stream": "Nova fluo",
"No": "Ne",
"No changes to apply.": "Neniuj ŝanĝoj por apliki.",
"No changes to revert.": "Neniuj ŝanĝoj por malfari.",
"Normalization disabled.": "Normaligo malŝaltita.",
"Normalization enabled.": "Normaligo ŝaltita.",
"Normalize": "Normaligi",
"Notes": "Notoj",
"Pattern": "Ŝablono",
"Planned Changes (file->edited output)": "Planitaj ŝanĝoj (dosiero->redaktita eligo)",
"Quality": "Kvalito",
"Quit": "Eliri",
"Remove Pattern": "Forigi ŝablonon",
"Revert": "Malfari",
"Reverted pending changes.": "Malfaris atendatajn ŝanĝojn.",
"Save": "Konservi",
"Season Offset": "Sezona deŝovo",
"Select a stream first.": "Bonvolu unue elekti fluon.",
"Set Default": "Agordi kiel defaŭltan",
"Set Forced": "Agordi kiel devigitan",
"Settings Screen": "Agorda ekrano",
"Numbering Mapping": "Ŝovitaj sezonoj",
"Show": "Serio",
"Shows": "Serioj",
"Source Season": "Fonta sezono",
"SrcIndex": "Fontindekso",
"Status": "Stato",
"Stay": "Resti",
"Stream dispositions": "Fluaj dispozicioj",
"Stream tags": "Fluaj etikedoj",
"Streams": "Fluoj",
"SubIndex": "Subindekso",
"Substitute": "Anstataŭigi",
"Substitute pattern": "Anstataŭigi ŝablonon",
"Title": "Titolo",
"Type": "Tipo",
"Unable to update selected stream.": "Ne eblis ĝisdatigi la elektitan fluon.",
"Up": "Supren",
"Update Pattern": "Ĝisdatigi ŝablonon",
"Updated media tag {tag!r}.": "Ĝisdatigis la aŭdvidan etikedon {tag!r}.",
"Updated stream #{index} ({track_type}).": "Ĝisdatigis fluon #{index} ({track_type}).",
"Value": "Valoro",
"Year": "Jaro",
"Yes": "Jes",
"add media tag: key='{key}' value='{value}'": "aldoni aŭdvidan etikedon: ŝlosilo='{key}' valoro='{value}'",
"add {track_type} track: index={index} lang={language}": "aldoni {track_type}-trakon: indekso={index} lingvo={language}",
"attached_pic": "attached_pic",
"attachment": "aldonaĵo",
"audio": "sono",
"captions": "subtekstoj",
"change media tag: key='{key}' value='{value}'": "ŝanĝi aŭdvidan etikedon: ŝlosilo='{key}' valoro='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "ŝanĝi fluon #{index} ({track_type}:{sub_index}) aldoni dispozicion={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "ŝanĝi fluon #{index} ({track_type}:{sub_index}) aldoni ŝlosilon={key} valoron={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "ŝanĝi fluon #{index} ({track_type}:{sub_index}) ŝanĝi ŝlosilon={key} valoron={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "ŝanĝi fluon #{index} ({track_type}:{sub_index}) forigi dispozicion={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "ŝanĝi fluon #{index} ({track_type}:{sub_index}) forigi ŝlosilon={key} valoron={value}",
"clean_effects": "nur efektoj",
"comment": "komento",
"default": "defaŭlta",
"dependent": "dependa",
"descriptions": "priskriboj",
"dub": "dublado",
"for pattern": "por ŝablono",
"forced": "devigita",
"from": "de",
"from pattern": "de ŝablono",
"from show": "el serio",
"hearing_impaired": "aŭdmalhelpita",
"karaoke": "karaokeo",
"lyrics": "kantoteksto",
"metadata": "metadatenoj",
"non_diegetic": "nediĝeta",
"original": "originala",
"pattern #{id}": "ŝablono #{id}",
"remove media tag: key='{key}' value='{value}'": "forigi aŭdvidan etikedon: ŝlosilo='{key}' valoro='{value}'",
"remove stream #{index}": "forigi fluon #{index}",
"show #{id}": "serio #{id}",
"stereo": "stereo",
"still_image": "senmova bildo",
"sub index": "subindekso",
"subtitle": "subtitolo",
"timed_thumbnails": "tempigitaj bildetoj",
"undefined": "nedifinita",
"unknown": "nekonata",
"video": "video",
"visual_impaired": "vidmalhelpita"
}
}

361
assets/i18n/es.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "Abjaziano",
"AFAR": "Afar",
"AFRIKAANS": "Afrikaans",
"AKAN": "Akan",
"ALBANIAN": "Albanés",
"AMHARIC": "Amárico",
"ARABIC": "Árabe",
"ARAGONESE": "Aragonés",
"ARMENIAN": "Armenio",
"ASSAMESE": "Assamais",
"AVARIC": "Avaric",
"AVESTAN": "Avestan",
"AYMARA": "Aymará",
"AZERBAIJANI": "Azerbayano",
"BAMBARA": "Bambara",
"BASHKIR": "Bashkir",
"BASQUE": "Vasco",
"BELARUSIAN": "Bieloruso",
"BENGALI": "Bengalí",
"BISLAMA": "Bislama",
"BOKMAL": "Bokmål",
"BOSNIAN": "Bosnio",
"BRETON": "Bretón",
"BULGARIAN": "Búlgaro",
"BURMESE": "Birmano",
"CATALAN": "Catalán",
"CHAMORRO": "Chamorro",
"CHECHEN": "Checheno",
"CHICHEWA": "Chichewa",
"CHINESE": "Chino",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "Chuvash",
"CORNISH": "Córnico",
"CORSICAN": "Corso",
"CREE": "Cree",
"CROATIAN": "Croata",
"CZECH": "Checo",
"DANISH": "Danés",
"DIVEHI": "Divehi",
"DUTCH": "Neerlandés",
"DZONGKHA": "Butaní",
"ENGLISH": "Inglés",
"ESPERANTO": "Esperanto",
"ESTONIAN": "Estonio",
"EWE": "Ewe",
"FAROESE": "Feroés",
"FIJIAN": "Fidji",
"FILIPINO": "Filipino",
"FINNISH": "Finés",
"FRENCH": "Francés",
"FULAH": "Fulah",
"GALICIAN": "Gallego",
"GANDA": "Ganda",
"GEORGIAN": "Georgiano",
"GERMAN": "Alemán",
"GREEK": "Griego",
"GUARANI": "Guaraní",
"GUJARATI": "Guyaratí",
"HAITIAN": "Haitian",
"HAUSA": "Haussa",
"HEBREW": "Hebreo",
"HERERO": "Herero",
"HINDI": "Hindi",
"HIRI_MOTU": "Hiri Motu",
"HUNGARIAN": "Húngaro",
"ICELANDIC": "Islandés",
"IDO": "Ido",
"IGBO": "Igbo",
"INDONESIAN": "Indonesio",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "Inuktitut",
"INUPIAQ": "Inupiak",
"IRISH": "Irlandés",
"ITALIAN": "Italiano",
"JAPANESE": "Japonés",
"JAVANESE": "Javanés",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "Canarés",
"KANURI": "Kanuri",
"KASHMIRI": "Kashmir",
"KAZAKH": "Kazako",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "Kinyarwanda",
"KIRGHIZ": "Kirghiz",
"KOMI": "Komi",
"KONGO": "Kongo",
"KOREAN": "Coreano",
"KUANYAMA": "Kuanyama",
"KURDISH": "Kurdo",
"LAO": "Laosiano",
"LATIN": "Latín",
"LATVIAN": "Letón",
"LIMBURGAN": "Limburgan",
"LINGALA": "Lingala",
"LITHUANIAN": "Lituano",
"LUBA_KATANGA": "Luba-Katanga",
"LUXEMBOURGISH": "Luxembourgish",
"MACEDONIAN": "Macedonio",
"MALAGASY": "Malgache",
"MALAY": "Malayo",
"MALAYALAM": "Malabar",
"MALTESE": "Maltés",
"MANX": "Manx [Gaélico de Manx]",
"MAORI": "Maorí",
"MARATHI": "Marath",
"MARSHALLESE": "Marshall",
"MONGOLIAN": "Mongol",
"NAURU": "Nauru",
"NAVAJO": "Navajo",
"NDONGA": "Ndonga",
"NEPALI": "Nepalés",
"NORTHERN_SAMI": "Sami del Norte",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "Noruego",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "Ojibwa",
"ORIYA": "Oriya",
"OROMO": "Oromo (Afan)",
"OSSETIAN": "Ossetian",
"PALI": "Pali",
"PANJABI": "Panjabi",
"PERSIAN": "Persa",
"POLISH": "Polaco",
"PORTUGUESE": "Portugués",
"PUSHTO": "Pushto",
"QUECHUA": "Quechua",
"ROMANIAN": "Rumano",
"ROMANSH": "Romaní",
"RUNDI": "Kiroundi",
"RUSSIAN": "Ruso",
"SAMOAN": "Samoano",
"SANGO": "Sango",
"SANSKRIT": "Sánscrito",
"SARDINIAN": "Sardo",
"SCOTTISH_GAELIC": "Scottish Gaelic",
"SERBIAN": "Serbio",
"SHONA": "Shona",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "Sindhi",
"SINHALA": "Sinhala",
"SLOVAK": "Eslovaco",
"SLOVENIAN": "Esloveno",
"SOMALI": "Somalí",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "Español",
"SUNDANESE": "Sondanés",
"SWAHILI": "Swahili",
"SWATI": "Siswati",
"SWEDISH": "Sueco",
"TAGALOG": "Tagalo",
"TAHITIAN": "Tahitiano",
"TAJIK": "Tajiko",
"TAMIL": "Tamil",
"TATAR": "Tataro",
"TELUGU": "Telugu",
"THAI": "Tailandés",
"TIBETAN": "Tibetano",
"TIGRINYA": "Tigrinya",
"TONGA": "Tonga",
"TSONGA": "Tsonga",
"TSWANA": "Setchwana",
"TURKISH": "Turco",
"TURKMEN": "Turkmeno",
"TWI": "Tchi",
"UIGHUR": "Uighur",
"UKRAINIAN": "Ucraniano",
"UNDEFINED": "undefined",
"URDU": "Urdu",
"UZBEK": "Uzbeko",
"VENDA": "Venda",
"VIETNAMESE": "Vietnamita",
"VOLAPUK": "Volapük",
"WALLOON": "Valón",
"WELSH": "Galés",
"WESTERN_FRISIAN": "Frisón occidental",
"WOLOF": "Wolof",
"XHOSA": "Xhosa",
"YIDDISH": "Yidish",
"YORUBA": "Yoruba",
"ZHUANG": "Zhuang",
"ZULU": "Zulu"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<Nueva serie>",
"Add": "Añadir",
"Add Pattern": "Añadir patrón",
"Apply": "Aplicar",
"Apply failed: {error}": "Error al aplicar: {error}",
"Are you sure to delete the following filename pattern?": "¿Seguro que quieres eliminar el siguiente patrón de nombre de archivo?",
"Are you sure to delete the following shifted season?": "¿Seguro que quieres eliminar la siguiente temporada desplazada?",
"Are you sure to delete the following show?": "¿Seguro que quieres eliminar la siguiente serie?",
"Are you sure to delete the following {track_type} track?": "¿Seguro que quieres eliminar la pista {track_type} siguiente?",
"Are you sure to delete this tag?": "¿Seguro que quieres eliminar esta etiqueta?",
"Audio Layout": "Disposición de audio",
"Back": "Volver",
"Cancel": "Cancelar",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "No se puede añadir otro flujo con la marca de disposición 'default' o 'forced' activada",
"Changes applied and file reloaded.": "Cambios aplicados y archivo recargado.",
"Cleanup": "Limpieza",
"Cleanup disabled.": "Limpieza desactivada.",
"Cleanup enabled.": "Limpieza activada.",
"Codec": "Códec",
"Continuing edit session.": "Continuando la sesión de edición.",
"Default": "Predeterminado",
"Delete": "Eliminar",
"Delete Show": "Eliminar serie",
"Deleted media tag {tag!r}.": "Etiqueta de medios {tag!r} eliminada.",
"Differences": "Diferencias",
"Differences (file->db/output)": "Diferencias (archivo->BD/salida)",
"Discard": "Descartar",
"Discard pending metadata changes and quit?": "¿Descartar los cambios pendientes de metadatos y salir?",
"Discard pending metadata changes and reload the file state?": "¿Descartar los cambios pendientes de metadatos y recargar el estado del archivo?",
"Down": "Abajo",
"Dry-run: would rewrite via temporary file {target_path}": "Simulación: reescribiría mediante el archivo temporal {target_path}",
"Edit": "Editar",
"Edit Pattern": "Editar patrón",
"Edit Show": "Editar serie",
"Edit filename pattern": "Editar patrón de nombre de archivo",
"Edit shifted season": "Editar temporada desplazada",
"Edit stream": "Editar flujo",
"Episode Offset": "Desplazamiento de episodio",
"Episode offset": "Desplazamiento de episodio",
"File": "Archivo",
"File patterns": "Patrones de archivo",
"First Episode": "Primer episodio",
"First episode": "Primer episodio",
"Forced": "Forzado",
"Help": "Ayuda",
"Help Screen": "Pantalla de ayuda",
"ID": "ID",
"Identify": "Identificar",
"Index": "Índice",
"Index / Subindex": "Índice / Subíndice",
"Index Episode Digits": "Dígitos del índice de episodio",
"Index Season Digits": "Dígitos del índice de temporada",
"Indicator Edisode Digits": "Dígitos del indicador de episodio",
"Indicator Season Digits": "Dígitos del indicador de temporada",
"Keep Editing": "Seguir editando",
"Keeping pending changes.": "Se conservan los cambios pendientes.",
"Key": "Clave",
"Language": "Idioma",
"Last Episode": "Último episodio",
"Last episode": "Último episodio",
"Layout": "Diseño",
"Media Tags": "Etiquetas de medios",
"More than one default audio stream detected and no prompt set": "Se detectó más de un flujo de audio predeterminado y no hay aviso configurado",
"More than one default audio stream detected! Please select stream": "Se detectó más de un flujo de audio predeterminado. Selecciona el flujo",
"More than one default subtitle stream detected and no prompt set": "Se detectó más de un flujo de subtítulos predeterminado y no hay aviso configurado",
"More than one default subtitle stream detected! Please select stream": "Se detectó más de un flujo de subtítulos predeterminado. Selecciona el flujo",
"More than one default video stream detected and no prompt set": "Se detectó más de un flujo de vídeo predeterminado y no hay aviso configurado",
"More than one default video stream detected! Please select stream": "Se detectó más de un flujo de vídeo predeterminado. Selecciona el flujo",
"More than one forced audio stream detected and no prompt set": "Se detectó más de un flujo de audio forzado y no hay aviso configurado",
"More than one forced audio stream detected! Please select stream": "Se detectó más de un flujo de audio forzado. Selecciona el flujo",
"More than one forced subtitle stream detected and no prompt set": "Se detectó más de un flujo de subtítulos forzados y no hay aviso configurado",
"More than one forced subtitle stream detected! Please select stream": "Se detectó más de un flujo de subtítulos forzados. Selecciona el flujo",
"More than one forced video stream detected and no prompt set": "Se detectó más de un flujo de vídeo forzado y no hay aviso configurado",
"More than one forced video stream detected! Please select stream": "Se detectó más de un flujo de vídeo forzado. Selecciona el flujo",
"Name": "Nombre",
"New Pattern": "Nuevo patrón",
"New Show": "Nueva serie",
"New filename pattern": "Nuevo patrón de nombre de archivo",
"New shifted season": "Nueva temporada desplazada",
"New stream": "Nuevo flujo",
"No": "No",
"No changes to apply.": "No hay cambios para aplicar.",
"No changes to revert.": "No hay cambios para revertir.",
"Normalization disabled.": "Normalización desactivada.",
"Normalization enabled.": "Normalización activada.",
"Normalize": "Normalizar",
"Notes": "Notas",
"Pattern": "Patrón",
"Planned Changes (file->edited output)": "Cambios planificados (archivo->salida editada)",
"Quality": "Calidad",
"Quit": "Salir",
"Remove Pattern": "Eliminar patrón",
"Revert": "Revertir",
"Reverted pending changes.": "Se revirtieron los cambios pendientes.",
"Save": "Guardar",
"Season Offset": "Desplazamiento de temporada",
"Select a stream first.": "Selecciona primero un flujo.",
"Set Default": "Establecer como predeterminado",
"Set Forced": "Establecer como forzado",
"Settings Screen": "Pantalla de ajustes",
"Numbering Mapping": "Temporadas desplazadas",
"Show": "Serie",
"Shows": "Series",
"Source Season": "Temporada de origen",
"SrcIndex": "Índice origen",
"Status": "Estado",
"Stay": "Permanecer",
"Stream dispositions": "Disposiciones del flujo",
"Stream tags": "Etiquetas del flujo",
"Streams": "Flujos",
"SubIndex": "Subíndice",
"Substitute": "Sustituir",
"Substitute pattern": "Sustituir patrón",
"Title": "Título",
"Type": "Tipo",
"Unable to update selected stream.": "No se pudo actualizar el flujo seleccionado.",
"Up": "Arriba",
"Update Pattern": "Actualizar patrón",
"Updated media tag {tag!r}.": "Etiqueta de medios {tag!r} actualizada.",
"Updated stream #{index} ({track_type}).": "Flujo #{index} ({track_type}) actualizado.",
"Value": "Valor",
"Year": "Año",
"Yes": "Sí",
"add media tag: key='{key}' value='{value}'": "añadir etiqueta de medios: clave='{key}' valor='{value}'",
"add {track_type} track: index={index} lang={language}": "añadir pista {track_type}: índice={index} idioma={language}",
"attached_pic": "attached_pic",
"attachment": "adjunto",
"audio": "audio",
"captions": "subtítulos",
"change media tag: key='{key}' value='{value}'": "cambiar etiqueta de medios: clave='{key}' valor='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "cambiar flujo #{index} ({track_type}:{sub_index}) añadir disposición={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "cambiar flujo #{index} ({track_type}:{sub_index}) añadir clave={key} valor={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "cambiar flujo #{index} ({track_type}:{sub_index}) cambiar clave={key} valor={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "cambiar flujo #{index} ({track_type}:{sub_index}) quitar disposición={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "cambiar flujo #{index} ({track_type}:{sub_index}) quitar clave={key} valor={value}",
"clean_effects": "solo efectos",
"comment": "comentario",
"default": "predeterminado",
"dependent": "dependiente",
"descriptions": "descripciones",
"dub": "doblaje",
"for pattern": "para el patrón",
"forced": "forzado",
"from": "de",
"from pattern": "del patrón",
"from show": "de la serie",
"hearing_impaired": "personas con discapacidad auditiva",
"karaoke": "karaoke",
"lyrics": "letra",
"metadata": "metadatos",
"non_diegetic": "no diegético",
"original": "original",
"pattern #{id}": "patrón #{id}",
"remove media tag: key='{key}' value='{value}'": "eliminar etiqueta de medios: clave='{key}' valor='{value}'",
"remove stream #{index}": "eliminar flujo #{index}",
"show #{id}": "serie #{id}",
"stereo": "estéreo",
"still_image": "imagen fija",
"sub index": "subíndice",
"subtitle": "subtítulo",
"timed_thumbnails": "miniaturas temporizadas",
"undefined": "indefinido",
"unknown": "desconocido",
"video": "vídeo",
"visual_impaired": "personas con discapacidad visual"
}
}

361
assets/i18n/fr.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "Abkhaze",
"AFAR": "Afar",
"AFRIKAANS": "Afrikaans",
"AKAN": "Akan",
"ALBANIAN": "Albanais",
"AMHARIC": "Amharique",
"ARABIC": "Arabe",
"ARAGONESE": "Aragonais",
"ARMENIAN": "Arménien",
"ASSAMESE": "Assamais",
"AVARIC": "Avar",
"AVESTAN": "Avestique",
"AYMARA": "Aymara",
"AZERBAIJANI": "Azéri",
"BAMBARA": "Bambara",
"BASHKIR": "Bachkir",
"BASQUE": "Basque",
"BELARUSIAN": "Biélorusse",
"BENGALI": "Bengali",
"BISLAMA": "Bichelamar",
"BOKMAL": "Bokmål",
"BOSNIAN": "Bosniaque",
"BRETON": "Breton",
"BULGARIAN": "Bulgare",
"BURMESE": "Birman",
"CATALAN": "Catalan",
"CHAMORRO": "Chamorro",
"CHECHEN": "Tchétchène",
"CHICHEWA": "Chichewa",
"CHINESE": "Chinois",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "Tchouvache",
"CORNISH": "Cornique",
"CORSICAN": "Corse",
"CREE": "Cri",
"CROATIAN": "Croate",
"CZECH": "Tchèque",
"DANISH": "Danois",
"DIVEHI": "Divehi",
"DUTCH": "Néerlandais",
"DZONGKHA": "Dzongkha",
"ENGLISH": "Anglais",
"ESPERANTO": "Espéranto",
"ESTONIAN": "Estonien",
"EWE": "Éwé",
"FAROESE": "Féroïen",
"FIJIAN": "Fidjien",
"FILIPINO": "Filipino",
"FINNISH": "Finnois",
"FRENCH": "Français",
"FULAH": "Peul",
"GALICIAN": "Galicien",
"GANDA": "Ganda",
"GEORGIAN": "Géorgien",
"GERMAN": "Allemand",
"GREEK": "Grec",
"GUARANI": "Guarani",
"GUJARATI": "Goudjarâtî (Gujrâtî)",
"HAITIAN": "Haïtien",
"HAUSA": "Haoussa",
"HEBREW": "Hébreu",
"HERERO": "Herero",
"HINDI": "Hindi",
"HIRI_MOTU": "Hiri Motu",
"HUNGARIAN": "Hongrois",
"ICELANDIC": "Islandais",
"IDO": "Ido",
"IGBO": "Igbo",
"INDONESIAN": "Indonésien",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "Inuktitut",
"INUPIAQ": "Inupiaq",
"IRISH": "Irlandais",
"ITALIAN": "Italien",
"JAPANESE": "Japonais",
"JAVANESE": "Javanais",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "Kannara (Canara)",
"KANURI": "Kanouri",
"KASHMIRI": "Kashmiri",
"KAZAKH": "Kazakh",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "Kinyarwanda",
"KIRGHIZ": "Kirghiz",
"KOMI": "Komi",
"KONGO": "Kongo",
"KOREAN": "Coréen",
"KUANYAMA": "Kuanyama",
"KURDISH": "Kurde",
"LAO": "Laotien",
"LATIN": "Latin",
"LATVIAN": "Letton",
"LIMBURGAN": "Limburgan",
"LINGALA": "Lingala",
"LITHUANIAN": "Lituanien",
"LUBA_KATANGA": "Luba-katanga",
"LUXEMBOURGISH": "Luxembourgeois",
"MACEDONIAN": "Macédonien",
"MALAGASY": "Malgache",
"MALAY": "Malais",
"MALAYALAM": "Malayalam",
"MALTESE": "Maltais",
"MANX": "Mannois",
"MAORI": "Maori",
"MARATHI": "Marathe",
"MARSHALLESE": "Marshallais",
"MONGOLIAN": "Mongol",
"NAURU": "Nauru",
"NAVAJO": "Navajo",
"NDONGA": "Ndonga",
"NEPALI": "Népalais",
"NORTHERN_SAMI": "Same du Nord",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "Norvégien",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "Ojibwa",
"ORIYA": "Oriya",
"OROMO": "Oromo",
"OSSETIAN": "Ossète",
"PALI": "Pali",
"PANJABI": "Panjabi",
"PERSIAN": "Persan",
"POLISH": "Polonais",
"PORTUGUESE": "Portugais",
"PUSHTO": "Pachto",
"QUECHUA": "Quechua",
"ROMANIAN": "Roumain",
"ROMANSH": "Romanche",
"RUNDI": "Rundi",
"RUSSIAN": "Russe",
"SAMOAN": "Samoan",
"SANGO": "Sango",
"SANSKRIT": "Sanskrit",
"SARDINIAN": "Sarde",
"SCOTTISH_GAELIC": "Gaélique écossais",
"SERBIAN": "Serbe",
"SHONA": "Shona",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "Sindhi",
"SINHALA": "Sinhala",
"SLOVAK": "Slovaque",
"SLOVENIAN": "Slovène",
"SOMALI": "Somali",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "Espagnol",
"SUNDANESE": "Sundanais",
"SWAHILI": "Swahili",
"SWATI": "Swati",
"SWEDISH": "Suédois",
"TAGALOG": "Tagalog",
"TAHITIAN": "Tahitien",
"TAJIK": "Tadjik",
"TAMIL": "Tamoul",
"TATAR": "Tatar",
"TELUGU": "Télougou",
"THAI": "Thaï",
"TIBETAN": "Tibétain",
"TIGRINYA": "Tigrigna",
"TONGA": "Tonga",
"TSONGA": "Tsonga",
"TSWANA": "Tswana",
"TURKISH": "Turc",
"TURKMEN": "Turkmène",
"TWI": "Twi",
"UIGHUR": "Uighur",
"UKRAINIAN": "Ukrainien",
"UNDEFINED": "Indéfini",
"URDU": "Ourdou",
"UZBEK": "Ouzbek",
"VENDA": "Venda",
"VIETNAMESE": "Vietnamien",
"VOLAPUK": "Volapük",
"WALLOON": "Wallon",
"WELSH": "Gallois",
"WESTERN_FRISIAN": "Frison occidental",
"WOLOF": "Wolof",
"XHOSA": "Xhosa",
"YIDDISH": "Yiddish",
"YORUBA": "Yoruba",
"ZHUANG": "Zhuang",
"ZULU": "Zoulou"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<Nouvelle série>",
"Add": "Ajouter",
"Add Pattern": "Ajouter un modèle",
"Apply": "Appliquer",
"Apply failed: {error}": "Échec de l'application : {error}",
"Are you sure to delete the following filename pattern?": "Voulez-vous vraiment supprimer le modèle de nom de fichier suivant ?",
"Are you sure to delete the following shifted season?": "Voulez-vous vraiment supprimer la saison décalée suivante ?",
"Are you sure to delete the following show?": "Voulez-vous vraiment supprimer la série suivante ?",
"Are you sure to delete the following {track_type} track?": "Voulez-vous vraiment supprimer la piste {track_type} suivante ?",
"Are you sure to delete this tag?": "Voulez-vous vraiment supprimer cette balise ?",
"Audio Layout": "Disposition audio",
"Back": "Retour",
"Cancel": "Annuler",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "Impossible d'ajouter un autre flux avec l'indicateur de disposition 'default' ou 'forced'",
"Changes applied and file reloaded.": "Modifications appliquées et fichier rechargé.",
"Cleanup": "Nettoyage",
"Cleanup disabled.": "Nettoyage désactivé.",
"Cleanup enabled.": "Nettoyage activé.",
"Codec": "Codec",
"Continuing edit session.": "Poursuite de la session d'édition.",
"Default": "Par défaut",
"Delete": "Supprimer",
"Delete Show": "Supprimer la série",
"Deleted media tag {tag!r}.": "Balise média {tag!r} supprimée.",
"Differences": "Différences",
"Differences (file->db/output)": "Différences (fichier->BD/sortie)",
"Discard": "Ignorer",
"Discard pending metadata changes and quit?": "Ignorer les modifications de métadonnées en attente et quitter ?",
"Discard pending metadata changes and reload the file state?": "Ignorer les modifications de métadonnées en attente et recharger l'état du fichier ?",
"Down": "Descendre",
"Dry-run: would rewrite via temporary file {target_path}": "Simulation : réécrirait via le fichier temporaire {target_path}",
"Edit": "Modifier",
"Edit Pattern": "Modifier le modèle",
"Edit Show": "Modifier la série",
"Edit filename pattern": "Modifier le modèle de nom de fichier",
"Edit shifted season": "Modifier la saison décalée",
"Edit stream": "Modifier le flux",
"Episode Offset": "Décalage d'épisode",
"Episode offset": "Décalage d'épisode",
"File": "Fichier",
"File patterns": "Modèles de fichiers",
"First Episode": "Premier épisode",
"First episode": "Premier épisode",
"Forced": "Forcé",
"Help": "Aide",
"Help Screen": "Écran d'aide",
"ID": "ID",
"Identify": "Identifier",
"Index": "Index",
"Index / Subindex": "Index / Sous-index",
"Index Episode Digits": "Chiffres d'épisode d'index",
"Index Season Digits": "Chiffres de saison d'index",
"Indicator Edisode Digits": "Chiffres d'épisode de l'indicateur",
"Indicator Season Digits": "Chiffres de saison de l'indicateur",
"Keep Editing": "Continuer l'édition",
"Keeping pending changes.": "Les modifications en attente sont conservées.",
"Key": "Clé",
"Language": "Langue",
"Last Episode": "Dernier épisode",
"Last episode": "Dernier épisode",
"Layout": "Disposition",
"Media Tags": "Balises média",
"More than one default audio stream detected and no prompt set": "Plus d'un flux audio par défaut détecté et aucune invite définie",
"More than one default audio stream detected! Please select stream": "Plus d'un flux audio par défaut détecté ! Veuillez sélectionner un flux",
"More than one default subtitle stream detected and no prompt set": "Plus d'un flux de sous-titres par défaut détecté et aucune invite définie",
"More than one default subtitle stream detected! Please select stream": "Plus d'un flux de sous-titres par défaut détecté ! Veuillez sélectionner un flux",
"More than one default video stream detected and no prompt set": "Plus d'un flux vidéo par défaut détecté et aucune invite définie",
"More than one default video stream detected! Please select stream": "Plus d'un flux vidéo par défaut détecté ! Veuillez sélectionner un flux",
"More than one forced audio stream detected and no prompt set": "Plus d'un flux audio forcé détecté et aucune invite définie",
"More than one forced audio stream detected! Please select stream": "Plus d'un flux audio forcé détecté ! Veuillez sélectionner un flux",
"More than one forced subtitle stream detected and no prompt set": "Plus d'un flux de sous-titres forcé détecté et aucune invite définie",
"More than one forced subtitle stream detected! Please select stream": "Plus d'un flux de sous-titres forcé détecté ! Veuillez sélectionner un flux",
"More than one forced video stream detected and no prompt set": "Plus d'un flux vidéo forcé détecté et aucune invite définie",
"More than one forced video stream detected! Please select stream": "Plus d'un flux vidéo forcé détecté ! Veuillez sélectionner un flux",
"Name": "Nom",
"New Pattern": "Nouveau modèle",
"New Show": "Nouvelle série",
"New filename pattern": "Nouveau modèle de nom de fichier",
"New shifted season": "Nouvelle saison décalée",
"New stream": "Nouveau flux",
"No": "Non",
"No changes to apply.": "Aucune modification à appliquer.",
"No changes to revert.": "Aucune modification à annuler.",
"Normalization disabled.": "Normalisation désactivée.",
"Normalization enabled.": "Normalisation activée.",
"Normalize": "Normaliser",
"Notes": "Notes",
"Pattern": "Modèle",
"Planned Changes (file->edited output)": "Modifications prévues (fichier->sortie modifiée)",
"Quality": "Qualité",
"Quit": "Quitter",
"Remove Pattern": "Supprimer le modèle",
"Revert": "Annuler les modifications",
"Reverted pending changes.": "Modifications en attente annulées.",
"Save": "Enregistrer",
"Season Offset": "Décalage de saison",
"Select a stream first.": "Veuillez d'abord sélectionner un flux.",
"Set Default": "Définir par défaut",
"Set Forced": "Définir comme forcé",
"Settings Screen": "Écran des paramètres",
"Numbering Mapping": "Saisons décalées",
"Show": "Série",
"Shows": "Séries",
"Source Season": "Saison source",
"SrcIndex": "Index source",
"Status": "Statut",
"Stay": "Rester",
"Stream dispositions": "Dispositions des flux",
"Stream tags": "Balises du flux",
"Streams": "Flux",
"SubIndex": "Sous-index",
"Substitute": "Remplacer",
"Substitute pattern": "Remplacer le modèle",
"Title": "Titre",
"Type": "Type",
"Unable to update selected stream.": "Impossible de mettre à jour le flux sélectionné.",
"Up": "Monter",
"Update Pattern": "Mettre à jour le modèle",
"Updated media tag {tag!r}.": "Balise média {tag!r} mise à jour.",
"Updated stream #{index} ({track_type}).": "Flux #{index} ({track_type}) mis à jour.",
"Value": "Valeur",
"Year": "Année",
"Yes": "Oui",
"add media tag: key='{key}' value='{value}'": "ajouter une balise média : clé='{key}' valeur='{value}'",
"add {track_type} track: index={index} lang={language}": "ajouter une piste {track_type} : index={index} langue={language}",
"attached_pic": "attached_pic",
"attachment": "pièce jointe",
"audio": "audio",
"captions": "sous-titres",
"change media tag: key='{key}' value='{value}'": "modifier une balise média : clé='{key}' valeur='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "modifier le flux #{index} ({track_type}:{sub_index}) ajouter disposition={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "modifier le flux #{index} ({track_type}:{sub_index}) ajouter clé={key} valeur={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "modifier le flux #{index} ({track_type}:{sub_index}) changer clé={key} valeur={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "modifier le flux #{index} ({track_type}:{sub_index}) supprimer disposition={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "modifier le flux #{index} ({track_type}:{sub_index}) supprimer clé={key} valeur={value}",
"clean_effects": "effets seuls",
"comment": "commentaire",
"default": "par défaut",
"dependent": "dépendant",
"descriptions": "descriptions",
"dub": "doublage",
"for pattern": "pour le modèle",
"forced": "forcé",
"from": "de",
"from pattern": "depuis le modèle",
"from show": "depuis la série",
"hearing_impaired": "malentendants",
"karaoke": "karaoké",
"lyrics": "paroles",
"metadata": "métadonnées",
"non_diegetic": "non diégétique",
"original": "original",
"pattern #{id}": "modèle #{id}",
"remove media tag: key='{key}' value='{value}'": "supprimer une balise média : clé='{key}' valeur='{value}'",
"remove stream #{index}": "supprimer le flux #{index}",
"show #{id}": "série #{id}",
"stereo": "stéréo",
"still_image": "image fixe",
"sub index": "sous-index",
"subtitle": "sous-titre",
"timed_thumbnails": "miniatures horodatées",
"undefined": "indéfini",
"unknown": "inconnu",
"video": "vidéo",
"visual_impaired": "malvoyants"
}
}

361
assets/i18n/ja.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "アブハジア語",
"AFAR": "アファル語",
"AFRIKAANS": "アフリカーンス語",
"AKAN": "アカン語",
"ALBANIAN": "アルバニア語",
"AMHARIC": "アムハラ語",
"ARABIC": "アラビア語",
"ARAGONESE": "アラゴン語",
"ARMENIAN": "アルメニア語",
"ASSAMESE": "アッサム語",
"AVARIC": "アヴァル語",
"AVESTAN": "アヴェスタ語",
"AYMARA": "アイマラ語",
"AZERBAIJANI": "アゼルバイジャン語",
"BAMBARA": "バンバラ語",
"BASHKIR": "バシキール語",
"BASQUE": "バスク語",
"BELARUSIAN": "白ロシア語",
"BENGALI": "ベンガル語",
"BISLAMA": "ビスラマ語",
"BOKMAL": "Bokmål",
"BOSNIAN": "ボスニア語",
"BRETON": "ブルトン語",
"BULGARIAN": "ブルガリア語",
"BURMESE": "ビルマ語",
"CATALAN": "カタルーニャ語",
"CHAMORRO": "チャモロ語",
"CHECHEN": "チェチェン語",
"CHICHEWA": "Chichewa",
"CHINESE": "中国語",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "チュヴァシュ語",
"CORNISH": "コーンウォール語",
"CORSICAN": "コルシカ語",
"CREE": "クリー語",
"CROATIAN": "クロアチア語",
"CZECH": "チェコ語",
"DANISH": "デンマーク語",
"DIVEHI": "Divehi",
"DUTCH": "オランダ語",
"DZONGKHA": "ゾンカ語",
"ENGLISH": "英語",
"ESPERANTO": "エスペラント語",
"ESTONIAN": "エストニア語",
"EWE": "エウェ語",
"FAROESE": "フェロー語",
"FIJIAN": "フィジー語",
"FILIPINO": "Filipino",
"FINNISH": "フィン語",
"FRENCH": "フランス語",
"FULAH": "フラ語",
"GALICIAN": "ガリシア語",
"GANDA": "ガンダ語",
"GEORGIAN": "グルジア語",
"GERMAN": "ドイツ語",
"GREEK": "ギリシャ語",
"GUARANI": "グアラニー",
"GUJARATI": "グジャラーティー語",
"HAITIAN": "Haitian",
"HAUSA": "ハウサ語",
"HEBREW": "ヘブライ語",
"HERERO": "ヘレロ語",
"HINDI": "ヒンディー語",
"HIRI_MOTU": "ヒリモトゥ語",
"HUNGARIAN": "ハンガリー語",
"ICELANDIC": "アイスランド語",
"IDO": "イド語",
"IGBO": "イボ語",
"INDONESIAN": "インドネシア語",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "イヌクウティトット語",
"INUPIAQ": "イヌピアック語",
"IRISH": "アイルランド語",
"ITALIAN": "イタリア語",
"JAPANESE": "日本語",
"JAVANESE": "ジャワ語",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "カンナダ語",
"KANURI": "カヌリ語",
"KASHMIRI": "カシミーリー語",
"KAZAKH": "カザーフ語",
"KHMER": "クメール語",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "キンヤルワンダ語",
"KIRGHIZ": "Kirghiz",
"KOMI": "コミ語",
"KONGO": "コンゴ語",
"KOREAN": "朝鮮語",
"KUANYAMA": "Kuanyama",
"KURDISH": "クルド語",
"LAO": "ラオ語",
"LATIN": "ラテン語",
"LATVIAN": "ラトビア語",
"LIMBURGAN": "Limburgan",
"LINGALA": "リンガラ語",
"LITHUANIAN": "リトアニア語",
"LUBA_KATANGA": "ルバ語",
"LUXEMBOURGISH": "Luxembourgish",
"MACEDONIAN": "マケドニア語",
"MALAGASY": "マラガシ語",
"MALAY": "マライ語",
"MALAYALAM": "マラヤーラム語",
"MALTESE": "マルタ語",
"MANX": "マン島語",
"MAORI": "マオリ語",
"MARATHI": "マラーティー語",
"MARSHALLESE": "マーシャル語",
"MONGOLIAN": "蒙古語",
"NAURU": "ナウル語",
"NAVAJO": "Navajo",
"NDONGA": "ンドンガ語",
"NEPALI": "ネパール語",
"NORTHERN_SAMI": "北サーミ語",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "ノルウェー語",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "オジブワ語",
"ORIYA": "オリヤー語",
"OROMO": "オロモ語",
"OSSETIAN": "Ossetian",
"PALI": "パーリ語",
"PANJABI": "Panjabi",
"PERSIAN": "ペルシア語",
"POLISH": "ポーランド語",
"PORTUGUESE": "ポルトガル語",
"PUSHTO": "Pushto",
"QUECHUA": "キチュワ語",
"ROMANIAN": "ルーマニア語",
"ROMANSH": "ロマンシュ語",
"RUNDI": "ルンディ語",
"RUSSIAN": "ロシア語",
"SAMOAN": "サモア語",
"SANGO": "サンゴ語",
"SANSKRIT": "梵語",
"SARDINIAN": "サルデーニャ語",
"SCOTTISH_GAELIC": "Scottish Gaelic",
"SERBIAN": "セルビア語",
"SHONA": "ショナ語",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "シンディー語",
"SINHALA": "Sinhala",
"SLOVAK": "スロヴァキア語",
"SLOVENIAN": "スロヴェニア語",
"SOMALI": "ソマリ語",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "スペイン語",
"SUNDANESE": "スンダ語",
"SWAHILI": "スワヒリ語",
"SWATI": "シスワティ語",
"SWEDISH": "スウェーデン語",
"TAGALOG": "タガログ語",
"TAHITIAN": "タヒチ語",
"TAJIK": "タジク語",
"TAMIL": "タミル語",
"TATAR": "タタール語",
"TELUGU": "テルグ語",
"THAI": "タイ語",
"TIBETAN": "チベット語",
"TIGRINYA": "ティグリニア語",
"TONGA": "Tonga",
"TSONGA": "ツォンガ語",
"TSWANA": "ツワナ語",
"TURKISH": "トルコ語",
"TURKMEN": "トゥルクメン語",
"TWI": "トウィ語",
"UIGHUR": "Uighur",
"UKRAINIAN": "ウクライナ語",
"UNDEFINED": "未定義",
"URDU": "ウルドゥー語",
"UZBEK": "ウズベク語",
"VENDA": "ベンダ語",
"VIETNAMESE": "ベトナム語",
"VOLAPUK": "ボラピューク語",
"WALLOON": "ワロン語",
"WELSH": "ウェールズ語",
"WESTERN_FRISIAN": "西フリジア語",
"WOLOF": "ウォロフ語",
"XHOSA": "ホサ語",
"YIDDISH": "イディッシュ語",
"YORUBA": "ヨルバ語",
"ZHUANG": "Zhuang",
"ZULU": "ズールー語"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<新しい番組>",
"Add": "追加",
"Add Pattern": "パターンを追加",
"Apply": "適用",
"Apply failed: {error}": "適用に失敗しました: {error}",
"Are you sure to delete the following filename pattern?": "次のファイル名パターンを削除してもよろしいですか?",
"Are you sure to delete the following shifted season?": "次のシーズンシフト設定を削除してもよろしいですか?",
"Are you sure to delete the following show?": "次の番組を削除してもよろしいですか?",
"Are you sure to delete the following {track_type} track?": "次の{track_type}ストリームを削除してもよろしいですか?",
"Are you sure to delete this tag?": "このタグを削除してもよろしいですか?",
"Audio Layout": "音声レイアウト",
"Back": "戻る",
"Cancel": "キャンセル",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "default または forced の disposition が設定されたストリームはこれ以上追加できません",
"Changes applied and file reloaded.": "変更を適用し、ファイルを再読み込みしました。",
"Cleanup": "クリーンアップ",
"Cleanup disabled.": "クリーンアップを無効にしました。",
"Cleanup enabled.": "クリーンアップを有効にしました。",
"Codec": "コーデック",
"Continuing edit session.": "編集セッションを続行します。",
"Default": "デフォルト",
"Delete": "削除",
"Delete Show": "番組を削除",
"Deleted media tag {tag!r}.": "メディアタグ {tag!r} を削除しました。",
"Differences": "差分",
"Differences (file->db/output)": "差分 (ファイル->DB/出力)",
"Discard": "破棄",
"Discard pending metadata changes and quit?": "保留中のメタデータ変更を破棄して終了しますか?",
"Discard pending metadata changes and reload the file state?": "保留中のメタデータ変更を破棄してファイル状態を再読み込みしますか?",
"Down": "下へ",
"Dry-run: would rewrite via temporary file {target_path}": "ドライラン: 一時ファイル {target_path} 経由で再書き込みします",
"Edit": "編集",
"Edit Pattern": "パターンを編集",
"Edit Show": "番組を編集",
"Edit filename pattern": "ファイル名パターンを編集",
"Edit shifted season": "シフト済みシーズンを編集",
"Edit stream": "ストリームを編集",
"Episode Offset": "エピソードオフセット",
"Episode offset": "エピソードオフセット",
"File": "ファイル",
"File patterns": "ファイルパターン",
"First Episode": "最初のエピソード",
"First episode": "最初のエピソード",
"Forced": "強制",
"Help": "ヘルプ",
"Help Screen": "ヘルプ画面",
"ID": "ID",
"Identify": "識別",
"Index": "インデックス",
"Index / Subindex": "インデックス / サブインデックス",
"Index Episode Digits": "インデックスのエピソード桁数",
"Index Season Digits": "インデックスのシーズン桁数",
"Indicator Edisode Digits": "インジケーターのエピソード桁数",
"Indicator Season Digits": "インジケーターのシーズン桁数",
"Keep Editing": "編集を続ける",
"Keeping pending changes.": "保留中の変更を保持します。",
"Key": "キー",
"Language": "言語",
"Last Episode": "最後のエピソード",
"Last episode": "最後のエピソード",
"Layout": "レイアウト",
"Media Tags": "メディアタグ",
"More than one default audio stream detected and no prompt set": "デフォルト音声ストリームが複数検出され、プロンプトも設定されていません",
"More than one default audio stream detected! Please select stream": "デフォルト音声ストリームが複数検出されました。ストリームを選択してください",
"More than one default subtitle stream detected and no prompt set": "デフォルト字幕ストリームが複数検出され、プロンプトも設定されていません",
"More than one default subtitle stream detected! Please select stream": "デフォルト字幕ストリームが複数検出されました。ストリームを選択してください",
"More than one default video stream detected and no prompt set": "デフォルト映像ストリームが複数検出され、プロンプトも設定されていません",
"More than one default video stream detected! Please select stream": "デフォルト映像ストリームが複数検出されました。ストリームを選択してください",
"More than one forced audio stream detected and no prompt set": "強制音声ストリームが複数検出され、プロンプトも設定されていません",
"More than one forced audio stream detected! Please select stream": "強制音声ストリームが複数検出されました。ストリームを選択してください",
"More than one forced subtitle stream detected and no prompt set": "強制字幕ストリームが複数検出され、プロンプトも設定されていません",
"More than one forced subtitle stream detected! Please select stream": "強制字幕ストリームが複数検出されました。ストリームを選択してください",
"More than one forced video stream detected and no prompt set": "強制映像ストリームが複数検出され、プロンプトも設定されていません",
"More than one forced video stream detected! Please select stream": "強制映像ストリームが複数検出されました。ストリームを選択してください",
"Name": "名前",
"New Pattern": "新しいパターン",
"New Show": "新しい番組",
"New filename pattern": "新しいファイル名パターン",
"New shifted season": "新しいシーズンシフト",
"New stream": "新しいストリーム",
"No": "いいえ",
"No changes to apply.": "適用する変更はありません。",
"No changes to revert.": "元に戻す変更はありません。",
"Normalization disabled.": "正規化を無効にしました。",
"Normalization enabled.": "正規化を有効にしました。",
"Normalize": "正規化",
"Notes": "メモ",
"Pattern": "パターン",
"Planned Changes (file->edited output)": "予定された変更 (ファイル->編集後出力)",
"Quality": "品質",
"Quit": "終了",
"Remove Pattern": "パターンを削除",
"Revert": "元に戻す",
"Reverted pending changes.": "保留中の変更を元に戻しました。",
"Save": "保存",
"Season Offset": "シーズンオフセット",
"Select a stream first.": "まずストリームを選択してください。",
"Set Default": "デフォルトに設定",
"Set Forced": "強制に設定",
"Settings Screen": "設定画面",
"Numbering Mapping": "シフト済みシーズン",
"Show": "番組",
"Shows": "番組一覧",
"Source Season": "元シーズン",
"SrcIndex": "元インデックス",
"Status": "状態",
"Stay": "このまま",
"Stream dispositions": "ストリーム disposition",
"Stream tags": "ストリームタグ",
"Streams": "ストリーム",
"SubIndex": "サブインデックス",
"Substitute": "置換",
"Substitute pattern": "パターンを置換",
"Title": "タイトル",
"Type": "タイプ",
"Unable to update selected stream.": "選択したストリームを更新できませんでした。",
"Up": "上へ",
"Update Pattern": "パターンを更新",
"Updated media tag {tag!r}.": "メディアタグ {tag!r} を更新しました。",
"Updated stream #{index} ({track_type}).": "ストリーム #{index} ({track_type}) を更新しました。",
"Value": "値",
"Year": "年",
"Yes": "はい",
"add media tag: key='{key}' value='{value}'": "メディアタグを追加: key='{key}' value='{value}'",
"add {track_type} track: index={index} lang={language}": "{track_type}ストリームを追加: index={index} lang={language}",
"attached_pic": "attached_pic",
"attachment": "添付",
"audio": "音声",
"captions": "キャプション",
"change media tag: key='{key}' value='{value}'": "メディアタグを変更: key='{key}' value='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "ストリーム #{index} ({track_type}:{sub_index}) disposition を追加={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "ストリーム #{index} ({track_type}:{sub_index}) key を追加={key} value={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "ストリーム #{index} ({track_type}:{sub_index}) key を変更={key} value={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "ストリーム #{index} ({track_type}:{sub_index}) disposition を削除={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "ストリーム #{index} ({track_type}:{sub_index}) key を削除={key} value={value}",
"clean_effects": "効果音のみ",
"comment": "コメント",
"default": "デフォルト",
"dependent": "依存",
"descriptions": "解説",
"dub": "吹替",
"for pattern": "パターン用",
"forced": "強制",
"from": "元",
"from pattern": "パターンから",
"from show": "番組から",
"hearing_impaired": "聴覚障害者向け",
"karaoke": "カラオケ",
"lyrics": "歌詞",
"metadata": "メタデータ",
"non_diegetic": "非ダイジェティック",
"original": "オリジナル",
"pattern #{id}": "パターン #{id}",
"remove media tag: key='{key}' value='{value}'": "メディアタグを削除: key='{key}' value='{value}'",
"remove stream #{index}": "ストリーム #{index} を削除",
"show #{id}": "番組 #{id}",
"stereo": "ステレオ",
"still_image": "静止画",
"sub index": "サブインデックス",
"subtitle": "字幕",
"timed_thumbnails": "時間指定サムネイル",
"undefined": "未定義",
"unknown": "不明",
"video": "映像",
"visual_impaired": "視覚障害者向け"
}
}

361
assets/i18n/nb.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "Abkhazian",
"AFAR": "afar",
"AFRIKAANS": "Afrikansk",
"AKAN": "Akan",
"ALBANIAN": "Albansk",
"AMHARIC": "Amharic",
"ARABIC": "Arabisk",
"ARAGONESE": "aragonsk",
"ARMENIAN": "armensk",
"ASSAMESE": "assamisk",
"AVARIC": "Avaric",
"AVESTAN": "avestisk",
"AYMARA": "aymara",
"AZERBAIJANI": "Aserbadjansk",
"BAMBARA": "bambara",
"BASHKIR": "basjkirsk",
"BASQUE": "Baskisk",
"BELARUSIAN": "Hviterussisk",
"BENGALI": "bengali",
"BISLAMA": "bislama",
"BOKMAL": "Bokmål",
"BOSNIAN": "Bosnisk",
"BRETON": "Breton",
"BULGARIAN": "Bulgarsk",
"BURMESE": "burmesisk",
"CATALAN": "Catalan",
"CHAMORRO": "chamorro",
"CHECHEN": "Chechen",
"CHICHEWA": "Chichewa",
"CHINESE": "Kinesisk",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "tsjuvansk",
"CORNISH": "Cornish",
"CORSICAN": "Korsikansk",
"CREE": "Cree",
"CROATIAN": "Kroatisk",
"CZECH": "Tjekkisk",
"DANISH": "Dansk",
"DIVEHI": "Divehi",
"DUTCH": "Dutch",
"DZONGKHA": "dzongkha",
"ENGLISH": "Engelsk",
"ESPERANTO": "Esperanto",
"ESTONIAN": "Estonsk",
"EWE": "ewe",
"FAROESE": "færøysk",
"FIJIAN": "fijiansk",
"FILIPINO": "Filipino",
"FINNISH": "Finsk",
"FRENCH": "Fransk",
"FULAH": "fulani",
"GALICIAN": "Galisisk",
"GANDA": "ganda",
"GEORGIAN": "Georgisk",
"GERMAN": "Tysk",
"GREEK": "Greek",
"GUARANI": "Guarani",
"GUJARATI": "gujarati",
"HAITIAN": "Haitian",
"HAUSA": "Hausa",
"HEBREW": "Hebraisk",
"HERERO": "Herero",
"HINDI": "hindi",
"HIRI_MOTU": "Hiri Motu",
"HUNGARIAN": "Ungarsk",
"ICELANDIC": "Islandsk",
"IDO": "ido",
"IGBO": "ibo",
"INDONESIAN": "Indonesisk",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "inuktitut",
"INUPIAQ": "inupiak",
"IRISH": "Irsk",
"ITALIAN": "Italiensk",
"JAPANESE": "Japansk",
"JAVANESE": "Javanesisk",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "kannada",
"KANURI": "Kanuri",
"KASHMIRI": "kasjmiri",
"KAZAKH": "kasakhisk",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "kinjarwanda",
"KIRGHIZ": "Kirghiz",
"KOMI": "komi",
"KONGO": "kikongo",
"KOREAN": "Koreansk",
"KUANYAMA": "Kuanyama",
"KURDISH": "Kurdisk",
"LAO": "laotisk",
"LATIN": "Latin",
"LATVIAN": "Latvisk",
"LIMBURGAN": "Limburgan",
"LINGALA": "lingala",
"LITHUANIAN": "Litauisk",
"LUBA_KATANGA": "luba-katanga",
"LUXEMBOURGISH": "Luxembourgish",
"MACEDONIAN": "Makedonsk",
"MALAGASY": "madagassisk",
"MALAY": "malayisk",
"MALAYALAM": "malayalam",
"MALTESE": "Maltisk",
"MANX": "manx",
"MAORI": "Maori",
"MARATHI": "Marathi",
"MARSHALLESE": "Marshallese",
"MONGOLIAN": "Mongolsk",
"NAURU": "nauru",
"NAVAJO": "Navajo",
"NDONGA": "Ndonga",
"NEPALI": "nepalsk",
"NORTHERN_SAMI": "nordsamisk",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "Norsk",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "ojibwa",
"ORIYA": "oriya",
"OROMO": "oromo",
"OSSETIAN": "Ossetian",
"PALI": "Pali",
"PANJABI": "Panjabi",
"PERSIAN": "Persisk",
"POLISH": "Polsk",
"PORTUGUESE": "Portugisisk",
"PUSHTO": "Pushto",
"QUECHUA": "quechua",
"ROMANIAN": "Romanian",
"ROMANSH": "Romansh",
"RUNDI": "rundi",
"RUSSIAN": "Russisk",
"SAMOAN": "samoansk",
"SANGO": "sango",
"SANSKRIT": "sanskrit",
"SARDINIAN": "Sardinsk",
"SCOTTISH_GAELIC": "Scottish Gaelic",
"SERBIAN": "Serbisk",
"SHONA": "Shona",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "sindhi",
"SINHALA": "Sinhala",
"SLOVAK": "Slovakisk",
"SLOVENIAN": "Slovensk",
"SOMALI": "somalisk",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "Spanish",
"SUNDANESE": "sundanesisk",
"SWAHILI": "swahili",
"SWATI": "swati",
"SWEDISH": "Svensk",
"TAGALOG": "tagalog",
"TAHITIAN": "Tahitisk",
"TAJIK": "Tajik",
"TAMIL": "Tamilsk",
"TATAR": "tatarisk",
"TELUGU": "telugu",
"THAI": "Thai",
"TIBETAN": "tibetansk",
"TIGRINYA": "Tigrinya",
"TONGA": "Tonga",
"TSONGA": "tsonga",
"TSWANA": "tswana",
"TURKISH": "Tyrkisk",
"TURKMEN": "turkmensk",
"TWI": "twi",
"UIGHUR": "Uighur",
"UKRAINIAN": "Ukrainsk",
"UNDEFINED": "undefined",
"URDU": "urdu",
"UZBEK": "usbekisk",
"VENDA": "venda",
"VIETNAMESE": "Vietnamesisk",
"VOLAPUK": "Volapük",
"WALLOON": "vallonsk",
"WELSH": "Walisisk",
"WESTERN_FRISIAN": "Vestfrisisk",
"WOLOF": "wolof",
"XHOSA": "Xhosa",
"YIDDISH": "jiddisk",
"YORUBA": "joruba",
"ZHUANG": "Zhuang",
"ZULU": "Zulu"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<Ny serie>",
"Add": "Legg til",
"Add Pattern": "Legg til mønster",
"Apply": "Bruk",
"Apply failed: {error}": "Kunne ikke bruke endringene: {error}",
"Are you sure to delete the following filename pattern?": "Er du sikker på at du vil slette følgende filnavnmønster?",
"Are you sure to delete the following shifted season?": "Er du sikker på at du vil slette følgende forskjøvede sesong?",
"Are you sure to delete the following show?": "Er du sikker på at du vil slette følgende serie?",
"Are you sure to delete the following {track_type} track?": "Er du sikker på at du vil slette følgende {track_type}-spor?",
"Are you sure to delete this tag?": "Er du sikker på at du vil slette denne taggen?",
"Audio Layout": "Lydoppsett",
"Back": "Tilbake",
"Cancel": "Avbryt",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "Kan ikke legge til en ny strøm med disposisjonsflagget 'default' eller 'forced' satt",
"Changes applied and file reloaded.": "Endringene er brukt og filen er lastet inn på nytt.",
"Cleanup": "Rydd opp",
"Cleanup disabled.": "Rydding deaktivert.",
"Cleanup enabled.": "Rydding aktivert.",
"Codec": "Kodek",
"Continuing edit session.": "Fortsetter redigeringsøkten.",
"Default": "Standard",
"Delete": "Slett",
"Delete Show": "Slett serie",
"Deleted media tag {tag!r}.": "Mediataggen {tag!r} ble slettet.",
"Differences": "Forskjeller",
"Differences (file->db/output)": "Forskjeller (fil->DB/utdata)",
"Discard": "Forkast",
"Discard pending metadata changes and quit?": "Forkaste ventende metadataendringer og avslutte?",
"Discard pending metadata changes and reload the file state?": "Forkaste ventende metadataendringer og laste filtilstanden på nytt?",
"Down": "Ned",
"Dry-run: would rewrite via temporary file {target_path}": "Tørrkjøring: ville skrevet om via midlertidig fil {target_path}",
"Edit": "Rediger",
"Edit Pattern": "Rediger mønster",
"Edit Show": "Rediger serie",
"Edit filename pattern": "Rediger filnavnmønster",
"Edit shifted season": "Rediger forskjøvet sesong",
"Edit stream": "Rediger strøm",
"Episode Offset": "Episodeforskyvning",
"Episode offset": "Episodeforskyvning",
"File": "Fil",
"File patterns": "Filmønstre",
"First Episode": "Første episode",
"First episode": "Første episode",
"Forced": "Tvungen",
"Help": "Hjelp",
"Help Screen": "Hjelpeskjerm",
"ID": "ID",
"Identify": "Identifiser",
"Index": "Indeks",
"Index / Subindex": "Indeks / Underindeks",
"Index Episode Digits": "Siffer for episodeindeks",
"Index Season Digits": "Siffer for sesongindeks",
"Indicator Edisode Digits": "Siffer for episodeindikator",
"Indicator Season Digits": "Siffer for sesongindikator",
"Keep Editing": "Fortsett redigeringen",
"Keeping pending changes.": "Beholder ventende endringer.",
"Key": "Nøkkel",
"Language": "Språk",
"Last Episode": "Siste episode",
"Last episode": "Siste episode",
"Layout": "Oppsett",
"Media Tags": "Mediatagger",
"More than one default audio stream detected and no prompt set": "Mer enn én standard lydstrøm funnet og ingen forespørsel satt",
"More than one default audio stream detected! Please select stream": "Mer enn én standard lydstrøm funnet. Velg strøm",
"More than one default subtitle stream detected and no prompt set": "Mer enn én standard undertekststrøm funnet og ingen forespørsel satt",
"More than one default subtitle stream detected! Please select stream": "Mer enn én standard undertekststrøm funnet. Velg strøm",
"More than one default video stream detected and no prompt set": "Mer enn én standard videostrøm funnet og ingen forespørsel satt",
"More than one default video stream detected! Please select stream": "Mer enn én standard videostrøm funnet. Velg strøm",
"More than one forced audio stream detected and no prompt set": "Mer enn én tvungen lydstrøm funnet og ingen forespørsel satt",
"More than one forced audio stream detected! Please select stream": "Mer enn én tvungen lydstrøm funnet. Velg strøm",
"More than one forced subtitle stream detected and no prompt set": "Mer enn én tvungen undertekststrøm funnet og ingen forespørsel satt",
"More than one forced subtitle stream detected! Please select stream": "Mer enn én tvungen undertekststrøm funnet. Velg strøm",
"More than one forced video stream detected and no prompt set": "Mer enn én tvungen videostrøm funnet og ingen forespørsel satt",
"More than one forced video stream detected! Please select stream": "Mer enn én tvungen videostrøm funnet. Velg strøm",
"Name": "Navn",
"New Pattern": "Nytt mønster",
"New Show": "Ny serie",
"New filename pattern": "Nytt filnavnmønster",
"New shifted season": "Ny forskjøvet sesong",
"New stream": "Ny strøm",
"No": "Nei",
"No changes to apply.": "Ingen endringer å bruke.",
"No changes to revert.": "Ingen endringer å tilbakestille.",
"Normalization disabled.": "Normalisering deaktivert.",
"Normalization enabled.": "Normalisering aktivert.",
"Normalize": "Normaliser",
"Notes": "Notater",
"Pattern": "Mønster",
"Planned Changes (file->edited output)": "Planlagte endringer (fil->redigert utdata)",
"Quality": "Kvalitet",
"Quit": "Avslutt",
"Remove Pattern": "Fjern mønster",
"Revert": "Tilbakestill",
"Reverted pending changes.": "Ventende endringer ble tilbakestilt.",
"Save": "Lagre",
"Season Offset": "Sesongforskyvning",
"Select a stream first.": "Velg en strøm først.",
"Set Default": "Sett som standard",
"Set Forced": "Sett som tvungen",
"Settings Screen": "Innstillingsskjerm",
"Numbering Mapping": "Forskjøvne sesonger",
"Show": "Serie",
"Shows": "Serier",
"Source Season": "Kildesesong",
"SrcIndex": "Kildeindeks",
"Status": "Status",
"Stay": "Bli",
"Stream dispositions": "Strømdisposisjoner",
"Stream tags": "Strømtagger",
"Streams": "Strømmer",
"SubIndex": "Underindeks",
"Substitute": "Erstatt",
"Substitute pattern": "Erstatt mønster",
"Title": "Tittel",
"Type": "Type",
"Unable to update selected stream.": "Kunne ikke oppdatere valgt strøm.",
"Up": "Opp",
"Update Pattern": "Oppdater mønster",
"Updated media tag {tag!r}.": "Mediataggen {tag!r} ble oppdatert.",
"Updated stream #{index} ({track_type}).": "Strøm #{index} ({track_type}) oppdatert.",
"Value": "Verdi",
"Year": "År",
"Yes": "Ja",
"add media tag: key='{key}' value='{value}'": "legg til mediatagg: nøkkel='{key}' verdi='{value}'",
"add {track_type} track: index={index} lang={language}": "legg til {track_type}-spor: indeks={index} språk={language}",
"attached_pic": "attached_pic",
"attachment": "vedlegg",
"audio": "lyd",
"captions": "teksting",
"change media tag: key='{key}' value='{value}'": "endre mediatagg: nøkkel='{key}' verdi='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "endre strøm #{index} ({track_type}:{sub_index}) legg til disposisjon={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "endre strøm #{index} ({track_type}:{sub_index}) legg til nøkkel={key} verdi={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "endre strøm #{index} ({track_type}:{sub_index}) endre nøkkel={key} verdi={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "endre strøm #{index} ({track_type}:{sub_index}) fjern disposisjon={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "endre strøm #{index} ({track_type}:{sub_index}) fjern nøkkel={key} verdi={value}",
"clean_effects": "bare effekter",
"comment": "kommentar",
"default": "standard",
"dependent": "avhengig",
"descriptions": "beskrivelser",
"dub": "dubbet",
"for pattern": "for mønster",
"forced": "tvungen",
"from": "fra",
"from pattern": "fra mønster",
"from show": "fra serie",
"hearing_impaired": "hørselshemmet",
"karaoke": "karaoke",
"lyrics": "sangtekst",
"metadata": "metadata",
"non_diegetic": "ikke-diegetisk",
"original": "original",
"pattern #{id}": "mønster #{id}",
"remove media tag: key='{key}' value='{value}'": "fjern mediatagg: nøkkel='{key}' verdi='{value}'",
"remove stream #{index}": "fjern strøm #{index}",
"show #{id}": "serie #{id}",
"stereo": "stereo",
"still_image": "stillbilde",
"sub index": "underindeks",
"subtitle": "undertekst",
"timed_thumbnails": "tidsbestemte miniatyrer",
"undefined": "udefinert",
"unknown": "ukjent",
"video": "video",
"visual_impaired": "synshemmet"
}
}

361
assets/i18n/pt.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "abkhazian",
"AFAR": "afar",
"AFRIKAANS": "Africanos",
"AKAN": "Akan",
"ALBANIAN": "Albanês",
"AMHARIC": "Amárico",
"ARABIC": "Árabe",
"ARAGONESE": "Aragonês",
"ARMENIAN": "arménio",
"ASSAMESE": "assamês",
"AVARIC": "Avárico",
"AVESTAN": "avéstico",
"AYMARA": "aimara",
"AZERBAIJANI": "Azerbaijani",
"BAMBARA": "bambara",
"BASHKIR": "bashkir",
"BASQUE": "Basco",
"BELARUSIAN": "Bielorusso",
"BENGALI": "Bengali",
"BISLAMA": "bislamá",
"BOKMAL": "Bokmål",
"BOSNIAN": "Bósnio",
"BRETON": "Bretão",
"BULGARIAN": "Búlgaro",
"BURMESE": "birmanês",
"CATALAN": "Catalan",
"CHAMORRO": "chamorro",
"CHECHEN": "Checheno",
"CHICHEWA": "Chichewa",
"CHINESE": "Chinês",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "chuvash",
"CORNISH": "Córnico",
"CORSICAN": "córsico",
"CREE": "Cree",
"CROATIAN": "Croata",
"CZECH": "Checo",
"DANISH": "Dinamarquês",
"DIVEHI": "Divehi",
"DUTCH": "Dutch",
"DZONGKHA": "dzonga",
"ENGLISH": "Inglês",
"ESPERANTO": "Esperanto",
"ESTONIAN": "Estoniano",
"EWE": "eve",
"FAROESE": "Faroês",
"FIJIAN": "fijiano",
"FILIPINO": "Filipino",
"FINNISH": "Finlandês",
"FRENCH": "Francês",
"FULAH": "fula",
"GALICIAN": "Galego",
"GANDA": "luganda",
"GEORGIAN": "georgiano",
"GERMAN": "Alemão",
"GREEK": "Greek",
"GUARANI": "Guarani",
"GUJARATI": "Guzerate",
"HAITIAN": "Haitian",
"HAUSA": "Hauçá",
"HEBREW": "Hebreu",
"HERERO": "Hereró",
"HINDI": "Hindi",
"HIRI_MOTU": "Hiri Motu",
"HUNGARIAN": "Húngaro",
"ICELANDIC": "Islandês",
"IDO": "ido",
"IGBO": "ibo",
"INDONESIAN": "Indonésio",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "inuktitut",
"INUPIAQ": "Inupiaque",
"IRISH": "Irlandês",
"ITALIAN": "Italiano",
"JAPANESE": "Japonês",
"JAVANESE": "Javanês",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "Kannada",
"KANURI": "Canúri",
"KASHMIRI": "kashmiri",
"KAZAKH": "cazaque",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "kinyarwanda",
"KIRGHIZ": "Kirghiz",
"KOMI": "komi",
"KONGO": "congolês",
"KOREAN": "Coreano",
"KUANYAMA": "Kuanyama",
"KURDISH": "Curdo",
"LAO": "Laosiano",
"LATIN": "Latim",
"LATVIAN": "Letão",
"LIMBURGAN": "Limburgan",
"LINGALA": "Lingala",
"LITHUANIAN": "Lituano",
"LUBA_KATANGA": "luba-catanga",
"LUXEMBOURGISH": "Luxembourgish",
"MACEDONIAN": "Macedônio",
"MALAGASY": "malgaxe",
"MALAY": "Malaio",
"MALAYALAM": "malaiala",
"MALTESE": "Maltês",
"MANX": "Manx",
"MAORI": "Maori",
"MARATHI": "marata",
"MARSHALLESE": "Marshalês",
"MONGOLIAN": "Mongol",
"NAURU": "nauruano",
"NAVAJO": "Navajo",
"NDONGA": "dongo",
"NEPALI": "Nepalês",
"NORTHERN_SAMI": "northern sami",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "Norueguês",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "ojibwa",
"ORIYA": "oriya",
"OROMO": "Oromo",
"OSSETIAN": "Ossetian",
"PALI": "Páli",
"PANJABI": "Panjabi",
"PERSIAN": "Persa",
"POLISH": "Polaco",
"PORTUGUESE": "Português",
"PUSHTO": "Pushto",
"QUECHUA": "quíchua",
"ROMANIAN": "Romanian",
"ROMANSH": "Romanche",
"RUNDI": "rundi",
"RUSSIAN": "Russo",
"SAMOAN": "Samoano",
"SANGO": "sango",
"SANSKRIT": "Sânscrito",
"SARDINIAN": "Sardo",
"SCOTTISH_GAELIC": "Scottish Gaelic",
"SERBIAN": "Sérvio",
"SHONA": "Xona",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "sindi",
"SINHALA": "Sinhala",
"SLOVAK": "Eslovaco",
"SLOVENIAN": "Eslovêno",
"SOMALI": "somali",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "Spanish",
"SUNDANESE": "sundanês",
"SWAHILI": "suaíli",
"SWATI": "swati",
"SWEDISH": "Sueco",
"TAGALOG": "Tagalo",
"TAHITIAN": "Taitiano",
"TAJIK": "Tadjique",
"TAMIL": "Tâmil",
"TATAR": "tatar",
"TELUGU": "Telugu",
"THAI": "Tailandês",
"TIBETAN": "tibetano",
"TIGRINYA": "Tigrínia",
"TONGA": "Tonga",
"TSONGA": "tsonga",
"TSWANA": "tswana",
"TURKISH": "Turco",
"TURKMEN": "turcomano",
"TWI": "twi",
"UIGHUR": "Uighur",
"UKRAINIAN": "Ucraniano",
"UNDEFINED": "undefined",
"URDU": "urdu",
"UZBEK": "usbeque",
"VENDA": "venda",
"VIETNAMESE": "Vietnamita",
"VOLAPUK": "Volapuque",
"WALLOON": "walloon",
"WELSH": "galês",
"WESTERN_FRISIAN": "Frísio ocidental",
"WOLOF": "uolofe",
"XHOSA": "xosa",
"YIDDISH": "iídiche",
"YORUBA": "ioruba",
"ZHUANG": "Zhuang",
"ZULU": "zulu"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<Nova série>",
"Add": "Adicionar",
"Add Pattern": "Adicionar padrão",
"Apply": "Aplicar",
"Apply failed: {error}": "Falha ao aplicar: {error}",
"Are you sure to delete the following filename pattern?": "Tem certeza de que deseja excluir o seguinte padrão de nome de arquivo?",
"Are you sure to delete the following shifted season?": "Tem certeza de que deseja excluir a seguinte temporada deslocada?",
"Are you sure to delete the following show?": "Tem certeza de que deseja excluir a seguinte série?",
"Are you sure to delete the following {track_type} track?": "Tem certeza de que deseja excluir a seguinte faixa {track_type}?",
"Are you sure to delete this tag?": "Tem certeza de que deseja excluir esta tag?",
"Audio Layout": "Layout de áudio",
"Back": "Voltar",
"Cancel": "Cancelar",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "Não é possível adicionar outro fluxo com a flag de disposição 'default' ou 'forced' definida",
"Changes applied and file reloaded.": "Alterações aplicadas e arquivo recarregado.",
"Cleanup": "Limpeza",
"Cleanup disabled.": "Limpeza desativada.",
"Cleanup enabled.": "Limpeza ativada.",
"Codec": "Codec",
"Continuing edit session.": "Continuando a sessão de edição.",
"Default": "Padrão",
"Delete": "Excluir",
"Delete Show": "Excluir série",
"Deleted media tag {tag!r}.": "Tag de mídia {tag!r} excluída.",
"Differences": "Diferenças",
"Differences (file->db/output)": "Diferenças (arquivo->BD/saída)",
"Discard": "Descartar",
"Discard pending metadata changes and quit?": "Descartar alterações pendentes de metadados e sair?",
"Discard pending metadata changes and reload the file state?": "Descartar alterações pendentes de metadados e recarregar o estado do arquivo?",
"Down": "Baixo",
"Dry-run: would rewrite via temporary file {target_path}": "Execução simulada: regravaria via arquivo temporário {target_path}",
"Edit": "Editar",
"Edit Pattern": "Editar padrão",
"Edit Show": "Editar série",
"Edit filename pattern": "Editar padrão de nome de arquivo",
"Edit shifted season": "Editar temporada deslocada",
"Edit stream": "Editar fluxo",
"Episode Offset": "Deslocamento de episódio",
"Episode offset": "Deslocamento de episódio",
"File": "Arquivo",
"File patterns": "Padrões de arquivo",
"First Episode": "Primeiro episódio",
"First episode": "Primeiro episódio",
"Forced": "Forçado",
"Help": "Ajuda",
"Help Screen": "Tela de ajuda",
"ID": "ID",
"Identify": "Identificar",
"Index": "Índice",
"Index / Subindex": "Índice / Subíndice",
"Index Episode Digits": "Dígitos do índice do episódio",
"Index Season Digits": "Dígitos do índice da temporada",
"Indicator Edisode Digits": "Dígitos do indicador do episódio",
"Indicator Season Digits": "Dígitos do indicador da temporada",
"Keep Editing": "Continuar editando",
"Keeping pending changes.": "Mantendo alterações pendentes.",
"Key": "Chave",
"Language": "Idioma",
"Last Episode": "Último episódio",
"Last episode": "Último episódio",
"Layout": "Layout",
"Media Tags": "Tags de mídia",
"More than one default audio stream detected and no prompt set": "Mais de um fluxo de áudio padrão detectado e nenhum prompt definido",
"More than one default audio stream detected! Please select stream": "Mais de um fluxo de áudio padrão detectado! Selecione o fluxo",
"More than one default subtitle stream detected and no prompt set": "Mais de um fluxo de legenda padrão detectado e nenhum prompt definido",
"More than one default subtitle stream detected! Please select stream": "Mais de um fluxo de legenda padrão detectado! Selecione o fluxo",
"More than one default video stream detected and no prompt set": "Mais de um fluxo de vídeo padrão detectado e nenhum prompt definido",
"More than one default video stream detected! Please select stream": "Mais de um fluxo de vídeo padrão detectado! Selecione o fluxo",
"More than one forced audio stream detected and no prompt set": "Mais de um fluxo de áudio forçado detectado e nenhum prompt definido",
"More than one forced audio stream detected! Please select stream": "Mais de um fluxo de áudio forçado detectado! Selecione o fluxo",
"More than one forced subtitle stream detected and no prompt set": "Mais de um fluxo de legenda forçada detectado e nenhum prompt definido",
"More than one forced subtitle stream detected! Please select stream": "Mais de um fluxo de legenda forçada detectado! Selecione o fluxo",
"More than one forced video stream detected and no prompt set": "Mais de um fluxo de vídeo forçado detectado e nenhum prompt definido",
"More than one forced video stream detected! Please select stream": "Mais de um fluxo de vídeo forçado detectado! Selecione o fluxo",
"Name": "Nome",
"New Pattern": "Novo padrão",
"New Show": "Nova série",
"New filename pattern": "Novo padrão de nome de arquivo",
"New shifted season": "Nova temporada deslocada",
"New stream": "Novo fluxo",
"No": "Não",
"No changes to apply.": "Nenhuma alteração para aplicar.",
"No changes to revert.": "Nenhuma alteração para reverter.",
"Normalization disabled.": "Normalização desativada.",
"Normalization enabled.": "Normalização ativada.",
"Normalize": "Normalizar",
"Notes": "Notas",
"Pattern": "Padrão",
"Planned Changes (file->edited output)": "Alterações planejadas (arquivo->saída editada)",
"Quality": "Qualidade",
"Quit": "Sair",
"Remove Pattern": "Remover padrão",
"Revert": "Reverter",
"Reverted pending changes.": "Alterações pendentes revertidas.",
"Save": "Salvar",
"Season Offset": "Deslocamento de temporada",
"Select a stream first.": "Selecione um fluxo primeiro.",
"Set Default": "Definir como padrão",
"Set Forced": "Definir como forçado",
"Settings Screen": "Tela de configurações",
"Numbering Mapping": "Temporadas deslocadas",
"Show": "Série",
"Shows": "Séries",
"Source Season": "Temporada de origem",
"SrcIndex": "Índice de origem",
"Status": "Status",
"Stay": "Permanecer",
"Stream dispositions": "Disposições do fluxo",
"Stream tags": "Tags do fluxo",
"Streams": "Fluxos",
"SubIndex": "Subíndice",
"Substitute": "Substituir",
"Substitute pattern": "Substituir padrão",
"Title": "Título",
"Type": "Tipo",
"Unable to update selected stream.": "Não foi possível atualizar o fluxo selecionado.",
"Up": "Cima",
"Update Pattern": "Atualizar padrão",
"Updated media tag {tag!r}.": "Tag de mídia {tag!r} atualizada.",
"Updated stream #{index} ({track_type}).": "Fluxo #{index} ({track_type}) atualizado.",
"Value": "Valor",
"Year": "Ano",
"Yes": "Sim",
"add media tag: key='{key}' value='{value}'": "adicionar tag de mídia: chave='{key}' valor='{value}'",
"add {track_type} track: index={index} lang={language}": "adicionar faixa {track_type}: índice={index} idioma={language}",
"attached_pic": "attached_pic",
"attachment": "anexo",
"audio": "áudio",
"captions": "legendas",
"change media tag: key='{key}' value='{value}'": "alterar tag de mídia: chave='{key}' valor='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "alterar fluxo #{index} ({track_type}:{sub_index}) adicionar disposição={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "alterar fluxo #{index} ({track_type}:{sub_index}) adicionar chave={key} valor={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "alterar fluxo #{index} ({track_type}:{sub_index}) alterar chave={key} valor={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "alterar fluxo #{index} ({track_type}:{sub_index}) remover disposição={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "alterar fluxo #{index} ({track_type}:{sub_index}) remover chave={key} valor={value}",
"clean_effects": "apenas efeitos",
"comment": "comentário",
"default": "padrão",
"dependent": "dependente",
"descriptions": "descrições",
"dub": "dublado",
"for pattern": "para o padrão",
"forced": "forçado",
"from": "de",
"from pattern": "do padrão",
"from show": "da série",
"hearing_impaired": "deficiência auditiva",
"karaoke": "karaokê",
"lyrics": "letra",
"metadata": "metadados",
"non_diegetic": "não diegético",
"original": "original",
"pattern #{id}": "padrão #{id}",
"remove media tag: key='{key}' value='{value}'": "remover tag de mídia: chave='{key}' valor='{value}'",
"remove stream #{index}": "remover fluxo #{index}",
"show #{id}": "série #{id}",
"stereo": "estéreo",
"still_image": "imagem estática",
"sub index": "subíndice",
"subtitle": "legenda",
"timed_thumbnails": "miniaturas temporizadas",
"undefined": "indefinido",
"unknown": "desconhecido",
"video": "vídeo",
"visual_impaired": "deficiência visual"
}
}

361
assets/i18n/ta.json Normal file
View File

@@ -0,0 +1,361 @@
{
"iso_languages": {
"ABKHAZIAN": "அப்காசியன்",
"AFAR": "அஃபர்",
"AFRIKAANS": "ஆப்ரிக்கான்ச்",
"AKAN": "அகான்",
"ALBANIAN": "அல்பேனியன்",
"AMHARIC": "அம்ஆரிக்",
"ARABIC": "அராபிக்",
"ARAGONESE": "அரகோன்ச்",
"ARMENIAN": "அர்மேனியன்",
"ASSAMESE": "அச்சாமி",
"AVARIC": "அவாரிக்",
"AVESTAN": "அவேச்டன்",
"AYMARA": "அய்மாரா",
"AZERBAIJANI": "அசெர்பெய்சானி",
"BAMBARA": "பம்பரா",
"BASHKIR": "பாச்கிர்",
"BASQUE": "பாச்க்",
"BELARUSIAN": "பெலாருசியன்",
"BENGALI": "பெங்காலி",
"BISLAMA": "பிச்லாமா",
"BOKMAL": "Bokmål",
"BOSNIAN": "போச்னியன்",
"BRETON": "ப்ரெடன்",
"BULGARIAN": "பல்கேரியன்",
"BURMESE": "பர்மீசி",
"CATALAN": "Catalan",
"CHAMORRO": "சாமோர்ரோ",
"CHECHEN": "செக்சன்",
"CHICHEWA": "Chichewa",
"CHINESE": "சைனீச்",
"CHURCH_SLAVIC": "Church Slavic",
"CHUVASH": "சுவாச்",
"CORNISH": "கோர்னிச்",
"CORSICAN": "கோர்சிகேன்",
"CREE": "சிரீ",
"CROATIAN": "குரேசியன்",
"CZECH": "செக்",
"DANISH": "டானிச்",
"DIVEHI": "Divehi",
"DUTCH": "Dutch",
"DZONGKHA": "ட்சொங்க்கா",
"ENGLISH": "ஆங்கிலம்",
"ESPERANTO": "எச்பெரான்டொ",
"ESTONIAN": "எச்டோனியன்",
"EWE": "இவ்",
"FAROESE": "ஃபரோச்",
"FIJIAN": "ஃபிசியன்",
"FILIPINO": "Filipino",
"FINNISH": "பின்னிச்",
"FRENCH": "பிரெஞ்சு",
"FULAH": "ஃபுல்லா",
"GALICIAN": "காலிசியன்",
"GANDA": "கான்டா",
"GEORGIAN": "சியார்சியன்",
"GERMAN": "செர்மன்",
"GREEK": "Greek",
"GUARANI": "குர்ரானி",
"GUJARATI": "குசராத்தி",
"HAITIAN": "Haitian",
"HAUSA": "ஔசா",
"HEBREW": "ஈப்ரு",
"HERERO": "இரீரோ",
"HINDI": "இந்தி",
"HIRI_MOTU": "இரி மோட்டு",
"HUNGARIAN": "அங்கேரியன்",
"ICELANDIC": "ஐச்லாண்டிக்",
"IDO": "ஐடூ",
"IGBO": "இக்போ",
"INDONESIAN": "இந்தோனேசியன்",
"INTERLINGUA": "Interlingua",
"INTERLINGUE": "Interlingue",
"INUKTITUT": "இனுடிடட்",
"INUPIAQ": "இனுபைக்யூ",
"IRISH": "ஐரிச்",
"ITALIAN": "இத்தாலியன்",
"JAPANESE": "சப்பானிய",
"JAVANESE": "சவானிச்",
"KALAALLISUT": "Kalaallisut",
"KANNADA": "கன்னடம்",
"KANURI": "கனுரி",
"KASHMIRI": "காச்மீரி",
"KAZAKH": "கசாக்ச்",
"KHMER": "Khmer",
"KIKUYU": "Kikuyu",
"KINYARWANDA": "கின்யார்வான்டா",
"KIRGHIZ": "Kirghiz",
"KOMI": "கோமி",
"KONGO": "காங்கோ",
"KOREAN": "கொரியன்",
"KUANYAMA": "Kuanyama",
"KURDISH": "குர்திச்",
"LAO": "லாவோ",
"LATIN": "லத்தீன்",
"LATVIAN": "லாட்வியன்",
"LIMBURGAN": "Limburgan",
"LINGALA": "லின்காலா",
"LITHUANIAN": "லிதுவேனியன்",
"LUBA_KATANGA": "லூபா-கடான்கா",
"LUXEMBOURGISH": "Luxembourgish",
"MACEDONIAN": "மேசடோனியன்",
"MALAGASY": "மலகாசி",
"MALAY": "மலாய்",
"MALAYALAM": "மலையாளம்",
"MALTESE": "மல்டீச்",
"MANX": "மான்ச்",
"MAORI": "மௌரி",
"MARATHI": "மராத்தி",
"MARSHALLESE": "மார்சலீசீ",
"MONGOLIAN": "மங்கோலியன்",
"NAURU": "நவூரு",
"NAVAJO": "Navajo",
"NDONGA": "நடோன்கா",
"NEPALI": "நேபாலி",
"NORTHERN_SAMI": "கிழக்கு சாமி",
"NORTH_NDEBELE": "North Ndebele",
"NORWEGIAN": "நார்வேசியன்",
"NORWEGIAN_NYNORSK": "Nynorsk",
"OCCITAN": "Occitan",
"OJIBWA": "ஒசிப்வா",
"ORIYA": "ஒரியா",
"OROMO": "ஒரோமோ",
"OSSETIAN": "Ossetian",
"PALI": "பாலி",
"PANJABI": "Panjabi",
"PERSIAN": "பெர்சியன்",
"POLISH": "போலிச்",
"PORTUGUESE": "போர்த்துக்கீசிய",
"PUSHTO": "Pushto",
"QUECHUA": "க்யுசோ",
"ROMANIAN": "Romanian",
"ROMANSH": "ரோமான்ச்ச்",
"RUNDI": "ருண்டி",
"RUSSIAN": "ரச்யன்",
"SAMOAN": "சாமோயன்",
"SANGO": "சான்ங்கோ",
"SANSKRIT": "சான்ச்கிரிட்",
"SARDINIAN": "சார்டினியன்",
"SCOTTISH_GAELIC": "Scottish Gaelic",
"SERBIAN": "செர்பியன்",
"SHONA": "சோனா",
"SICHUAN_YI": "Sichuan Yi",
"SINDHI": "சிந்தி",
"SINHALA": "Sinhala",
"SLOVAK": "சுலோவாக்",
"SLOVENIAN": "ச்லோவெனியன்",
"SOMALI": "சோமாலி",
"SOUTHERN_SOTHO": "Southern Sotho",
"SOUTH_NDEBELE": "South Ndebele",
"SPANISH": "Spanish",
"SUNDANESE": "சூடானீச்",
"SWAHILI": "ச்வாஇலி",
"SWATI": "ச்வாதி",
"SWEDISH": "சுவீடிச்",
"TAGALOG": "டங்லாக்",
"TAHITIAN": "தஇதியன்",
"TAJIK": "தாசிக்",
"TAMIL": "தமிழ்",
"TATAR": "டாட்டர்",
"TELUGU": "தெலுங்கு",
"THAI": "தாய்",
"TIBETAN": "திபெத்திய",
"TIGRINYA": "தைக்ரின்யா",
"TONGA": "Tonga",
"TSONGA": "ட்சாங்கோ",
"TSWANA": "ட்ச்வனா",
"TURKISH": "துருக்கி",
"TURKMEN": "டர்க்மென்",
"TWI": "டிவி",
"UIGHUR": "Uighur",
"UKRAINIAN": "உக்ரெனியன்",
"UNDEFINED": "undefined",
"URDU": "உருது",
"UZBEK": "உச்பெக்",
"VENDA": "வேண்டா",
"VIETNAMESE": "வியட்னாம்",
"VOLAPUK": "வோலாபுக்",
"WALLOON": "வாலூன்",
"WELSH": "வெல்ச்",
"WESTERN_FRISIAN": "மேற்கு ஃபிரிசியன்",
"WOLOF": "ஓலோஃப்",
"XHOSA": "சோசா",
"YIDDISH": "இட்டிச்",
"YORUBA": "யோருபா",
"ZHUANG": "Zhuang",
"ZULU": "சுலு"
},
"phrases": {
"5.0(side)": "5.0(side)",
"5.1(side)": "5.1(side)",
"6.1": "6.1",
"6ch": "6ch",
"7.1": "7.1",
"<New show>": "<புதிய தொடர்>",
"Add": "சேர்",
"Add Pattern": "வடிவத்தை சேர்",
"Apply": "பயன்படுத்து",
"Apply failed: {error}": "பயன்படுத்தல் தோல்வியடைந்தது: {error}",
"Are you sure to delete the following filename pattern?": "பின்வரும் கோப்பு பெயர் வடிவத்தை நீக்க விரும்புகிறீர்களா?",
"Are you sure to delete the following shifted season?": "பின்வரும் மாற்றிய சீசனை நீக்க விரும்புகிறீர்களா?",
"Are you sure to delete the following show?": "பின்வரும் தொடரை நீக்க விரும்புகிறீர்களா?",
"Are you sure to delete the following {track_type} track?": "பின்வரும் {track_type} ஸ்ட்ரீமை நீக்க விரும்புகிறீர்களா?",
"Are you sure to delete this tag?": "இந்த குறிச்சொல்லை நீக்க விரும்புகிறீர்களா?",
"Audio Layout": "ஒலி அமைப்பு",
"Back": "பின்",
"Cancel": "ரத்து",
"Cannot add another stream with disposition flag 'default' or 'forced' set": "'default' அல்லது 'forced' disposition கொடி அமைந்த மற்றொரு ஸ்ட்ரீமை சேர்க்க முடியாது",
"Changes applied and file reloaded.": "மாற்றங்கள் பயன்படுத்தப்பட்டு கோப்பு மீளேற்றப்பட்டது.",
"Cleanup": "சுத்திகரிப்பு",
"Cleanup disabled.": "சுத்திகரிப்பு முடக்கப்பட்டது.",
"Cleanup enabled.": "சுத்திகரிப்பு இயக்கப்பட்டது.",
"Codec": "கோடெக்",
"Continuing edit session.": "திருத்த அமர்வு தொடர்கிறது.",
"Default": "இயல்புநிலை",
"Delete": "நீக்கு",
"Delete Show": "தொடரை நீக்கு",
"Deleted media tag {tag!r}.": "மீடியா குறிச்சொல் {tag!r} நீக்கப்பட்டது.",
"Differences": "வேறுபாடுகள்",
"Differences (file->db/output)": "வேறுபாடுகள் (கோப்பு->DB/வெளியீடு)",
"Discard": "கைவிடு",
"Discard pending metadata changes and quit?": "நிலுவையில் உள்ள மெட்டாடேட்டா மாற்றங்களை கைவிட்டு வெளியேறவா?",
"Discard pending metadata changes and reload the file state?": "நிலுவையில் உள்ள மெட்டாடேட்டா மாற்றங்களை கைவிட்டு கோப்பு நிலையை மீளேற்றவா?",
"Down": "கீழ்",
"Dry-run: would rewrite via temporary file {target_path}": "Dry-run: தற்காலிக கோப்பு {target_path} வழியாக மறுஎழுதப்படும்",
"Edit": "திருத்து",
"Edit Pattern": "வடிவத்தை திருத்து",
"Edit Show": "தொடரை திருத்து",
"Edit filename pattern": "கோப்பு பெயர் வடிவத்தை திருத்து",
"Edit shifted season": "மாற்றிய சீசனை திருத்து",
"Edit stream": "ஸ்ட்ரீமை திருத்து",
"Episode Offset": "அத்தியாய இடச்சரிவு",
"Episode offset": "அத்தியாய இடச்சரிவு",
"File": "கோப்பு",
"File patterns": "கோப்பு வடிவங்கள்",
"First Episode": "முதல் அத்தியாயம்",
"First episode": "முதல் அத்தியாயம்",
"Forced": "கட்டாயம்",
"Help": "உதவி",
"Help Screen": "உதவி திரை",
"ID": "அடையாளம்",
"Identify": "அடையாளம் காட்டு",
"Index": "சுட்டி",
"Index / Subindex": "சுட்டி / துணைச்சுட்டி",
"Index Episode Digits": "அத்தியாய சுட்டி இலக்கங்கள்",
"Index Season Digits": "சீசன் சுட்டி இலக்கங்கள்",
"Indicator Edisode Digits": "அத்தியாய குறியீட்டு இலக்கங்கள்",
"Indicator Season Digits": "சீசன் குறியீட்டு இலக்கங்கள்",
"Keep Editing": "திருத்தலை தொடரு",
"Keeping pending changes.": "நிலுவையில் உள்ள மாற்றங்கள் வைக்கப்படுகின்றன.",
"Key": "சாவி",
"Language": "மொழி",
"Last Episode": "கடைசி அத்தியாயம்",
"Last episode": "கடைசி அத்தியாயம்",
"Layout": "அமைப்பு",
"Media Tags": "மீடியா குறிச்சொற்கள்",
"More than one default audio stream detected and no prompt set": "ஒருக்கும் மேற்பட்ட இயல்புநிலை ஒலி ஸ்ட்ரீம்கள் கண்டறியப்பட்டன, மேலும் எந்த prompt-வும் அமைக்கப்படவில்லை",
"More than one default audio stream detected! Please select stream": "ஒருக்கும் மேற்பட்ட இயல்புநிலை ஒலி ஸ்ட்ரீம்கள் கண்டறியப்பட்டன! ஸ்ட்ரீமைத் தேர்ந்தெடுக்கவும்",
"More than one default subtitle stream detected and no prompt set": "ஒருக்கும் மேற்பட்ட இயல்புநிலை வசன ஸ்ட்ரீம்கள் கண்டறியப்பட்டன, மேலும் எந்த prompt-வும் அமைக்கப்படவில்லை",
"More than one default subtitle stream detected! Please select stream": "ஒருக்கும் மேற்பட்ட இயல்புநிலை வசன ஸ்ட்ரீம்கள் கண்டறியப்பட்டன! ஸ்ட்ரீமைத் தேர்ந்தெடுக்கவும்",
"More than one default video stream detected and no prompt set": "ஒருக்கும் மேற்பட்ட இயல்புநிலை வீடியோ ஸ்ட்ரீம்கள் கண்டறியப்பட்டன, மேலும் எந்த prompt-வும் அமைக்கப்படவில்லை",
"More than one default video stream detected! Please select stream": "ஒருக்கும் மேற்பட்ட இயல்புநிலை வீடியோ ஸ்ட்ரீம்கள் கண்டறியப்பட்டன! ஸ்ட்ரீமைத் தேர்ந்தெடுக்கவும்",
"More than one forced audio stream detected and no prompt set": "ஒருக்கும் மேற்பட்ட கட்டாய ஒலி ஸ்ட்ரீம்கள் கண்டறியப்பட்டன, மேலும் எந்த prompt-வும் அமைக்கப்படவில்லை",
"More than one forced audio stream detected! Please select stream": "ஒருக்கும் மேற்பட்ட கட்டாய ஒலி ஸ்ட்ரீம்கள் கண்டறியப்பட்டன! ஸ்ட்ரீமைத் தேர்ந்தெடுக்கவும்",
"More than one forced subtitle stream detected and no prompt set": "ஒருக்கும் மேற்பட்ட கட்டாய வசன ஸ்ட்ரீம்கள் கண்டறியப்பட்டன, மேலும் எந்த prompt-வும் அமைக்கப்படவில்லை",
"More than one forced subtitle stream detected! Please select stream": "ஒருக்கும் மேற்பட்ட கட்டாய வசன ஸ்ட்ரீம்கள் கண்டறியப்பட்டன! ஸ்ட்ரீமைத் தேர்ந்தெடுக்கவும்",
"More than one forced video stream detected and no prompt set": "ஒருக்கும் மேற்பட்ட கட்டாய வீடியோ ஸ்ட்ரீம்கள் கண்டறியப்பட்டன, மேலும் எந்த prompt-வும் அமைக்கப்படவில்லை",
"More than one forced video stream detected! Please select stream": "ஒருக்கும் மேற்பட்ட கட்டாய வீடியோ ஸ்ட்ரீம்கள் கண்டறியப்பட்டன! ஸ்ட்ரீமைத் தேர்ந்தெடுக்கவும்",
"Name": "பெயர்",
"New Pattern": "புதிய வடிவம்",
"New Show": "புதிய தொடர்",
"New filename pattern": "புதிய கோப்பு பெயர் வடிவம்",
"New shifted season": "புதிய மாற்றிய சீசன்",
"New stream": "புதிய ஸ்ட்ரீம்",
"No": "இல்லை",
"No changes to apply.": "பயன்படுத்த மாற்றங்கள் இல்லை.",
"No changes to revert.": "மீட்டெடுக்க மாற்றங்கள் இல்லை.",
"Normalization disabled.": "சீரமைப்பு முடக்கப்பட்டது.",
"Normalization enabled.": "சீரமைப்பு இயக்கப்பட்டது.",
"Normalize": "சீரமை",
"Notes": "குறிப்புகள்",
"Pattern": "வடிவம்",
"Planned Changes (file->edited output)": "திட்டமிட்ட மாற்றங்கள் (கோப்பு->திருத்திய வெளியீடு)",
"Quality": "தரம்",
"Quit": "வெளியேறு",
"Remove Pattern": "வடிவத்தை நீக்கு",
"Revert": "மீட்டு",
"Reverted pending changes.": "நிலுவையில் உள்ள மாற்றங்கள் மீட்டெடுக்கப்பட்டன.",
"Save": "சேமி",
"Season Offset": "சீசன் இடச்சரிவு",
"Select a stream first.": "முதலில் ஒரு ஸ்ட்ரீமைத் தேர்ந்தெடுக்கவும்.",
"Set Default": "இயல்புநிலையாக அமை",
"Set Forced": "கட்டாயமாக அமை",
"Settings Screen": "அமைப்புகள் திரை",
"Numbering Mapping": "மாற்றிய சீசன்கள்",
"Show": "தொடர்",
"Shows": "தொடர்கள்",
"Source Season": "மூல சீசன்",
"SrcIndex": "மூலச் சுட்டி",
"Status": "நிலை",
"Stay": "இரு",
"Stream dispositions": "ஸ்ட்ரீம் disposition-கள்",
"Stream tags": "ஸ்ட்ரீம் குறிச்சொற்கள்",
"Streams": "ஸ்ட்ரீம்கள்",
"SubIndex": "துணைச்சுட்டி",
"Substitute": "மாற்று",
"Substitute pattern": "வடிவத்தை மாற்று",
"Title": "தலைப்பு",
"Type": "வகை",
"Unable to update selected stream.": "தேர்ந்தெடுக்கப்பட்ட ஸ்ட்ரீமைப் புதுப்பிக்க முடியவில்லை.",
"Up": "மேல்",
"Update Pattern": "வடிவத்தை புதுப்பி",
"Updated media tag {tag!r}.": "மீடியா குறிச்சொல் {tag!r} புதுப்பிக்கப்பட்டது.",
"Updated stream #{index} ({track_type}).": "ஸ்ட்ரீம் #{index} ({track_type}) புதுப்பிக்கப்பட்டது.",
"Value": "மதிப்பு",
"Year": "ஆண்டு",
"Yes": "ஆம்",
"add media tag: key='{key}' value='{value}'": "மீடியா குறிச்சொல் சேர்: key='{key}' value='{value}'",
"add {track_type} track: index={index} lang={language}": "{track_type} ஸ்ட்ரீம் சேர்: index={index} lang={language}",
"attached_pic": "attached_pic",
"attachment": "இணைப்பு",
"audio": "ஒலி",
"captions": "உரைப்பதிவுகள்",
"change media tag: key='{key}' value='{value}'": "மீடியா குறிச்சொல் மாற்று: key='{key}' value='{value}'",
"change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}": "ஸ்ட்ரீம் #{index} ({track_type}:{sub_index}) disposition சேர்={disposition}",
"change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}": "ஸ்ட்ரீம் #{index} ({track_type}:{sub_index}) key சேர்={key} value={value}",
"change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}": "ஸ்ட்ரீம் #{index} ({track_type}:{sub_index}) key மாற்று={key} value={value}",
"change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}": "ஸ்ட்ரீம் #{index} ({track_type}:{sub_index}) disposition நீக்கு={disposition}",
"change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}": "ஸ்ட்ரீம் #{index} ({track_type}:{sub_index}) key நீக்கு={key} value={value}",
"clean_effects": "ஒலி விளைவுகள் மட்டும்",
"comment": "கருத்துரை",
"default": "இயல்புநிலை",
"dependent": "சார்ந்த",
"descriptions": "விளக்கங்கள்",
"dub": "டப்",
"for pattern": "வடிவத்திற்கு",
"forced": "கட்டாயம்",
"from": "இருந்து",
"from pattern": "வடிவத்திலிருந்து",
"from show": "தொடரிலிருந்து",
"hearing_impaired": "கேள்வித்திறன் குறைபாடு",
"karaoke": "கரோக்கே",
"lyrics": "பாடல்வரிகள்",
"metadata": "மெட்டாடேட்டா",
"non_diegetic": "அல்லாத-டைஜெடிக்",
"original": "மூலம்",
"pattern #{id}": "வடிவு #{id}",
"remove media tag: key='{key}' value='{value}'": "மீடியா குறிச்சொல் நீக்கு: key='{key}' value='{value}'",
"remove stream #{index}": "ஸ்ட்ரீம் #{index} நீக்கு",
"show #{id}": "தொடர் #{id}",
"stereo": "ஸ்டீரியோ",
"still_image": "நிலைப்படம்",
"sub index": "துணைச்சுட்டி",
"subtitle": "வசனம்",
"timed_thumbnails": "நேர நிர்ணய சிறுபடங்கள்",
"undefined": "வரையறுக்கப்படாத",
"unknown": "தெரியாத",
"video": "வீடியோ",
"visual_impaired": "பார்வைத்திறன் குறைபாடு"
}
}

170
docs/file_formats.md Normal file
View File

@@ -0,0 +1,170 @@
# File Formats
This document captures source-file-format notes that complement the normative
requirements in `requirements/source_file_formats.md`.
The first documented format is a Matroska source that carries styled ASS/SSA
subtitle streams together with embedded font attachments.
## Styled ASS In Matroska With Embedded Fonts
These files are typically `.mkv` releases where subtitle rendering quality
depends on keeping both parts of the subtitle package together:
- one or more subtitle streams with codec `ass`
- one or more attachment streams that embed font files used by those subtitles
This matters because ASS subtitles are not plain text subtitles in the narrow
WebVTT sense. They can carry layout, styling, positioning, karaoke, signs, and
other typesetting effects. If the matching embedded fonts are lost, consumers
can still see subtitle text but the intended styling and sometimes glyph
coverage can be degraded.
For FFX this format is special because the ASS subtitle streams should remain
normally editable and mappable, while the related font attachments should be
transported unchanged.
## Observed Sample
Assessment date: `2026-04-17`
Observed sample file:
- `tests/assets/boruto_s01e283_ssa.mkv`
Commands used for assessment:
```bash
ffprobe tests/assets/boruto_s01e283_ssa.mkv
ffprobe -hide_banner -show_format -show_streams -of json tests/assets/boruto_s01e283_ssa.mkv
```
Observed stream layout:
| Stream index | Kind | Key details |
| --- | --- | --- |
| `0` | video | `codec_name=h264` |
| `1` | audio | `codec_name=aac`, `language=jpn` |
| `2` | subtitle | `codec_name=ass`, `language=ger`, default |
| `3` | subtitle | `codec_name=ass`, `language=eng` |
| `4`-`13` | attachment | `tags.mimetype=font/ttf`, `.ttf` filenames |
Observed attachment filenames:
- `AmazonEmberTanuki-Italic.ttf`
- `AmazonEmberTanuki-Regular.ttf`
- `Arial.ttf`
- `Arial Bold.ttf`
- `Georgia.ttf`
- `Times New Roman.ttf`
- `Times New Roman Bold.ttf`
- `Trebuchet MS.ttf`
- `Verdana.ttf`
- `Verdana Bold.ttf`
Important probe behavior from the real sample:
- Plain `ffprobe` lists the font streams as `Attachment: none`.
- Plain `ffprobe` also prints warnings such as `Could not find codec
parameters for stream 4 (Attachment: none): unknown codec` and later
`Unsupported codec with id 0 for input stream ...`.
- The JSON produced by `FileProperties.FFPROBE_COMMAND_TOKENS`
(`ffprobe -hide_banner -show_format -show_streams -of json`) still exposes
the attachment streams clearly through `codec_type="attachment"` and the
attachment tags.
- In that JSON, the attachment streams do not expose `codec_name`.
This last point is important for FFX: robust detection must not depend on
attachment `codec_name` being present.
## Detection Guidance
Current known indicators for this format are:
- one or more subtitle streams with `codec_type="subtitle"` and
`codec_name="ass"`
- one or more attachment streams with `codec_type="attachment"`
- attachment tags that identify embedded fonts, especially
`tags.mimetype="font/ttf"`
- attachment filenames that end in `.ttf`
The pattern can vary. FFX should therefore treat the above as a cluster of
signals rather than an exact signature tied to one file.
Inference from the observed sample plus FFmpeg documentation:
- MIME matching should not be limited to `font/ttf` alone.
- The Boruto sample uses `font/ttf`.
- FFmpeg's Matroska attachment example uses
`mimetype=application/x-truetype-font` for a `.ttf` attachment.
- Detection should therefore normalize multiple TTF-like MIME values rather
than depend on a single exact string.
## Processing Expectations In FFX
The format-specific requirements live in
`requirements/source_file_formats.md`. In practical terms, FFX should:
- recognize the ASS-plus-font-attachment pattern even when attachment probe
data is incomplete
- tell the operator that the pattern was detected and that special handling is
being used
- reject sidecar subtitle import for such sources, because converting or
replacing these subtitle tracks with ordinary external text subtitles would
break the intended subtitle package
- continue to allow normal manipulation of the ASS subtitle tracks themselves
- preserve the font attachment streams unchanged
## FFmpeg Notes
Relevant FFmpeg documentation confirms several behaviors that line up with
FFX's needs:
- FFmpeg documents `-attach` as adding an attachment stream to the output, and
explicitly names Matroska fonts used in subtitle rendering as an example.
- FFmpeg documents attachment streams as regular streams that are created after
the mapped media streams.
- FFmpeg documents `-dump_attachment` for extracting attachment streams, which
is useful for debugging or validating a source file's embedded fonts.
- FFmpeg's Matroska example requires a `mimetype` metadata tag for attached
fonts, which is consistent with using attachment tags as detection signals.
- FFmpeg also notes that attachments are implemented as codec extradata. That
helps explain why probe output for attachment streams can look different from
ordinary audio, video, and subtitle streams.
Implication for FFX:
- Attachment preservation is not an optional cosmetic feature for this format.
It is part of preserving the subtitle package correctly.
## Jellyfin Notes
Jellyfin's documentation also supports keeping this format intact:
- Jellyfin's subtitle compatibility table lists `ASS/SSA` as supported in
`MKV` and not supported in `MP4`.
- Jellyfin notes that when subtitles must be transcoded, they are either
converted to a supported format or burned into the video, and burning them in
is the most CPU-intensive path.
- Jellyfin's subtitle-extraction example for `SSA/ASS` first dumps attachment
streams and then extracts the ASS subtitle stream, which reflects the real
relationship between ASS subtitles and embedded fonts in MKV releases.
- Jellyfin's font documentation says text-based subtitles require fonts to
render properly.
- Jellyfin's configuration documentation says the web client uses configured
fallback fonts for ASS subtitles when other fonts such as MKV attachments or
client-side fonts are not available.
Inference from the Jellyfin compatibility tables:
- Keeping this subtitle format in Matroska is the safest interoperability
choice for Jellyfin consumers.
- Converting the subtitle payload to WebVTT would lose styled ASS behavior.
- Dropping the attachment streams would force client or fallback font
substitution and can change appearance or glyph coverage.
## References
- FFmpeg documentation: https://ffmpeg.org/ffmpeg.html
- Jellyfin codec support: https://jellyfin.org/docs/general/clients/codec-support/
- Jellyfin configuration and fonts: https://jellyfin.org/docs/general/administration/configuration/

View File

@@ -1,13 +1,13 @@
[project]
name = "ffx"
description = "FFX recoding and metadata managing tool"
version = "0.2.3"
version = "0.4.1"
license = {file = "LICENSE.md"}
dependencies = [
"requests",
"jinja2",
"click",
"textual",
"textual>=8.0",
"sqlalchemy",
]
readme = {file = "README.md", content-type = "text/markdown"}
@@ -27,6 +27,11 @@ Homepage = "https://gitea.maveno.de/Javanaut/ffx"
Repository = "https://gitea.maveno.de/Javanaut/ffx.git"
Issues = "https://gitea.maveno.de/Javanaut/ffx/issues"
[project.optional-dependencies]
test = [
"pytest",
]
[build-system]
requires = [
"setuptools",
@@ -35,4 +40,15 @@ requires = [
build-backend = "setuptools.build_meta"
[project.scripts]
ffx = "ffx.ffx:ffx"
ffx = "ffx.cli:ffx"
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
norecursedirs = ["tests/legacy", "tests/support"]
addopts = "-ra"
markers = [
"integration: exercises the FFX bundle with real ffmpeg/ffprobe processes",
"pattern_management: covers requirements/pattern_management.md",
"subtrack_mapping: covers requirements/subtrack_mapping.md",
]

9
src/ffx/__main__.py Normal file
View File

@@ -0,0 +1,9 @@
from .cli import ffx
def main():
ffx()
if __name__ == "__main__":
main()

220
src/ffx/_iso_language.py Normal file
View File

@@ -0,0 +1,220 @@
from enum import Enum
import difflib
class IsoLanguage(Enum):
ABKHAZIAN = {"name": "Abkhazian", "iso639_1": "ab", "iso639_2": ["abk"]}
AFAR = {"name": "Afar", "iso639_1": "aa", "iso639_2": ["aar"]}
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": ["afr"]}
AKAN = {"name": "Akan", "iso639_1": "ak", "iso639_2": ["aka"]}
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": ["sqi", "alb"]}
AMHARIC = {"name": "Amharic", "iso639_1": "am", "iso639_2": ["amh"]}
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": ["ara"]}
ARAGONESE = {"name": "Aragonese", "iso639_1": "an", "iso639_2": ["arg"]}
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": ["hye", "arm"]}
ASSAMESE = {"name": "Assamese", "iso639_1": "as", "iso639_2": ["asm"]}
AVARIC = {"name": "Avaric", "iso639_1": "av", "iso639_2": ["ava"]}
AVESTAN = {"name": "Avestan", "iso639_1": "ae", "iso639_2": ["ave"]}
AYMARA = {"name": "Aymara", "iso639_1": "ay", "iso639_2": ["aym"]}
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": ["aze"]}
BAMBARA = {"name": "Bambara", "iso639_1": "bm", "iso639_2": ["bam"]}
BASHKIR = {"name": "Bashkir", "iso639_1": "ba", "iso639_2": ["bak"]}
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": ["eus", "baq"]}
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": ["bel"]}
BENGALI = {"name": "Bengali", "iso639_1": "bn", "iso639_2": ["ben"]}
BISLAMA = {"name": "Bislama", "iso639_1": "bi", "iso639_2": ["bis"]}
BOKMAL = {"name": "Bokmål", "iso639_1": "nb", "iso639_2": ["nob"]}
BOSNIAN = {"name": "Bosnian", "iso639_1": "bs", "iso639_2": ["bos"]}
BRETON = {"name": "Breton", "iso639_1": "br", "iso639_2": ["bre"]}
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": ["bul"]}
BURMESE = {"name": "Burmese", "iso639_1": "my", "iso639_2": ["mya", "bur"]}
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": ["cat"]}
CHAMORRO = {"name": "Chamorro", "iso639_1": "ch", "iso639_2": ["cha"]}
CHECHEN = {"name": "Chechen", "iso639_1": "ce", "iso639_2": ["che"]}
CHICHEWA = {"name": "Chichewa", "iso639_1": "ny", "iso639_2": ["nya"]}
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": ["zho", "chi"]}
CHURCH_SLAVIC = {"name": "Church Slavic", "iso639_1": "cu", "iso639_2": ["chu"]}
CHUVASH = {"name": "Chuvash", "iso639_1": "cv", "iso639_2": ["chv"]}
CORNISH = {"name": "Cornish", "iso639_1": "kw", "iso639_2": ["cor"]}
CORSICAN = {"name": "Corsican", "iso639_1": "co", "iso639_2": ["cos"]}
CREE = {"name": "Cree", "iso639_1": "cr", "iso639_2": ["cre"]}
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": ["hrv"]}
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": ["ces", "cze"]}
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": ["dan"]}
DIVEHI = {"name": "Divehi", "iso639_1": "dv", "iso639_2": ["div"]}
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": ["nld", "dut"]}
DZONGKHA = {"name": "Dzongkha", "iso639_1": "dz", "iso639_2": ["dzo"]}
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": ["eng"]}
ESPERANTO = {"name": "Esperanto", "iso639_1": "eo", "iso639_2": ["epo"]}
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": ["est"]}
EWE = {"name": "Ewe", "iso639_1": "ee", "iso639_2": ["ewe"]}
FAROESE = {"name": "Faroese", "iso639_1": "fo", "iso639_2": ["fao"]}
FIJIAN = {"name": "Fijian", "iso639_1": "fj", "iso639_2": ["fij"]}
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": ["fin"]}
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": ["fra", "fre"]}
FULAH = {"name": "Fulah", "iso639_1": "ff", "iso639_2": ["ful"]}
GALICIAN = {"name": "Galician", "iso639_1": "gl", "iso639_2": ["glg"]}
GANDA = {"name": "Ganda", "iso639_1": "lg", "iso639_2": ["lug"]}
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": ["kat", "geo"]}
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": ["deu", "ger"]}
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": ["ell", "gre"]}
GUARANI = {"name": "Guarani", "iso639_1": "gn", "iso639_2": ["grn"]}
GUJARATI = {"name": "Gujarati", "iso639_1": "gu", "iso639_2": ["guj"]}
HAITIAN = {"name": "Haitian", "iso639_1": "ht", "iso639_2": ["hat"]}
HAUSA = {"name": "Hausa", "iso639_1": "ha", "iso639_2": ["hau"]}
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": ["heb"]}
HERERO = {"name": "Herero", "iso639_1": "hz", "iso639_2": ["her"]}
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": ["hin"]}
HIRI_MOTU = {"name": "Hiri Motu", "iso639_1": "ho", "iso639_2": ["hmo"]}
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": ["hun"]}
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": ["isl", "ice"]}
IDO = {"name": "Ido", "iso639_1": "io", "iso639_2": ["ido"]}
IGBO = {"name": "Igbo", "iso639_1": "ig", "iso639_2": ["ibo"]}
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": ["ind"]}
INTERLINGUA = {"name": "Interlingua", "iso639_1": "ia", "iso639_2": ["ina"]}
INTERLINGUE = {"name": "Interlingue", "iso639_1": "ie", "iso639_2": ["ile"]}
INUKTITUT = {"name": "Inuktitut", "iso639_1": "iu", "iso639_2": ["iku"]}
INUPIAQ = {"name": "Inupiaq", "iso639_1": "ik", "iso639_2": ["ipk"]}
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": ["gle"]}
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": ["ita"]}
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": ["jpn"]}
JAVANESE = {"name": "Javanese", "iso639_1": "jv", "iso639_2": ["jav"]}
KALAALLISUT = {"name": "Kalaallisut", "iso639_1": "kl", "iso639_2": ["kal"]}
KANNADA = {"name": "Kannada", "iso639_1": "kn", "iso639_2": ["kan"]}
KANURI = {"name": "Kanuri", "iso639_1": "kr", "iso639_2": ["kau"]}
KASHMIRI = {"name": "Kashmiri", "iso639_1": "ks", "iso639_2": ["kas"]}
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": ["kaz"]}
KHMER = {"name": "Khmer", "iso639_1": "km", "iso639_2": ["khm"]}
KIKUYU = {"name": "Kikuyu", "iso639_1": "ki", "iso639_2": ["kik"]}
KINYARWANDA = {"name": "Kinyarwanda", "iso639_1": "rw", "iso639_2": ["kin"]}
KIRGHIZ = {"name": "Kirghiz", "iso639_1": "ky", "iso639_2": ["kir"]}
KOMI = {"name": "Komi", "iso639_1": "kv", "iso639_2": ["kom"]}
KONGO = {"name": "Kongo", "iso639_1": "kg", "iso639_2": ["kon"]}
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": ["kor"]}
KUANYAMA = {"name": "Kuanyama", "iso639_1": "kj", "iso639_2": ["kua"]}
KURDISH = {"name": "Kurdish", "iso639_1": "ku", "iso639_2": ["kur"]}
LAO = {"name": "Lao", "iso639_1": "lo", "iso639_2": ["lao"]}
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": ["lat"]}
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": ["lav"]}
LIMBURGAN = {"name": "Limburgan", "iso639_1": "li", "iso639_2": ["lim"]}
LINGALA = {"name": "Lingala", "iso639_1": "ln", "iso639_2": ["lin"]}
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": ["lit"]}
LUBA_KATANGA = {"name": "Luba-Katanga", "iso639_1": "lu", "iso639_2": ["lub"]}
LUXEMBOURGISH = {"name": "Luxembourgish", "iso639_1": "lb", "iso639_2": ["ltz"]}
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": ["mkd", "mac"]}
MALAGASY = {"name": "Malagasy", "iso639_1": "mg", "iso639_2": ["mlg"]}
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": ["msa", "may"]}
MALAYALAM = {"name": "Malayalam", "iso639_1": "ml", "iso639_2": ["mal"]}
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": ["mlt"]}
MANX = {"name": "Manx", "iso639_1": "gv", "iso639_2": ["glv"]}
MAORI = {"name": "Maori", "iso639_1": "mi", "iso639_2": ["mri", "mao"]}
MARATHI = {"name": "Marathi", "iso639_1": "mr", "iso639_2": ["mar"]}
MARSHALLESE = {"name": "Marshallese", "iso639_1": "mh", "iso639_2": ["mah"]}
MONGOLIAN = {"name": "Mongolian", "iso639_1": "mn", "iso639_2": ["mon"]}
NAURU = {"name": "Nauru", "iso639_1": "na", "iso639_2": ["nau"]}
NAVAJO = {"name": "Navajo", "iso639_1": "nv", "iso639_2": ["nav"]}
NDONGA = {"name": "Ndonga", "iso639_1": "ng", "iso639_2": ["ndo"]}
NEPALI = {"name": "Nepali", "iso639_1": "ne", "iso639_2": ["nep"]}
NORTH_NDEBELE = {"name": "North Ndebele", "iso639_1": "nd", "iso639_2": ["nde"]}
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": ["sme"]}
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": ["nor"]}
NORWEGIAN_NYNORSK = {"name": "Nynorsk", "iso639_1": "nn", "iso639_2": ["nno"]}
OCCITAN = {"name": "Occitan", "iso639_1": "oc", "iso639_2": ["oci"]}
OJIBWA = {"name": "Ojibwa", "iso639_1": "oj", "iso639_2": ["oji"]}
ORIYA = {"name": "Oriya", "iso639_1": "or", "iso639_2": ["ori"]}
OROMO = {"name": "Oromo", "iso639_1": "om", "iso639_2": ["orm"]}
OSSETIAN = {"name": "Ossetian", "iso639_1": "os", "iso639_2": ["oss"]}
PALI = {"name": "Pali", "iso639_1": "pi", "iso639_2": ["pli"]}
PANJABI = {"name": "Panjabi", "iso639_1": "pa", "iso639_2": ["pan"]}
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": ["fas", "per"]}
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": ["pol"]}
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": ["por"]}
PUSHTO = {"name": "Pushto", "iso639_1": "ps", "iso639_2": ["pus"]}
QUECHUA = {"name": "Quechua", "iso639_1": "qu", "iso639_2": ["que"]}
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": ["ron", "rum"]}
ROMANSH = {"name": "Romansh", "iso639_1": "rm", "iso639_2": ["roh"]}
RUNDI = {"name": "Rundi", "iso639_1": "rn", "iso639_2": ["run"]}
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": ["rus"]}
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": ["smo"]}
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": ["sag"]}
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": ["san"]}
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": ["srd"]}
SCOTTISH_GAELIC = {"name": "Scottish Gaelic", "iso639_1": "gd", "iso639_2": ["gla"]}
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": ["srp"]}
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": ["sna"]}
SICHUAN_YI = {"name": "Sichuan Yi", "iso639_1": "ii", "iso639_2": ["iii"]}
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": ["snd"]}
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": ["sin"]}
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": ["slk", "slo"]}
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": ["slv"]}
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": ["som"]}
SOUTH_NDEBELE = {"name": "South Ndebele", "iso639_1": "nr", "iso639_2": ["nbl"]}
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": ["sot"]}
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": ["spa"]}
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": ["sun"]}
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": ["swa"]}
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": ["ssw"]}
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": ["swe"]}
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": ["tgl"]}
TAHITIAN = {"name": "Tahitian", "iso639_1": "ty", "iso639_2": ["tah"]}
TAJIK = {"name": "Tajik", "iso639_1": "tg", "iso639_2": ["tgk"]}
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": ["tam"]}
TATAR = {"name": "Tatar", "iso639_1": "tt", "iso639_2": ["tat"]}
TELUGU = {"name": "Telugu", "iso639_1": "te", "iso639_2": ["tel"]}
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": ["tha"]}
TIBETAN = {"name": "Tibetan", "iso639_1": "bo", "iso639_2": ["bod", "tib"]}
TIGRINYA = {"name": "Tigrinya", "iso639_1": "ti", "iso639_2": ["tir"]}
TONGA = {"name": "Tonga", "iso639_1": "to", "iso639_2": ["ton"]}
TSONGA = {"name": "Tsonga", "iso639_1": "ts", "iso639_2": ["tso"]}
TSWANA = {"name": "Tswana", "iso639_1": "tn", "iso639_2": ["tsn"]}
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": ["tur"]}
TURKMEN = {"name": "Turkmen", "iso639_1": "tk", "iso639_2": ["tuk"]}
TWI = {"name": "Twi", "iso639_1": "tw", "iso639_2": ["twi"]}
UIGHUR = {"name": "Uighur", "iso639_1": "ug", "iso639_2": ["uig"]}
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": ["ukr"]}
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": ["urd"]}
UZBEK = {"name": "Uzbek", "iso639_1": "uz", "iso639_2": ["uzb"]}
VENDA = {"name": "Venda", "iso639_1": "ve", "iso639_2": ["ven"]}
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": ["vie"]}
VOLAPUK = {"name": "Volapük", "iso639_1": "vo", "iso639_2": ["vol"]}
WALLOON = {"name": "Walloon", "iso639_1": "wa", "iso639_2": ["wln"]}
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": ["cym", "wel"]}
WESTERN_FRISIAN = {"name": "Western Frisian", "iso639_1": "fy", "iso639_2": ["fry"]}
WOLOF = {"name": "Wolof", "iso639_1": "wo", "iso639_2": ["wol"]}
XHOSA = {"name": "Xhosa", "iso639_1": "xh", "iso639_2": ["xho"]}
YIDDISH = {"name": "Yiddish", "iso639_1": "yi", "iso639_2": ["yid"]}
YORUBA = {"name": "Yoruba", "iso639_1": "yo", "iso639_2": ["yor"]}
ZHUANG = {"name": "Zhuang", "iso639_1": "za", "iso639_2": ["zha"]}
ZULU = {"name": "Zulu", "iso639_1": "zu", "iso639_2": ["zul"]}
FILIPINO = {"name": "Filipino", "iso639_1": "tl", "iso639_2": ["fil"]}
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": ["und"]}
@staticmethod
def find(label : str):
closestMatches = difflib.get_close_matches(label, [l.value["name"] for l in IsoLanguage], n=1)
if closestMatches:
foundLangs = [l for l in IsoLanguage if l.value["name"] == closestMatches[0]]
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
else:
return IsoLanguage.UNDEFINED
@staticmethod
def findThreeLetter(theeLetter : str):
foundLangs = [l for l in IsoLanguage if str(theeLetter) in l.value["iso639_2"]]
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
def label(self):
return str(self.value["name"])
def twoLetter(self):
return str(self.value["iso639_1"])
def threeLetter(self):
return str(self.value["iso639_2"][0])

View File

@@ -0,0 +1,67 @@
from enum import Enum
import os
class AttachmentFormat(Enum):
TTF = {'identifier': 'ttf', 'format': None, 'extension': 'ttf', 'label': 'TTF'}
PNG = {'identifier': 'png', 'format': None, 'extension': 'png', 'label': 'PNG'}
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
def identifier(self):
return str(self.value['identifier'])
def label(self):
return str(self.value['label'])
def format(self):
return self.value['format']
def extension(self):
return str(self.value['extension'])
@staticmethod
def identify(identifier: str):
formats = [f for f in AttachmentFormat if f.value['identifier'] == str(identifier)]
if formats:
return formats[0]
return AttachmentFormat.UNKNOWN
@staticmethod
def identifyFfprobeStream(streamObj: dict):
identifier = streamObj.get("codec_name")
identifiedFormat = AttachmentFormat.identify(identifier)
if identifiedFormat != AttachmentFormat.UNKNOWN:
return identifiedFormat
if str(streamObj.get("codec_type", "")).strip() != "attachment":
return AttachmentFormat.UNKNOWN
tags = streamObj.get("tags", {}) or {}
mimetype = str(tags.get("mimetype", "")).strip().lower()
filename = str(tags.get("filename", "")).strip().lower()
filenameExtension = os.path.splitext(filename)[1]
if (
mimetype in {
"font/ttf",
"application/x-truetype-font",
"application/x-font-ttf",
}
or "truetype" in mimetype
or filenameExtension == ".ttf"
):
return AttachmentFormat.TTF
if mimetype in {"image/png", "image/x-png"} or filenameExtension == ".png":
return AttachmentFormat.PNG
return AttachmentFormat.UNKNOWN
@staticmethod
def fromTrackCodec(trackCodec):
identifier = getattr(trackCodec, "identifier", None)
if callable(identifier):
return AttachmentFormat.identify(trackCodec.identifier())
return AttachmentFormat.UNKNOWN

View File

@@ -9,6 +9,7 @@ class AudioLayout(Enum):
LAYOUT_7_1 = {"label": "7.1", "index": 4} #TODO: Does this exist?
LAYOUT_6CH = {"label": "6ch", "index": 5}
LAYOUT_5_0 = {"label": "5.0(side)", "index": 6}
LAYOUT_UNDEFINED = {"label": "undefined", "index": 0}
@@ -29,6 +30,15 @@ class AudioLayout(Enum):
except:
return AudioLayout.LAYOUT_UNDEFINED
# @staticmethod
# def fromIndex(index : int):
# try:
# target_index = int(index)
# except (TypeError, ValueError):
# return AudioLayout.LAYOUT_UNDEFINED
# return next((a for a in AudioLayout if a.value['index'] == target_index),
# AudioLayout.LAYOUT_UNDEFINED)
@staticmethod
def fromIndex(index : int):
try:

1635
src/ffx/cli.py Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,12 @@
import os, json
from .constants import (
DEFAULT_SHOW_INDEX_EPISODE_DIGITS,
DEFAULT_SHOW_INDEX_SEASON_DIGITS,
DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS,
DEFAULT_SHOW_INDICATOR_SEASON_DIGITS,
)
class ConfigurationController():
CONFIG_FILENAME = 'ffx.json'
@@ -8,7 +15,13 @@ class ConfigurationController():
DATABASE_PATH_CONFIG_KEY = 'databasePath'
LOG_DIRECTORY_CONFIG_KEY = 'logDirectory'
SUBTITLES_DIRECTORY_CONFIG_KEY = 'subtitlesDirectory'
LANGUAGE_CONFIG_KEY = 'language'
OUTPUT_FILENAME_TEMPLATE_KEY = 'outputFilenameTemplate'
DEFAULT_INDEX_SEASON_DIGITS_CONFIG_KEY = 'defaultIndexSeasonDigits'
DEFAULT_INDEX_EPISODE_DIGITS_CONFIG_KEY = 'defaultIndexEpisodeDigits'
DEFAULT_INDICATOR_SEASON_DIGITS_CONFIG_KEY = 'defaultIndicatorSeasonDigits'
DEFAULT_INDICATOR_EPISODE_DIGITS_CONFIG_KEY = 'defaultIndicatorEpisodeDigits'
def __init__(self):
@@ -49,6 +62,51 @@ class ConfigurationController():
def getDatabaseFilePath(self):
    # Accessor for the database file path resolved during construction.
    return self.__databaseFilePath
def getSubtitlesDirectoryPath(self):
subtitlesDirectory = self.__configurationData.get(
ConfigurationController.SUBTITLES_DIRECTORY_CONFIG_KEY,
'',
)
return os.path.expanduser(str(subtitlesDirectory)) if subtitlesDirectory else ''
def getLanguage(self):
return str(self.__configurationData.get(ConfigurationController.LANGUAGE_CONFIG_KEY, '')).strip()
@classmethod
def getConfiguredIntegerValue(cls, configurationData: dict, configKey: str, defaultValue: int) -> int:
configuredValue = configurationData.get(configKey, defaultValue)
try:
return int(configuredValue)
except (TypeError, ValueError):
return int(defaultValue)
def getDefaultIndexSeasonDigits(self):
    # Configured digit count for the season part of the index; falls back to
    # DEFAULT_SHOW_INDEX_SEASON_DIGITS when unset or non-numeric.
    return ConfigurationController.getConfiguredIntegerValue(
        self.__configurationData,
        ConfigurationController.DEFAULT_INDEX_SEASON_DIGITS_CONFIG_KEY,
        DEFAULT_SHOW_INDEX_SEASON_DIGITS,
    )
def getDefaultIndexEpisodeDigits(self):
    # Configured digit count for the episode part of the index.
    return ConfigurationController.getConfiguredIntegerValue(
        self.__configurationData,
        ConfigurationController.DEFAULT_INDEX_EPISODE_DIGITS_CONFIG_KEY,
        DEFAULT_SHOW_INDEX_EPISODE_DIGITS,
    )
def getDefaultIndicatorSeasonDigits(self):
    # Configured digit count for the season part of the indicator.
    return ConfigurationController.getConfiguredIntegerValue(
        self.__configurationData,
        ConfigurationController.DEFAULT_INDICATOR_SEASON_DIGITS_CONFIG_KEY,
        DEFAULT_SHOW_INDICATOR_SEASON_DIGITS,
    )
def getDefaultIndicatorEpisodeDigits(self):
    # Configured digit count for the episode part of the indicator.
    return ConfigurationController.getConfiguredIntegerValue(
        self.__configurationData,
        ConfigurationController.DEFAULT_INDICATOR_EPISODE_DIGITS_CONFIG_KEY,
        DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS,
    )
def getData(self):
    # Raw configuration dictionary; this is a live reference, not a copy.
    return self.__configurationData
@@ -139,4 +197,4 @@ class ConfigurationController():
# raise click.ClickException(f"PatternController.getPattern(): {repr(ex)}")
# finally:
# s.close()
#
#

80
src/ffx/confirm_screen.py Normal file
View File

@@ -0,0 +1,80 @@
from textual.containers import Grid
from textual.screen import Screen
from textual.widgets import Button, Footer, Header, Static
from .i18n import t
from .screen_support import build_screen_log_pane
class ConfirmScreen(Screen):
    """Modal yes/no screen; dismisses with True on confirm, False on cancel/escape."""
    BINDINGS = [
        ("escape", "back", t("Back")),
    ]
    # Four-column, seven-row grid; '.four' widgets span the full width.
    CSS = """
    Grid {
        grid-size: 4 7;
        grid-rows: 2 2 2 2 2 2 2;
        grid-columns: 1fr 1fr 1fr 1fr;
        height: 100%;
        width: 100%;
        min-width: 80;
        padding: 1;
        overflow-x: auto;
        overflow-y: auto;
    }
    Button {
        border: none;
    }
    .four {
        column-span: 4;
    }
    """
    def __init__(
        self,
        message: str,
        confirm_label: str = "Confirm",
        cancel_label: str = "Cancel",
    ):
        super().__init__()
        # The message is shown verbatim; button labels go through i18n t().
        self.__message = str(message)
        self.__confirmLabel = str(t(confirm_label))
        self.__cancelLabel = str(t(cancel_label))
    def compose(self):
        yield Header()
        with Grid():
            # Row 1
            yield Static(self.__message, classes="four")
            # Row 2
            yield Static(" ", classes="four")
            # Row 3
            yield Button(self.__confirmLabel, id="confirm_button")
            yield Button(self.__cancelLabel, id="cancel_button")
            # NOTE(review): log pane assumed to live inside the grid — confirm layout.
            yield build_screen_log_pane()
        yield Footer()
    def on_mount(self):
        # In debug mode, show the concrete screen class name in the title bar.
        if getattr(self, 'context', {}).get('debug', False):
            self.title = f"{self.app.title} - {self.__class__.__name__}"
    def on_button_pressed(self, event: Button.Pressed) -> None:
        # Dismiss value doubles as the confirmation result for the caller.
        if event.button.id == "confirm_button":
            self.dismiss(True)
        if event.button.id == "cancel_button":
            self.dismiss(False)
    def action_back(self):
        # Escape key: treat as cancellation.
        self.dismiss(False)

View File

@@ -1,15 +1,30 @@
VERSION='0.2.3'
DATABASE_VERSION = 2
VERSION='0.4.1'
DATABASE_VERSION = 3
DEFAULT_QUALITY = 32
DEFAULT_AV1_PRESET = 5
DEFAULT_VIDEO_ENCODER_LABEL = "vp9"
DEFAULT_CONTAINER_FORMAT = "webm"
DEFAULT_CONTAINER_EXTENSION = "webm"
SUPPORTED_INPUT_FILE_EXTENSIONS = ("mkv", "mp4", "avi", "flv", "webm")
FFMPEG_COMMAND_TOKENS = ("ffmpeg", "-y")
FFMPEG_NULL_OUTPUT_TOKENS = ("-f", "null", "/dev/null")
DEFAULT_STEREO_BANDWIDTH = "112"
DEFAULT_AC3_BANDWIDTH = "256"
DEFAULT_DTS_BANDWIDTH = "320"
DEFAULT_7_1_BANDWIDTH = "384"
DEFAULT_CROP_START = 60
DEFAULT_CROP_LENGTH = 180
DEFAULT_CROPDETECT_SEEK_SECONDS = 60
DEFAULT_CROPDETECT_DURATION_SECONDS = 180
DEFAULT_cut_start = 60
DEFAULT_cut_length = 180
DEFAULT_SHOW_INDEX_SEASON_DIGITS = 2
DEFAULT_SHOW_INDEX_EPISODE_DIGITS = 2
DEFAULT_SHOW_INDICATOR_SEASON_DIGITS = 2
DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS = 2
DEFAULT_OUTPUT_FILENAME_TEMPLATE = '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'

View File

@@ -1,20 +1,25 @@
import os, click
import os, shutil, click
from sqlalchemy import create_engine
from sqlalchemy import create_engine, inspect, text
from sqlalchemy.orm import sessionmaker
# Import the full model package so SQLAlchemy registers every mapped class
# before metadata creation and the first ORM query.
import ffx.model
from ffx.model.show import Base
from ffx.model.property import Property
from ffx.model.migration import (
DatabaseVersionException,
getMigrationPlan,
migrateDatabase,
)
from ffx.constants import DATABASE_VERSION
DATABASE_VERSION_KEY = 'database_version'
class DatabaseVersionException(Exception):
def __init__(self, errorMessage):
super().__init__(errorMessage)
EXPECTED_TABLE_NAMES = set(Base.metadata.tables.keys())
def databaseContext(databasePath: str = ''):
@@ -29,12 +34,18 @@ def databaseContext(databasePath: str = ''):
if not os.path.exists(ffxVarDir):
os.makedirs(ffxVarDir)
databasePath = os.path.join(ffxVarDir, 'ffx.db')
else:
databasePath = os.path.expanduser(databasePath)
if databasePath != ':memory:':
databasePath = os.path.abspath(databasePath)
databaseContext['path'] = databasePath
databaseContext['url'] = f"sqlite:///{databasePath}"
databaseContext['engine'] = create_engine(databaseContext['url'])
databaseContext['session'] = sessionmaker(bind=databaseContext['engine'])
Base.metadata.create_all(databaseContext['engine'])
bootstrapDatabaseIfNeeded(databaseContext)
# isSyncronuous = False
# while not isSyncronuous:
@@ -51,14 +62,126 @@ def databaseContext(databasePath: str = ''):
return databaseContext
def databaseNeedsBootstrap(databaseContext) -> bool:
    """Return True when any ORM-mapped table is absent from the database."""
    schemaInspector = inspect(databaseContext['engine'])
    presentTableNames = set(schemaInspector.get_table_names())
    # Bootstrap is required as soon as one expected table is missing.
    return bool(EXPECTED_TABLE_NAMES - presentTableNames)
def bootstrapDatabaseIfNeeded(databaseContext):
    """Create the ORM-mapped tables when the schema is missing or incomplete."""
    if databaseNeedsBootstrap(databaseContext):
        # create_all only creates tables that do not yet exist.
        Base.metadata.create_all(databaseContext['engine'])
def ensureDatabaseVersion(databaseContext):
currentDatabaseVersion = getDatabaseVersion(databaseContext)
if currentDatabaseVersion:
if currentDatabaseVersion != DATABASE_VERSION:
raise DatabaseVersionException(f"Current database version ({currentDatabaseVersion}) does not match required ({DATABASE_VERSION})")
else:
if not currentDatabaseVersion:
setDatabaseVersion(databaseContext, DATABASE_VERSION)
return
if currentDatabaseVersion > DATABASE_VERSION:
raise DatabaseVersionException(
f"Current database version ({currentDatabaseVersion}) does not match required ({DATABASE_VERSION})"
)
if currentDatabaseVersion < DATABASE_VERSION:
promptForDatabaseMigration(databaseContext, currentDatabaseVersion, DATABASE_VERSION)
migrateDatabase(databaseContext, currentDatabaseVersion, DATABASE_VERSION, setDatabaseVersion)
currentDatabaseVersion = getDatabaseVersion(databaseContext)
if currentDatabaseVersion != DATABASE_VERSION:
raise DatabaseVersionException(
f"Current database version ({currentDatabaseVersion}) does not match required ({DATABASE_VERSION})"
)
ensureCurrentSchemaCompatibility(databaseContext)
def ensureCurrentSchemaCompatibility(databaseContext):
    """Add later-introduced 'shows' columns via ALTER TABLE when they are absent.

    metadata.create_all never alters existing tables, so columns added after
    the table was first created have to be patched in explicitly.
    """
    engine = databaseContext['engine']
    existingColumnNames = {
        columnInfo['name']
        for columnInfo in inspect(engine).get_columns('shows')
    }
    requiredColumns = (
        ('quality', "ALTER TABLE shows ADD COLUMN quality INTEGER DEFAULT 0"),
        ('notes', "ALTER TABLE shows ADD COLUMN notes TEXT DEFAULT ''"),
    )
    pendingStatements = [
        statement
        for columnName, statement in requiredColumns
        if columnName not in existingColumnNames
    ]
    if not pendingStatements:
        return
    # Apply all missing columns inside a single transaction.
    with engine.begin() as connection:
        for pendingStatement in pendingStatements:
            connection.execute(text(pendingStatement))
def promptForDatabaseMigration(databaseContext, currentDatabaseVersion: int, targetDatabaseVersion: int):
    """Show the migration plan, ask for consent, and create a pre-migration backup.

    Raises DatabaseVersionException when any step's migration module is missing
    and click.ClickException when the user declines the migration.
    """
    migrationPlan = getMigrationPlan(currentDatabaseVersion, targetDatabaseVersion)
    click.echo("Database migration required.")
    click.echo(f"Current version: {currentDatabaseVersion}")
    click.echo(f"Target version: {targetDatabaseVersion}")
    click.echo("Steps required:")
    missingSteps = []
    for migrationStep in migrationPlan:
        moduleStatus = "present" if migrationStep.modulePresent else "missing"
        click.echo(
            f" {migrationStep.versionFrom} -> {migrationStep.versionTo}: "
            + f"{migrationStep.moduleName} [{moduleStatus}]"
        )
        if not migrationStep.modulePresent:
            missingSteps.append(migrationStep)
    if missingSteps:
        # Without every step's module, the migration chain cannot reach the target.
        firstMissingStep = missingSteps[0]
        raise DatabaseVersionException(
            f"No migration path from database version "
            + f"{firstMissingStep.versionFrom} to {firstMissingStep.versionTo}"
        )
    if not click.confirm(
        "Create a backup and continue with database migration?",
        default=True,
    ):
        raise click.ClickException("Database migration aborted by user.")
    backupPath = backupDatabaseBeforeMigration(
        databaseContext,
        currentDatabaseVersion,
        targetDatabaseVersion,
    )
    click.echo(f"Database backup created: {backupPath}")
def backupDatabaseBeforeMigration(databaseContext, currentDatabaseVersion: int, targetDatabaseVersion: int) -> str:
    """Copy the SQLite database aside before migrating and return the backup path.

    Raises click.ClickException for in-memory or missing database files.
    """
    databasePath = databaseContext.get('path', '')
    if not databasePath or databasePath == ':memory:':
        raise click.ClickException("Database migration backup requires a file-backed SQLite database.")
    if not os.path.isfile(databasePath):
        raise click.ClickException(f"Database file not found for backup: {databasePath}")
    backupBase = f"{databasePath}.v{currentDatabaseVersion}-to-v{targetDatabaseVersion}"
    backupPath = f"{backupBase}.bak"
    suffixCounter = 1
    # Never clobber an earlier backup: append a numeric suffix until the name is free.
    while os.path.exists(backupPath):
        backupPath = f"{backupBase}.{suffixCounter}.bak"
        suffixCounter += 1
    # Dispose pooled connections so the copy sees a quiescent database file.
    databaseContext['engine'].dispose()
    shutil.copy2(databasePath, backupPath)
    return backupPath
def getDatabaseVersion(databaseContext):
@@ -67,9 +190,9 @@ def getDatabaseVersion(databaseContext):
Session = databaseContext['session']
s = Session()
q = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY)
versionProperty = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY).first()
return int(q.first().value) if q.count() else 0
return int(versionProperty.value) if versionProperty is not None else 0
except Exception as ex:
raise click.ClickException(f"getDatabaseVersion(): {repr(ex)}")
@@ -99,4 +222,4 @@ def setDatabaseVersion(databaseContext, databaseVersion: int):
except Exception as ex:
raise click.ClickException(f"setDatabaseVersion(): {repr(ex)}")
finally:
s.close()
s.close()

View File

@@ -0,0 +1,24 @@
from .base import FfmpegRemedy, FfmpegRemedyDecision, FfmpegSkipFileWarning
from .monitor import FfmpegCommandRunner, FfmpegDiagnosticMonitor
from .retry_with_generated_pts import RetryWithGeneratedPtsRemedy
from .state import (
getDiagnosticsState,
getUnremediedIssues,
iterUnremediedIssueSummaryLines,
recordUnremediedIssue,
)
from .warn_corrupt_mpeg_audio import WarnCorruptMpegAudioRemedy
__all__ = [
"FfmpegCommandRunner",
"FfmpegDiagnosticMonitor",
"FfmpegRemedy",
"FfmpegRemedyDecision",
"FfmpegSkipFileWarning",
"RetryWithGeneratedPtsRemedy",
"WarnCorruptMpegAudioRemedy",
"getDiagnosticsState",
"getUnremediedIssues",
"iterUnremediedIssueSummaryLines",
"recordUnremediedIssue",
]

View File

@@ -0,0 +1,33 @@
from __future__ import annotations
from dataclasses import dataclass
class FfmpegSkipFileWarning(Exception):
    """Raised to abort processing of the current file after a fatal ffmpeg diagnostic."""
    pass
@dataclass(frozen=True)
class FfmpegRemedyDecision:
    """Immutable outcome of a remedy inspecting one ffmpeg stderr line."""

    stop_process: bool = False
    retry_input_tokens: tuple[str, ...] = ()
    skip_file: bool = False
    console_warning: str = ""
    summary_identifier: str = ""
    unremedied_issue_identifier: str = ""

    @property
    def retry_requested(self) -> bool:
        """True when the remedy asked for a rerun with extra input tokens."""
        return len(self.retry_input_tokens) > 0
class FfmpegRemedy:
    """Base class for ffmpeg stderr inspectors.

    Subclasses override inspect_line() and return an FfmpegRemedyDecision when
    the line matches their pattern, or None to let other remedies see it.
    """
    # Placeholder identifier; subclasses override with a specific one.
    identifier = "ffmpeg-remedy"
    # Harmless remedies are excluded from the unremedied-issue summary.
    harmless = False
    def inspect_line(
        self,
        line: str,
        session: "FfmpegDiagnosticMonitor",
    ) -> FfmpegRemedyDecision | None:
        raise NotImplementedError

View File

@@ -0,0 +1,222 @@
from __future__ import annotations
import re
from ffx.logging_utils import get_ffx_logger
from ffx.process import executeProcess
from .base import FfmpegSkipFileWarning, FfmpegRemedy
from .retry_with_generated_pts import RetryWithGeneratedPtsRemedy
from .state import recordUnremediedIssue
from .warn_corrupt_mpeg_audio import WarnCorruptMpegAudioRemedy
# Keyword heuristics marking an ffmpeg stderr line as a diagnostic that no
# remedy handled; matching lines are recorded for the end-of-run summary.
UNHANDLED_DIAGNOSTIC_PATTERNS = (
    re.compile(r"\bwarning\b", re.IGNORECASE),
    re.compile(r"\berror\b", re.IGNORECASE),
    re.compile(r"\bfailed\b", re.IGNORECASE),
    re.compile(r"\binvalid\b", re.IGNORECASE),
    re.compile(r"\bmissing\b", re.IGNORECASE),
    re.compile(r"\bcorrupt\b", re.IGNORECASE),
    re.compile(r"\boverflow\b", re.IGNORECASE),
    re.compile(r"\bdeprecated\b", re.IGNORECASE),
)
class FfmpegDiagnosticMonitor:
    """Per-attempt observer of one ffmpeg run's stderr.

    Feeds each line through the configured remedies, collecting retry tokens,
    skip-file verdicts and once-only console warnings, and records diagnostics
    that no remedy claimed so they appear in the end-of-run summary.
    """
    def __init__(
        self,
        context: dict | None,
        command_sequence: list[str],
        *,
        remedies: list[FfmpegRemedy] | None = None,
        emittedWarnings: set[str] | None = None,
    ):
        self.context = context or {}
        self.command_sequence = list(command_sequence)
        self.logger = self.context.get("logger", get_ffx_logger())
        self.source_path = str(self.context.get("current_source_path", "")).strip()
        # Default remedy chain; order matters — the first matching remedy wins.
        self.remedies = remedies or [
            RetryWithGeneratedPtsRemedy(),
            WarnCorruptMpegAudioRemedy(),
        ]
        # May be shared across retry attempts so warnings are printed once only.
        self._emittedWarnings = emittedWarnings if emittedWarnings is not None else set()
        self.retry_input_tokens: tuple[str, ...] = ()
        self.skip_file = False
        self.skip_file_message = ""
    def describe_source(self) -> str:
        # Human-readable label for log messages.
        return self.source_path if self.source_path else "current file"
    def command_contains_tokens(self, tokens: tuple[str, ...]) -> bool:
        """Return True when tokens occur as a contiguous run in the command."""
        tokenCount = len(tokens)
        if tokenCount == 0:
            return True
        return any(
            tuple(self.command_sequence[index:index + tokenCount]) == tuple(tokens)
            for index in range(len(self.command_sequence) - tokenCount + 1)
        )
    def emitConsoleWarning(self, warningMessage: str) -> None:
        # Emit each distinct warning text at most once.
        if warningMessage and warningMessage not in self._emittedWarnings:
            self.logger.warning(warningMessage)
            self._emittedWarnings.add(warningMessage)
    def recordUnremediedIssue(self, issueIdentifier: str, issueLine: str) -> None:
        """Record an unhandled diagnostic; warn only on the first one per file."""
        isFirstIssueForFile = recordUnremediedIssue(
            self.context,
            self.describe_source(),
            issueIdentifier,
        )
        if not isFirstIssueForFile:
            return
        self.emitConsoleWarning(
            f"ffmpeg reported a diagnostic with no automatic remedy while converting "
            + f"{self.describe_source()}. FFX will continue, but review the output "
            + f"file. First unhandled line: {issueLine}"
        )
    def lineLooksLikeUnhandledDiagnostic(self, line: str) -> bool:
        # Keyword heuristic; see UNHANDLED_DIAGNOSTIC_PATTERNS above.
        return any(pattern.search(line) for pattern in UNHANDLED_DIAGNOSTIC_PATTERNS)
    def getUnhandledDiagnosticIdentifier(self, line: str) -> str:
        # Coarse severity classification used as the summary identifier.
        loweredLine = str(line).lower()
        if any(token in loweredLine for token in ("error", "failed", "invalid", "missing", "corrupt", "overflow")):
            return "unhandled-error"
        if any(token in loweredLine for token in ("warning", "deprecated")):
            return "unhandled-warning"
        return "unhandled-diagnostic"
    def getSummaryIdentifier(
        self,
        remedy: FfmpegRemedy,
        decision,
    ) -> str:
        """Pick the summary identifier: explicit > remedy id > issue id."""
        explicitIdentifier = str(decision.summary_identifier).strip()
        if explicitIdentifier:
            return explicitIdentifier
        remedyIdentifier = str(getattr(remedy, "identifier", "")).strip()
        # Ignore the base-class placeholder identifier.
        if remedyIdentifier and remedyIdentifier != FfmpegRemedy.identifier:
            return remedyIdentifier
        return str(decision.unremedied_issue_identifier).strip()
    def shouldRecordSummary(
        self,
        remedy: FfmpegRemedy,
        decision,
    ) -> bool:
        # Harmless remedies and pure retries do not warrant a summary entry.
        if getattr(remedy, "harmless", False):
            return False
        if decision.retry_requested and not decision.skip_file:
            return False
        return bool(self.getSummaryIdentifier(remedy, decision))
    def handle_stderr_line(self, line: str) -> bool:
        """Process one stderr line; return True to ask the caller to stop ffmpeg."""
        strippedLine = str(line).strip()
        if not strippedLine:
            return False
        for remedy in self.remedies:
            decision = remedy.inspect_line(strippedLine, self)
            if decision is None:
                continue
            self.emitConsoleWarning(decision.console_warning)
            if decision.retry_requested:
                self.retry_input_tokens = tuple(decision.retry_input_tokens)
            if self.shouldRecordSummary(remedy, decision):
                recordUnremediedIssue(
                    self.context,
                    self.describe_source(),
                    self.getSummaryIdentifier(remedy, decision),
                )
            if decision.skip_file:
                self.skip_file = True
                self.skip_file_message = (
                    decision.console_warning
                    or f"Skipping file {self.describe_source()} because ffmpeg reported a fatal diagnostic."
                )
            # The first remedy that returns a decision consumes the line.
            return bool(decision.stop_process)
        if self.lineLooksLikeUnhandledDiagnostic(strippedLine):
            self.recordUnremediedIssue(
                self.getUnhandledDiagnosticIdentifier(strippedLine),
                strippedLine,
            )
        return False
    @property
    def retry_requested(self) -> bool:
        # True once any remedy asked for a rerun with extra input tokens.
        return bool(self.retry_input_tokens)
def insertFfmpegInputOptions(
    commandSequence: list[str],
    extraTokens: tuple[str, ...],
) -> list[str]:
    """Return a copy of commandSequence with extraTokens spliced in right after the executable."""
    if not extraTokens:
        return list(commandSequence)
    if not commandSequence:
        return list(extraTokens)
    executable, *remainingTokens = commandSequence
    # Input options must precede the remaining tokens (notably -i).
    return [executable, *extraTokens, *remainingTokens]
class FfmpegCommandRunner:
    """Runs an ffmpeg command under diagnostic monitoring with automatic retries.

    Each attempt gets a fresh FfmpegDiagnosticMonitor; when a remedy requests a
    retry, the extra input tokens are spliced into the command and the run is
    repeated. A skip-file verdict is surfaced as FfmpegSkipFileWarning.
    """
    def __init__(
        self,
        context: dict | None,
        *,
        remedies: list[FfmpegRemedy] | None = None,
    ):
        self.__context = context or {}
        # None means: let the monitor use its default remedy chain.
        self.__remedies = remedies
    def execute(
        self,
        commandSequence: list[str],
        *,
        directory: str = None,
        timeoutSeconds: float = None,
    ):
        # Shared across attempts so a retry does not repeat identical warnings.
        emittedWarnings: set[str] = set()
        attemptCommandSequence = list(commandSequence)
        while True:
            monitor = FfmpegDiagnosticMonitor(
                self.__context,
                attemptCommandSequence,
                remedies=self.__remedies,
                emittedWarnings=emittedWarnings,
            )
            out, err, rc = executeProcess(
                attemptCommandSequence,
                directory=directory,
                context=self.__context,
                timeoutSeconds=timeoutSeconds,
                stderrLineHandler=monitor.handle_stderr_line,
            )
            if monitor.retry_requested:
                # Splice the remedy's input options in and run again.
                attemptCommandSequence = insertFfmpegInputOptions(
                    attemptCommandSequence,
                    monitor.retry_input_tokens,
                )
                continue
            if monitor.skip_file:
                raise FfmpegSkipFileWarning(monitor.skip_file_message)
            return out, err, rc

View File

@@ -0,0 +1,41 @@
from __future__ import annotations
import re
from .base import FfmpegRemedy, FfmpegRemedyDecision
class RetryWithGeneratedPtsRemedy(FfmpegRemedy):
    """Retries a run with -fflags +genpts when ffmpeg reports unset packet timestamps."""

    identifier = "retry-with-generated-pts"
    RETRY_INPUT_TOKENS = ("-fflags", "+genpts")
    TIMESTAMP_UNSET_PATTERN = re.compile(
        r"Timestamps are unset in a packet for stream \d+"
    )

    def inspect_line(
        self,
        line: str,
        session: "FfmpegDiagnosticMonitor",
    ) -> FfmpegRemedyDecision | None:
        if not self.TIMESTAMP_UNSET_PATTERN.search(line):
            return None
        alreadyRetried = session.command_contains_tokens(self.RETRY_INPUT_TOKENS)
        if not alreadyRetried:
            # First occurrence: abort this run and rerun with generated PTS.
            return FfmpegRemedyDecision(
                stop_process=True,
                retry_input_tokens=self.RETRY_INPUT_TOKENS,
                console_warning=(
                    f"ffmpeg reported unset packet timestamps for {session.describe_source()}. "
                    + "Stopping early and retrying with -fflags +genpts."
                ),
            )
        # The workaround was already applied and did not help: give this file up.
        return FfmpegRemedyDecision(
            stop_process=True,
            skip_file=True,
            console_warning=(
                f"Skipping file {session.describe_source()}: ffmpeg still reported "
                + "unset packet timestamps after retry with -fflags +genpts."
            ),
            unremedied_issue_identifier="timestamp-unset-after-genpts",
        )

View File

@@ -0,0 +1,53 @@
from __future__ import annotations
import os
# Context keys under which the diagnostics bookkeeping is stored.
DIAGNOSTICS_STATE_KEY = "diagnostics_state"
UNREMEDIED_ISSUES_KEY = "unremedied_issues"
def getDiagnosticsState(context: dict | None) -> dict:
    """Return the diagnostics state dict inside context, creating it on first use.

    A None context yields a fresh, unshared state so callers never need a guard.
    """
    if context is None:
        return {UNREMEDIED_ISSUES_KEY: {}}
    return context.setdefault(DIAGNOSTICS_STATE_KEY, {UNREMEDIED_ISSUES_KEY: {}})
def recordUnremediedIssue(
    context: dict | None,
    sourcePath: str,
    identifier: str,
) -> bool:
    """Record identifier against sourcePath; True only for its first occurrence."""
    if not sourcePath:
        return False
    # Ensure the per-file issue list exists before validating the identifier,
    # mirroring the eager setdefault of the original bookkeeping.
    issuesForFile = getDiagnosticsState(context)[UNREMEDIED_ISSUES_KEY].setdefault(sourcePath, [])
    normalizedIdentifier = str(identifier).strip()
    if not normalizedIdentifier:
        return False
    if normalizedIdentifier in issuesForFile:
        return False
    issuesForFile.append(normalizedIdentifier)
    return True
def getUnremediedIssues(context: dict | None) -> dict[str, list[str]]:
    # Mapping of source path -> ordered list of issue identifiers.
    # NOTE(review): callers receive the live dict, not a copy — treat as read-only.
    diagnosticsState = getDiagnosticsState(context)
    return diagnosticsState.get(UNREMEDIED_ISSUES_KEY, {})
def iterUnremediedIssueSummaryLines(context: dict | None) -> list[str]:
    """Build one 'basename: id, id, ...' summary line per affected file, sorted by path."""
    unremediedIssues = getUnremediedIssues(context)
    return [
        f"{os.path.basename(sourcePath)}: {', '.join(unremediedIssues[sourcePath])}"
        for sourcePath in sorted(unremediedIssues)
    ]

View File

@@ -0,0 +1,34 @@
from __future__ import annotations
import re
from .base import FfmpegRemedy, FfmpegRemedyDecision
class WarnCorruptMpegAudioRemedy(FfmpegRemedy):
    """Warns (without stopping ffmpeg) when damaged MPEG audio frames are reported."""
    identifier = "warn-corrupt-mpeg-audio"
    # stderr lines emitted for corrupt MPEG audio input.
    PATTERNS = (
        re.compile(r"\[mp3float @ .*\] invalid block type", re.IGNORECASE),
        re.compile(r"\[mp3float @ .*\] Header missing"),
        re.compile(r"\[mp3float @ .*\] overread, skip ", re.IGNORECASE),
        re.compile(r"Error while decoding MPEG audio frame\."),
        re.compile(
            r"Error submitting packet to decoder: Invalid data found when processing input"
        ),
    )
    def inspect_line(
        self,
        line: str,
        session: "FfmpegDiagnosticMonitor",
    ) -> FfmpegRemedyDecision | None:
        # No match: let other remedies / the unhandled-diagnostic scan see the line.
        if not any(pattern.search(line) for pattern in self.PATTERNS):
            return None
        # Decision carries only a warning: no retry, no skip, ffmpeg keeps running.
        return FfmpegRemedyDecision(
            console_warning=(
                f"ffmpeg reported damaged MPEG audio frames while converting "
                + f"{session.describe_source()}. FFX will continue, but the output "
                + "audio may contain gaps or glitches."
            ),
        )

View File

@@ -0,0 +1,27 @@
from .diagnostics import (
FfmpegCommandRunner,
FfmpegDiagnosticMonitor,
FfmpegRemedy,
FfmpegRemedyDecision,
FfmpegSkipFileWarning,
RetryWithGeneratedPtsRemedy,
WarnCorruptMpegAudioRemedy,
getDiagnosticsState,
getUnremediedIssues,
iterUnremediedIssueSummaryLines,
recordUnremediedIssue,
)
__all__ = [
"FfmpegCommandRunner",
"FfmpegDiagnosticMonitor",
"FfmpegRemedy",
"FfmpegRemedyDecision",
"FfmpegSkipFileWarning",
"RetryWithGeneratedPtsRemedy",
"WarnCorruptMpegAudioRemedy",
"getDiagnosticsState",
"getUnremediedIssues",
"iterUnremediedIssueSummaryLines",
"recordUnremediedIssue",
]

View File

@@ -1,740 +0,0 @@
#! /usr/bin/python3
import os, click, time, logging
from ffx.configuration_controller import ConfigurationController
from ffx.file_properties import FileProperties
from ffx.ffx_app import FfxApp
from ffx.ffx_controller import FfxController
from ffx.tmdb_controller import TmdbController
from ffx.database import databaseContext
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.show_descriptor import ShowDescriptor
from ffx.track_type import TrackType
from ffx.video_encoder import VideoEncoder
from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.process import executeProcess
from ffx.helper import filterFilename, substituteTmdbFilename
from ffx.helper import getEpisodeFileBasename
from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAULT_DTS_BANDWIDTH, DEFAULT_7_1_BANDWIDTH
from ffx.filter.quality_filter import QualityFilter
from ffx.filter.preset_filter import PresetFilter
from ffx.filter.nlmeans_filter import NlmeansFilter
from ffx.constants import VERSION
from ffx.shifted_season_controller import ShiftedSeasonController
@click.group()
@click.pass_context
@click.option('--database-file', type=str, default='', help='Path to database file')
@click.option('-v', '--verbose', type=int, default=0, help='Set verbosity of output')
@click.option("--dry-run", is_flag=True, default=False)
def ffx(ctx, database_file, verbose, dry_run):
    """FFX"""
    # Shared context object handed down to every subcommand via ctx.obj.
    ctx.obj = {}
    ctx.obj['config'] = ConfigurationController()
    # The CLI flag overrides the configured database location.
    ctx.obj['database'] = databaseContext(databasePath=database_file
        if database_file else ctx.obj['config'].getDatabaseFilePath())
    ctx.obj['dry_run'] = dry_run
    ctx.obj['verbosity'] = verbose
    # Logging level reference:
    # Critical 50
    # Error 40
    # Warning 30
    # Info 20
    # Debug 10
    # Each -v step lowers the threshold by 10; both are clamped at DEBUG (10).
    fileLogVerbosity = max(40 - verbose * 10, 10)
    consoleLogVerbosity = max(20 - verbose * 10, 10)
    ctx.obj['logger'] = logging.getLogger('FFX')
    ctx.obj['logger'].setLevel(logging.DEBUG)
    # File handler captures more detail than the console by default.
    ffxFileHandler = logging.FileHandler(ctx.obj['config'].getLogFilePath())
    ffxFileHandler.setLevel(fileLogVerbosity)
    ffxConsoleHandler = logging.StreamHandler()
    ffxConsoleHandler.setLevel(consoleLogVerbosity)
    fileFormatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ffxFileHandler.setFormatter(fileFormatter)
    # Console output is message-only, without timestamps.
    consoleFormatter = logging.Formatter(
        '%(message)s')
    ffxConsoleHandler.setFormatter(consoleFormatter)
    ctx.obj['logger'].addHandler(ffxConsoleHandler)
    ctx.obj['logger'].addHandler(ffxFileHandler)
# Define a subcommand
@ffx.command()
def version():
    # Print only the bare version string (no docstring: click would show it as help text).
    click.echo(VERSION)
# Another subcommand
@ffx.command()
def help():
    # NOTE(review): the usage line shows a positional syntax that differs from
    # the click option flags defined on convert — confirm it is still accurate.
    click.echo(f"ffx {VERSION}\n")
    click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
@ffx.command()
@click.pass_context
@click.argument('filename', nargs=1)
def inspect(ctx, filename):
    # Hand the 'inspect' command plus the target filename to the textual app.
    ctx.obj['command'] = 'inspect'
    ctx.obj['arguments'] = {}
    ctx.obj['arguments']['filename'] = filename
    app = FfxApp(ctx.obj)
    app.run()
def getUnmuxSequence(trackDescriptor: TrackDescriptor, sourcePath, targetPrefix, targetDirectory = ''):
    """Build the ffmpeg token list that stream-copies one track out of sourcePath.

    The output filename is targetPrefix plus the codec's extension, placed in
    targetDirectory when one is given.
    """
    # Executable and input file.
    commandTokens = list(FfxController.COMMAND_TOKENS)
    commandTokens += ['-i', sourcePath]
    trackType = trackDescriptor.getType()
    if targetDirectory:
        targetPathBase = os.path.join(targetDirectory, targetPrefix)
    else:
        targetPathBase = targetPrefix
    # Map exactly one stream and copy it without re-encoding.
    commandTokens += [
        '-map',
        f"0:{trackType.indicator()}:{trackDescriptor.getSubIndex()}",
        '-c',
        'copy',
    ]
    trackCodec = trackDescriptor.getCodec()
    # Force the container format when the codec prescribes one.
    codecFormat = trackCodec.format()
    if codecFormat is not None:
        commandTokens += ['-f', codecFormat]
    # Output filename.
    commandTokens += [f"{targetPathBase}.{trackCodec.extension()}"]
    return commandTokens
@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option("-o", "--output-directory", type=str, default='')
@click.option("-s", "--subtitles-only", is_flag=True, default=False)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
def unmux(ctx,
          paths,
          label,
          output_directory,
          subtitles_only,
          nice,
          cpu):
    # Silently ignore arguments that are not existing files.
    existingSourcePaths = [p for p in paths if os.path.isfile(p)]
    ctx.obj['logger'].debug(f"\nUnmuxing {len(existingSourcePaths)} files")
    # Resource limits are picked up by the process executor.
    ctx.obj['resource_limits'] = {}
    ctx.obj['resource_limits']['niceness'] = nice
    ctx.obj['resource_limits']['cpu_percent'] = cpu
    for sourcePath in existingSourcePaths:
        fp = FileProperties(ctx.obj, sourcePath)
        try:
            sourceMediaDescriptor = fp.getMediaDescriptor()
            season = fp.getSeason()
            episode = fp.getEpisode()
            #TODO: Adapt recognition for all formats
            targetLabel = label if label else fp.getFileBasename()
            # Indicator is only appended when a label is set AND both season and
            # episode were recognized; -1 marks "not recognized".
            targetIndicator = f"_S{season}E{episode}" if label and season != -1 and episode != -1 else ''
            if label and not targetIndicator:
                ctx.obj['logger'].warning(f"Skipping file {fp.getFilename()}: Label set but no indicator recognized")
                continue
            else:
                ctx.obj['logger'].info(f"\nUnmuxing file {fp.getFilename()}\n")
            for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
                # With --subtitles-only, non-subtitle tracks are skipped.
                if trackDescriptor.getType() == TrackType.SUBTITLE or not subtitles_only:
                    # Target naming pattern, cf.
                    # SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
                    targetPrefix = f"{targetLabel}{targetIndicator}_{trackDescriptor.getIndex()}_{trackDescriptor.getLanguage().threeLetter()}"
                    td: TrackDisposition
                    # Append disposition indicators in a stable (index-sorted) order.
                    for td in sorted(trackDescriptor.getDispositionSet(), key=lambda d: d.index()):
                        targetPrefix += f"_{td.indicator()}"
                    unmuxSequence = getUnmuxSequence(trackDescriptor, sourcePath, targetPrefix, targetDirectory = output_directory)
                    if unmuxSequence:
                        if not ctx.obj['dry_run']:
                            #TODO #425: Codec Enum
                            ctx.obj['logger'].info(f"Unmuxing stream {trackDescriptor.getIndex()} into file {targetPrefix}.{trackDescriptor.getCodec().extension()}")
                            ctx.obj['logger'].debug(f"Executing unmuxing sequence")
                            out, err, rc = executeProcess(unmuxSequence, context = ctx.obj)
                            if rc:
                                ctx.obj['logger'].error(f"Unmuxing of stream {trackDescriptor.getIndex()} failed with error ({rc}) {err}")
                    else:
                        ctx.obj['logger'].warning(f"Skipping stream with unknown codec")
        except Exception as ex:
            # Best-effort batch behavior: one bad file never aborts the whole run.
            ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
@ffx.command()
@click.pass_context
def shows(ctx):
    # Launch the textual app on the shows listing.
    ctx.obj['command'] = 'shows'
    app = FfxApp(ctx.obj)
    app.run()
def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
    """Ensure at most one default and one forced stream per track type.

    Checks video, audio and subtitle tracks for multiple default or forced
    dispositions. When duplicates exist, the user is prompted for the sub index
    to keep (or a click.ClickException is raised when context['no_prompt'] is
    set), and the media descriptor's track flags are updated accordingly.

    The six original copy-pasted branches are collapsed into one data-driven
    loop; check order (video/audio/subtitle x default/forced) and all runtime
    strings are preserved exactly.
    """
    trackSelectors = (
        ('video', TrackType.VIDEO, mediaDescriptor.getVideoTracks),
        ('audio', TrackType.AUDIO, mediaDescriptor.getAudioTracks),
        ('subtitle', TrackType.SUBTITLE, mediaDescriptor.getSubtitleTracks),
    )
    dispositionHandlers = (
        ('default', TrackDisposition.DEFAULT, mediaDescriptor.setDefaultSubTrack),
        ('forced', TrackDisposition.FORCED, mediaDescriptor.setForcedSubTrack),
    )
    for typeLabel, trackType, getTracks in trackSelectors:
        for dispositionLabel, disposition, setSubTrack in dispositionHandlers:
            flaggedTracks = [t for t in getTracks() if t.getDispositionFlag(disposition)]
            if len(flaggedTracks) <= 1:
                continue
            if context['no_prompt']:
                raise click.ClickException(
                    f'More than one {dispositionLabel} {typeLabel} stream detected and no prompt set')
            selectedSubIndex = click.prompt(
                f"More than one {dispositionLabel} {typeLabel} stream detected! Please select stream",
                type=int)
            setSubTrack(trackType, selectedSubIndex)
@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9 or av1)", show_default=True)
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9 encoder")
@click.option('-p', '--preset', type=str, default="", help=f"Quality preset to be used with AV1 encoder")
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
@click.option('--ac3', type=int, default=DEFAULT_AC3_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 5.1 audio streams", show_default=True)
@click.option('--dts', type=int, default=DEFAULT_DTS_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 6.1 audio streams", show_default=True)
@click.option('--subtitle-directory', type=str, default='', help='Load subtitles from here')
@click.option('--subtitle-prefix', type=str, default='', help='Subtitle filename prefix')
@click.option('--language', type=str, multiple=True, help='Set stream language. Use format <stream index>:<3 letter iso code>')
@click.option('--title', type=str, multiple=True, help='Set stream title. Use format <stream index>:<title>')
@click.option('--default-video', type=int, default=-1, help='Index of default video stream')
@click.option('--forced-video', type=int, default=-1, help='Index of forced video stream')
@click.option('--default-audio', type=int, default=-1, help='Index of default audio stream')
@click.option('--forced-audio', type=int, default=-1, help='Index of forced audio stream')
@click.option('--default-subtitle', type=int, default=-1, help='Index of default subtitle stream')
@click.option('--forced-subtitle', type=int, default=-1, help='Index of forced subtitle stream')
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output streams order. Use format comma separated integers')
@click.option("--crop", is_flag=False, flag_value="default", default="none")
@click.option("--output-directory", type=str, default='')
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
@click.option("--denoise-use-hw", is_flag=True, default=False)
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs more details.')
@click.option('--denoise-patch-size', type=str, default='', help='Subimage size to apply filtering on luminosity plane. Reduces broader noise patterns but costly.')
@click.option('--denoise-chroma-patch-size', type=str, default='', help='Subimage size to apply filtering on chroma planes.')
@click.option('--denoise-research-window', type=str, default='', help='Range to search for comparable patches on luminosity plane. Better filtering but costly.')
@click.option('--denoise-chroma-research-window', type=str, default='', help='Range to search for comparable patches on chroma planes.')
@click.option('--show', type=int, default=-1, help='Set TMDB show identifier')
@click.option('--season', type=int, default=-1, help='Set season of show')
@click.option('--episode', type=int, default=-1, help='Set episode of show')
@click.option("--no-tmdb", is_flag=True, default=False)
@click.option("--no-pattern", is_flag=True, default=False)
@click.option("--dont-pass-dispositions", is_flag=True, default=False)
@click.option("--no-prompt", is_flag=True, default=False)
@click.option("--no-signature", is_flag=True, default=False)
@click.option("--keep-mkvmerge-metadata", is_flag=True, default=False)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
def convert(ctx,
            paths,
            label,
            video_encoder,
            quality,
            preset,
            stereo_bitrate,
            ac3,
            dts,
            subtitle_directory,
            subtitle_prefix,
            language,
            title,
            default_video,
            forced_video,
            default_audio,
            forced_audio,
            default_subtitle,
            forced_subtitle,
            rearrange_streams,
            crop,
            output_directory,
            denoise,
            denoise_use_hw,
            denoise_strength,
            denoise_patch_size,
            denoise_chroma_patch_size,
            denoise_research_window,
            denoise_chroma_research_window,
            show,
            season,
            episode,
            no_tmdb,
            no_pattern,
            dont_pass_dispositions,
            no_prompt,
            no_signature,
            keep_mkvmerge_metadata,
            nice,
            cpu):
    """Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin

    Files found under PATHS will be converted according to parameters.
    Filename extensions will be changed appropriately.
    Suffices will be appended to filename in case of multiple created files
    or if the filename has not changed."""
    startTime = time.perf_counter()
    # The click context object is the shared mutable state passed down to
    # controllers (FfxController, TmdbController, ...).
    context = ctx.obj
    context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)
    targetFormat = FfxController.DEFAULT_FILE_FORMAT
    targetExtension = FfxController.DEFAULT_FILE_EXTENSION
    context['use_tmdb'] = not no_tmdb
    context['use_pattern'] = not no_pattern
    context['no_prompt'] = no_prompt
    context['no_signature'] = no_signature
    context['keep_mkvmerge_metadata'] = keep_mkvmerge_metadata
    context['resource_limits'] = {}
    context['resource_limits']['niceness'] = nice
    context['resource_limits']['cpu_percent'] = cpu
    # Subtitle import requires both a directory and a filename prefix.
    context['import_subtitles'] = (subtitle_directory and subtitle_prefix)
    if context['import_subtitles']:
        context['subtitle_directory'] = subtitle_directory
        context['subtitle_prefix'] = subtitle_prefix
    # Only process existing files with a supported input extension.
    existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]
    # Collect CLI overrides into one dict before registering it in the context.
    cliOverrides = {}
    if language:
        cliOverrides['languages'] = {}
        for overLang in language:
            olTokens = overLang.split(':')
            if len(olTokens) == 2:
                try:
                    cliOverrides['languages'][int(olTokens[0])] = olTokens[1]
                except ValueError:
                    ctx.obj['logger'].warning(f"Ignoring non-integer language index {olTokens[0]}")
                    continue
    if title:
        cliOverrides['titles'] = {}
        for overTitle in title:
            otTokens = overTitle.split(':')
            if len(otTokens) == 2:
                try:
                    cliOverrides['titles'][int(otTokens[0])] = otTokens[1]
                except ValueError:
                    ctx.obj['logger'].warning(f"Ignoring non-integer title index {otTokens[0]}")
                    continue
    # -1 means "not given on the command line" for all index options.
    if default_video != -1:
        cliOverrides['default_video'] = default_video
    if forced_video != -1:
        cliOverrides['forced_video'] = forced_video
    if default_audio != -1:
        cliOverrides['default_audio'] = default_audio
    if forced_audio != -1:
        cliOverrides['forced_audio'] = forced_audio
    if default_subtitle != -1:
        cliOverrides['default_subtitle'] = default_subtitle
    if forced_subtitle != -1:
        cliOverrides['forced_subtitle'] = forced_subtitle
    if show != -1 or season != -1 or episode != -1:
        if len(existingSourcePaths) > 1:
            context['logger'].warning(f"Ignoring TMDB show, season, episode overrides, not supported for multiple source files")
        else:
            cliOverrides['tmdb'] = {}
            if show != -1:
                cliOverrides['tmdb']['show'] = show
            if season != -1:
                cliOverrides['tmdb']['season'] = season
            if episode != -1:
                cliOverrides['tmdb']['episode'] = episode
    # BUGFIX: parse --rearrange-streams BEFORE registering the overrides dict.
    # Previously this ran after the `context['overrides']` assignment, so a
    # bare --rearrange-streams (with no other override) was silently dropped
    # from the context.
    if rearrange_streams:
        try:
            cliOverrides['stream_order'] = [int(si) for si in rearrange_streams.split(",")]
        except ValueError as ve:
            errorMessage = "Non-integer in rearrange stream parameter"
            ctx.obj['logger'].error(errorMessage)
            raise click.Abort()
    if cliOverrides:
        context['overrides'] = cliOverrides
    ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")
    # Quality/preset may be comma separated lists; only numeric tokens are
    # reported here, the actual expansion happens via the filter chain below.
    qualityTokens = quality.split(',')
    q_list = [q for q in qualityTokens if q.isnumeric()]
    ctx.obj['logger'].debug(f"Qualities: {q_list}")
    presetTokens = preset.split(',')
    p_list = [p for p in presetTokens if p.isnumeric()]
    ctx.obj['logger'].debug(f"Presets: {p_list}")
    # Audio bitrates are normalized to ffmpeg's "<n>k" notation.
    context['bitrates'] = {}
    context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
    context['bitrates']['ac3'] = str(ac3) if str(ac3).endswith('k') else f"{ac3}k"
    context['bitrates']['dts'] = str(dts) if str(dts).endswith('k') else f"{dts}k"
    ctx.obj['logger'].debug(f"Stereo bitrate: {context['bitrates']['stereo']}")
    ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
    ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")
    # Process crop parameters ("start,length" in addition to the flag value).
    context['perform_crop'] = (crop != 'none')
    if context['perform_crop']:
        cTokens = crop.split(',')
        if cTokens and len(cTokens) == 2:
            try:
                context['crop_start'] = int(cTokens[0])
                context['crop_length'] = int(cTokens[1])
            except ValueError:
                # Abort cleanly instead of surfacing a raw traceback,
                # consistent with the --rearrange-streams handling above.
                ctx.obj['logger'].error("Non-integer in crop parameter")
                raise click.Abort()
            ctx.obj['logger'].debug(f"Crop start={context['crop_start']} length={context['crop_length']}")
    tc = TmdbController() if context['use_tmdb'] else None
    # Filters register themselves on construction; the instances themselves
    # are not needed afterwards (except the quality filter, which drives the
    # chain iteration).
    qualityKwargs = {QualityFilter.QUALITY_KEY: quality}
    qf = QualityFilter(**qualityKwargs)
    if context['video_encoder'] == VideoEncoder.AV1 and preset:
        presetKwargs = {PresetFilter.PRESET_KEY: preset}
        PresetFilter(**presetKwargs)
    denoiseKwargs = {}
    if denoise_strength:
        denoiseKwargs[NlmeansFilter.STRENGTH_KEY] = denoise_strength
    if denoise_patch_size:
        denoiseKwargs[NlmeansFilter.PATCH_SIZE_KEY] = denoise_patch_size
    if denoise_chroma_patch_size:
        denoiseKwargs[NlmeansFilter.CHROMA_PATCH_SIZE_KEY] = denoise_chroma_patch_size
    if denoise_research_window:
        denoiseKwargs[NlmeansFilter.RESEARCH_WINDOW_KEY] = denoise_research_window
    if denoise_chroma_research_window:
        denoiseKwargs[NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY] = denoise_chroma_research_window
    if denoise != 'none' or denoiseKwargs:
        NlmeansFilter(**denoiseKwargs)
    # One job is run per (source file, filter chain variant) combination.
    chainYield = list(qf.getChainYield())
    ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(chainYield)} jobs")
    jobIndex = 0
    for sourcePath in existingSourcePaths:
        # Separate basedir, basename and extension for current source file
        sourceDirectory = os.path.dirname(sourcePath)
        sourceFilename = os.path.basename(sourcePath)
        sourcePathTokens = sourceFilename.split('.')
        sourceFileBasename = '.'.join(sourcePathTokens[:-1])
        sourceFilenameExtension = sourcePathTokens[-1]
        ctx.obj['logger'].info(f"\nProcessing file {sourcePath}")
        targetSuffices = {}
        mediaFileProperties = FileProperties(context, sourceFilename)
        ssc = ShiftedSeasonController(context)
        showId = mediaFileProperties.getShowId()
        # Season/episode: CLI overrides win over values parsed from the
        # filename. -1 means "not set" throughout.
        if 'tmdb' in cliOverrides.keys() and 'season' in cliOverrides['tmdb']:
            showSeason = cliOverrides['tmdb']['season']
        else:
            showSeason = mediaFileProperties.getSeason()
        if 'tmdb' in cliOverrides.keys() and 'episode' in cliOverrides['tmdb']:
            showEpisode = cliOverrides['tmdb']['episode']
        else:
            showEpisode = mediaFileProperties.getEpisode()
        ctx.obj['logger'].debug(f"Season={showSeason} Episode={showEpisode}")
        sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()
        #HINT: This is None if the filename did not match anything in database
        currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None
        ctx.obj['logger'].debug(f"Pattern matching: {'No' if currentPattern is None else 'Yes'}")
        # Setup FfxController accordingly depending on pattern matching is enabled and a pattern was matched
        if currentPattern is None:
            checkUniqueDispositions(context, sourceMediaDescriptor)
            currentShowDescriptor = None
            if context['import_subtitles']:
                sourceMediaDescriptor.importSubtitles(context['subtitle_directory'],
                                                      context['subtitle_prefix'],
                                                      showSeason,
                                                      showEpisode)
            if cliOverrides:
                sourceMediaDescriptor.applyOverrides(cliOverrides)
            fc = FfxController(context, sourceMediaDescriptor)
        else:
            # A pattern match provides the target layout; the source
            # descriptor is still needed for stream mapping.
            targetMediaDescriptor = currentPattern.getMediaDescriptor(ctx.obj)
            checkUniqueDispositions(context, targetMediaDescriptor)
            currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)
            if context['import_subtitles']:
                targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
                                                      context['subtitle_prefix'],
                                                      showSeason,
                                                      showEpisode)
            ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
            if cliOverrides:
                targetMediaDescriptor.applyOverrides(cliOverrides)
            ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
            ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")
            fc = FfxController(context, targetMediaDescriptor, sourceMediaDescriptor)
        # Digit widths for SxxEyy indicators come from the show descriptor
        # when a pattern matched, otherwise from class defaults.
        indexSeasonDigits = currentShowDescriptor.getIndexSeasonDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
        indexEpisodeDigits = currentShowDescriptor.getIndexEpisodeDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
        indicatorSeasonDigits = currentShowDescriptor.getIndicatorSeasonDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
        indicatorEpisodeDigits = currentShowDescriptor.getIndicatorEpisodeDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS
        # Shift season and episode if defined for this show (skipped when the
        # user pinned them explicitly via --show/--season/--episode).
        if ('tmdb' not in cliOverrides.keys() and showId != -1
                and showSeason != -1 and showEpisode != -1):
            shiftedShowSeason, shiftedShowEpisode = ssc.shiftSeason(showId,
                                                                    season=showSeason,
                                                                    episode=showEpisode)
        else:
            shiftedShowSeason = showSeason
            shiftedShowEpisode = showEpisode
        # Assemble target filename accordingly depending on TMDB lookup is enabled
        #HINT: -1 if not set
        showId = cliOverrides['tmdb']['show'] if 'tmdb' in cliOverrides.keys() and 'show' in cliOverrides['tmdb'] else (-1 if currentShowDescriptor is None else currentShowDescriptor.getId())
        if context['use_tmdb'] and showId != -1 and shiftedShowSeason != -1 and shiftedShowEpisode != -1:
            ctx.obj['logger'].debug(f"Querying TMDB for show_id={showId} season={shiftedShowSeason} episode={shiftedShowEpisode}")
            if currentPattern is None:
                sName, showYear = tc.getShowNameAndYear(showId)
                showName = filterFilename(sName)
                showFilenamePrefix = f"{showName} ({str(showYear)})"
            else:
                showFilenamePrefix = currentShowDescriptor.getFilenamePrefix()
            tmdbEpisodeResult = tc.queryEpisode(showId, shiftedShowSeason, shiftedShowEpisode)
            ctx.obj['logger'].debug(f"tmdbEpisodeResult={tmdbEpisodeResult}")
            if tmdbEpisodeResult:
                substitutedEpisodeName = filterFilename(substituteTmdbFilename(tmdbEpisodeResult['name']))
                sourceFileBasename = getEpisodeFileBasename(showFilenamePrefix,
                                                           substitutedEpisodeName,
                                                           shiftedShowSeason,
                                                           shiftedShowEpisode,
                                                           indexSeasonDigits,
                                                           indexEpisodeDigits,
                                                           indicatorSeasonDigits,
                                                           indicatorEpisodeDigits,
                                                           context=ctx.obj)
        # When a label is given it replaces the basename, so the SxxEyy
        # indicator is re-added as a suffix to keep files distinguishable.
        if label:
            if shiftedShowSeason > -1 and shiftedShowEpisode > -1:
                targetSuffices['se'] = f"S{shiftedShowSeason:0{indicatorSeasonDigits}d}E{shiftedShowEpisode:0{indicatorEpisodeDigits}d}"
            elif shiftedShowEpisode > -1:
                targetSuffices['se'] = f"E{shiftedShowEpisode:0{indicatorEpisodeDigits}d}"
        else:
            if 'se' in targetSuffices.keys():
                del targetSuffices['se']
        ctx.obj['logger'].debug(f"fileBasename={sourceFileBasename}")
        for chainIteration in chainYield:
            ctx.obj['logger'].debug(f"\nchain iteration: {chainIteration}\n")
            chainVariant = '-'.join([fy['variant'] for fy in chainIteration])
            ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} variant={chainVariant}")
            jobIndex += 1
            ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
            ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")
            # Basename: TMDB-derived name unless an explicit label was given.
            targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
            targetFilenameTokens = [targetFileBasename]
            if 'se' in targetSuffices.keys():
                targetFilenameTokens += [targetSuffices['se']]
            # Each filter in the chain may contribute filename suffices
            # (e.g. quality variants).
            for filterYield in chainIteration:
                targetFilenameTokens += filterYield['suffices']
            #TODO #387
            targetFilename = f"{'_'.join(targetFilenameTokens)}.{targetExtension}"
            targetPath = os.path.join(output_directory if output_directory else sourceDirectory, targetFilename)
            #TODO: target extension anpassen
            ctx.obj['logger'].info(f"Creating file {targetFilename}")
            fc.runJob(sourcePath,
                      targetPath,
                      targetFormat,
                      context['video_encoder'],
                      chainIteration)
    #TODO: click.confirm('Warning! This file is not compliant to the defined source schema! Do you want to continue?', abort=True)
    endTime = time.perf_counter()
    ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")
# Script entry point: delegate to the click command group when executed
# directly (no effect when the module is imported).
if __name__ == '__main__':
    ffx()

View File

@@ -1,7 +1,10 @@
from textual.app import App
from .i18n import set_current_language, t
from .shows_screen import ShowsScreen
from .media_details_screen import MediaDetailsScreen
from .inspect_details_screen import InspectDetailsScreen
from .media_edit_screen import MediaEditScreen
from .screen_support import configure_screen_log_handler, set_screen_log_pane_enabled
class FfxApp(App):
@@ -9,8 +12,8 @@ class FfxApp(App):
TITLE = "FFX"
BINDINGS = [
("q", "quit()", "Quit"),
("h", "switch_mode('help')", "Help"),
("q", "quit()", t("Quit")),
("h", "switch_mode('help')", t("Help")),
]
@@ -19,6 +22,14 @@ class FfxApp(App):
# Data 'input' variable
self.context = context
set_current_language(self.context.get("language"))
debug_mode = bool(self.context.get("debug", False))
set_screen_log_pane_enabled(debug_mode)
configure_screen_log_handler(
self.context.get("logger"),
self,
enabled=debug_mode,
)
def on_mount(self) -> None:
@@ -28,11 +39,13 @@ class FfxApp(App):
if self.context['command'] == 'shows':
self.push_screen(ShowsScreen())
if self.context['command'] == 'inspect':
self.push_screen(MediaDetailsScreen())
if self.context['command'] == 'inspect':
self.push_screen(InspectDetailsScreen())
if self.context['command'] == 'edit':
self.push_screen(MediaEditScreen())
def getContext(self):
"""Data 'output' method"""
return self.context

View File

@@ -1,38 +1,52 @@
import os, click
import os, click, subprocess
from functools import lru_cache
from logging import Logger
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.diagnostics import FfmpegCommandRunner
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.audio_layout import AudioLayout
from ffx.track_type import TrackType
from ffx.track_codec import TrackCodec
from ffx.video_encoder import VideoEncoder
from ffx.process import executeProcess
from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.constants import DEFAULT_CROP_START, DEFAULT_CROP_LENGTH
from ffx.constants import (
DEFAULT_CONTAINER_EXTENSION,
DEFAULT_CONTAINER_FORMAT,
DEFAULT_VIDEO_ENCODER_LABEL,
DEFAULT_cut_start,
DEFAULT_cut_length,
FFMPEG_COMMAND_TOKENS,
FFMPEG_NULL_OUTPUT_TOKENS,
SUPPORTED_INPUT_FILE_EXTENSIONS,
)
from ffx.filter.quality_filter import QualityFilter
from ffx.filter.preset_filter import PresetFilter
from ffx.filter.crop_filter import CropFilter
from ffx.model.pattern import Pattern
class FfxController():
COMMAND_TOKENS = ['ffmpeg', '-y']
NULL_TOKENS = ['-f', 'null', '/dev/null'] # -f null /dev/null
COMMAND_TOKENS = list(FFMPEG_COMMAND_TOKENS)
NULL_TOKENS = list(FFMPEG_NULL_OUTPUT_TOKENS) # -f null /dev/null
TEMP_FILE_NAME = "ffmpeg2pass-0.log"
DEFAULT_VIDEO_ENCODER = VideoEncoder.VP9.label()
DEFAULT_VIDEO_ENCODER = DEFAULT_VIDEO_ENCODER_LABEL
DEFAULT_FILE_FORMAT = 'webm'
DEFAULT_FILE_EXTENSION = 'webm'
DEFAULT_FILE_FORMAT = DEFAULT_CONTAINER_FORMAT
DEFAULT_FILE_EXTENSION = DEFAULT_CONTAINER_EXTENSION
INPUT_FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
INPUT_FILE_EXTENSIONS = list(SUPPORTED_INPUT_FILE_EXTENSIONS)
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
#!
SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
# SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
def __init__(self,
context : dict,
@@ -40,12 +54,64 @@ class FfxController():
sourceMediaDescriptor : MediaDescriptor = None):
self.__context = context
self.__sourceMediaDescriptor = sourceMediaDescriptor
self.__targetMediaDescriptor = targetMediaDescriptor
self.__sourceMediaDescriptor = sourceMediaDescriptor
self.__configurationData = self.__context['config'].getData()
self.__mdcs = MediaDescriptorChangeSet(context,
targetMediaDescriptor,
sourceMediaDescriptor)
self.__logger = context['logger']
self.__logger: Logger = context['logger']
self.__warnedH264Fallback = False
self.__ffmpegCommandRunner = FfmpegCommandRunner(context)
@staticmethod
@lru_cache(maxsize=None)
def isFfmpegEncoderAvailable(encoderName: str) -> bool:
completed = subprocess.run(
["ffmpeg", "-encoders"],
capture_output=True,
text=True,
check=False,
)
if completed.returncode != 0:
return False
resolvedEncoderName = str(encoderName).strip()
for line in completed.stdout.splitlines():
if not line.startswith(" "):
continue
tokens = line.split(maxsplit=2)
if len(tokens) >= 2 and tokens[1] == resolvedEncoderName:
return True
return False
@classmethod
def getSupportedSoftwareH264Encoder(cls) -> str | None:
if cls.isFfmpegEncoderAvailable("libx264"):
return "libx264"
if cls.isFfmpegEncoderAvailable("libopenh264"):
return "libopenh264"
return None
def executeCommandSequence(self, commandSequence):
if commandSequence and str(commandSequence[0]).strip() == "ffmpeg":
out, err, rc = self.__ffmpegCommandRunner.execute(
commandSequence,
timeoutSeconds=None,
)
else:
out, err, rc = executeProcess(commandSequence, context=self.__context)
if rc:
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
return out, err, rc
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
@@ -55,6 +121,31 @@ class FfxController():
'-pix_fmt', 'yuv420p10le']
# -c:v libx264 -preset slow -crf 17
def generateH264Tokens(self, quality, subIndex : int = 0):
h264Encoder = self.getSupportedSoftwareH264Encoder()
if h264Encoder == "libx264":
return [f"-c:v:{int(subIndex)}", 'libx264',
"-preset", "slow",
'-crf', str(quality)]
if h264Encoder == "libopenh264":
if not self.__warnedH264Fallback:
self.__logger.warning(
"libx264 encoder unavailable; falling back to libopenh264 for H.264 encoding."
)
self.__warnedH264Fallback = True
return [f"-c:v:{int(subIndex)}", 'libopenh264',
'-pix_fmt', 'yuv420p']
raise click.ClickException(
"H.264 encoding requested but no supported software H.264 encoder is available. "
+ "Tried libx264 and libopenh264."
)
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
@@ -82,33 +173,94 @@ class FfxController():
'-auto-alt-ref', '1',
'-lag-in-frames', '25']
def generateVideoCopyTokens(self, subIndex):
return [f"-c:v:{int(subIndex)}",
'copy']
def generateAudioCopyTokens(self, subIndex):
return [f"-c:a:{int(subIndex)}", 'copy']
def generateVideoCopyAllTokens(self):
if self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
return ["-c:v", "copy"]
return []
def generateAudioCopyAllTokens(self):
if self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO):
return ["-c:a", "copy"]
return []
def generateSubtitleCopyTokens(self, subIndex):
return [f"-c:s:{int(subIndex)}", 'copy']
def generateAttachmentCopyTokens(self, subIndex):
return [f"-c:t:{int(subIndex)}", 'copy']
def generateCopyTokens(self):
copyTokens = []
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
copyTokens += self.generateVideoCopyTokens(trackDescriptor.getSubIndex())
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO):
copyTokens += self.generateAudioCopyTokens(trackDescriptor.getSubIndex())
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.SUBTITLE):
copyTokens += self.generateSubtitleCopyTokens(trackDescriptor.getSubIndex())
attachmentDescriptors = (
self.__sourceMediaDescriptor.getTrackDescriptors(trackType=TrackType.ATTACHMENT)
if self.__sourceMediaDescriptor is not None
else self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.ATTACHMENT)
)
for trackDescriptor in attachmentDescriptors:
copyTokens += self.generateAttachmentCopyTokens(trackDescriptor.getSubIndex())
return copyTokens
def generateCropTokens(self):
if 'crop_start' in self.__context.keys() and 'crop_length' in self.__context.keys():
cropStart = int(self.__context['crop_start'])
cropLength = int(self.__context['crop_length'])
if 'cut_start' in self.__context.keys() and 'cut_length' in self.__context.keys():
cropStart = int(self.__context['cut_start'])
cropLength = int(self.__context['cut_length'])
else:
cropStart = DEFAULT_CROP_START
cropLength = DEFAULT_CROP_LENGTH
cropStart = DEFAULT_cut_start
cropLength = DEFAULT_cut_length
return ['-ss', str(cropStart), '-t', str(cropLength)]
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
outputFilePath = f"{filePathBase}{'.'+str(ext) if ext else ''}"
self.__logger.debug(f"FfxController.generateOutputTokens(): base='{filePathBase}' format='{format}' ext='{ext}'")
outputFilePath = f"{filePathBase}{('.'+str(ext)) if ext else ''}"
if format:
return ['-f', format, outputFilePath]
else:
return [outputFilePath]
def generateEncodingMetadataTags(self, videoEncoder: VideoEncoder, quality, preset) -> dict:
metadataTags = {}
if videoEncoder in (VideoEncoder.AV1, VideoEncoder.H264, VideoEncoder.VP9):
metadataTags["ENCODING_QUALITY"] = str(quality)
if videoEncoder == VideoEncoder.AV1:
metadataTags["ENCODING_PRESET"] = str(preset)
return metadataTags
def generateAudioEncodingTokens(self):
"""Generates ffmpeg options audio streams including channel remapping, codec and bitrate"""
audioTokens = []
targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
# targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
targetAudioTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO)
trackSubIndex = 0
for trackDescriptor in targetAudioTrackDescriptors:
@@ -144,136 +296,165 @@ class FfxController():
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
f"-b:a:{trackSubIndex}",
self.__context['bitrates']['ac3']]
# -ac 5 ?
if trackAudioLayout == AudioLayout.LAYOUT_5_0:
audioTokens += [f"-c:a:{trackSubIndex}",
'libopus',
f"-filter:a:{trackSubIndex}",
'channelmap=channel_layout=5.0',
f"-b:a:{trackSubIndex}",
self.__context['bitrates']['ac3']]
trackSubIndex += 1
return audioTokens
# -disposition:s:0 default -disposition:s:1 0
def generateDispositionTokens(self):
targetTrackDescriptors = self.__targetMediaDescriptor.getAllTrackDescriptors()
sourceTrackDescriptors = ([] if self.__sourceMediaDescriptor is None
else self.__sourceMediaDescriptor.getAllTrackDescriptors())
dispositionTokens = []
for trackIndex in range(len(targetTrackDescriptors)):
td = targetTrackDescriptors[trackIndex]
#HINT: No dispositions for pgs subtitle tracks that have no external file source
if (td.getExternalSourceFilePath()
or td.getCodec() != TrackCodec.PGS):
subIndex = td.getSubIndex()
streamIndicator = td.getType().indicator()
sourceDispositionSet = sourceTrackDescriptors[td.getSourceIndex()].getDispositionSet() if sourceTrackDescriptors else set()
#TODO: Alles discarden was im targetDescriptor vorhanden ist (?)
sourceDispositionSet.discard(TrackDisposition.DEFAULT)
dispositionSet = td.getDispositionSet() | sourceDispositionSet
if dispositionSet:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in dispositionSet])]
else:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
return dispositionTokens
def generateMetadataTokens(self):
metadataTokens = []
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
removeTrackKeys = metadataConfiguration['streams']['remove'] if 'streams' in metadataConfiguration.keys() and 'remove' in metadataConfiguration['streams'].keys() else []
mediaTags = {k:v for k,v in self.__targetMediaDescriptor.getTags().items() if not k in removeGlobalKeys}
if (not 'no_signature' in self.__context.keys()
or not self.__context['no_signature']):
outputMediaTags = mediaTags | signatureTags
else:
outputMediaTags = mediaTags
for tagKey, tagValue in outputMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for removeKey in removeGlobalKeys:
metadataTokens += [f"-metadata:g",
f"{removeKey}="]
removeMkvmergeMetadata = (not 'keep_mkvmerge_metadata' in self.__context.keys()
or not self.__context['keep_mkvmerge_metadata'])
#HINT: With current ffmpeg version track metadata tags are not passed to the outfile
for td in self.__targetMediaDescriptor.getAllTrackDescriptors():
typeIndicator = td.getType().indicator()
subIndex = td.getSubIndex()
for tagKey, tagValue in td.getTags().items():
if not tagKey in removeTrackKeys:
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
f"{tagKey}={tagValue}"]
for removeKey in removeTrackKeys:
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
f"{removeKey}="]
return metadataTokens
def generateAudioProcessingTokens(self):
if self.__context.get('copy_audio', False):
return self.generateAudioCopyAllTokens()
return self.generateAudioEncodingTokens()
def runJob(self,
sourcePath,
targetPath,
targetFormat: str = '',
videoEncoder: VideoEncoder = VideoEncoder.VP9,
chainIteration: list = []):
chainIteration: list = [],
cropArguments: dict = {},
currentPattern: Pattern = None,
currentShowDescriptor = None):
# quality: int = DEFAULT_QUALITY,
# preset: int = DEFAULT_AV1_PRESET):
videoEncoder: VideoEncoder = self.__context.get('video_encoder', VideoEncoder.VP9)
self.__context['current_source_path'] = sourcePath
copyVideo = self.__context.get('copy_video', False) or videoEncoder == VideoEncoder.COPY
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
cropFilters = [fy for fy in chainIteration if fy['identifier'] == 'crop']
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
deinterlaceFilters = [fy for fy in chainIteration if fy['identifier'] == 'bwdif']
if copyVideo:
quality = None
self.__context['encoding_metadata_tags'] = {}
else:
if qualityFilters and (quality := qualityFilters[0]['parameters']['quality']):
self.__logger.info(f"Setting quality {quality} from command line")
elif currentPattern is not None and (quality := currentPattern.quality):
self.__logger.info(f"Setting quality {quality} from pattern")
elif currentShowDescriptor is not None and (quality := currentShowDescriptor.getQuality()):
self.__logger.info(f"Setting quality {quality} from show")
else:
quality = (QualityFilter.DEFAULT_H264_QUALITY
if (videoEncoder == VideoEncoder.H264)
else QualityFilter.DEFAULT_VP9_QUALITY)
self.__logger.info(f"Setting quality {quality} from default")
quality = qualityFilters[0]['parameters']['quality'] if qualityFilters else QualityFilter.DEFAULT_QUALITY
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
if not copyVideo:
self.__context['encoding_metadata_tags'] = self.generateEncodingMetadataTags(
videoEncoder,
quality,
preset,
)
denoiseTokens = denoiseFilters[0]['tokens'] if denoiseFilters else []
filterParamTokens = []
if cropArguments and not copyVideo:
cropParams = (f"crop="
+ f"{cropArguments[CropFilter.OUTPUT_WIDTH_KEY]}"
+ f":{cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]}"
+ f":{cropArguments[CropFilter.OFFSET_X_KEY]}"
+ f":{cropArguments[CropFilter.OFFSET_Y_KEY]}")
filterParamTokens.append(cropParams)
if not copyVideo:
filterParamTokens.extend(denoiseFilters[0]['tokens'] if denoiseFilters else [])
filterParamTokens.extend(deinterlaceFilters[0]['tokens'] if deinterlaceFilters else [])
deinterlaceFilters
filterTokens = ['-vf', ', '.join(filterParamTokens)] if filterParamTokens else []
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
if videoEncoder == VideoEncoder.COPY:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += self.generateCopyTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
self.__logger.debug("FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
self.executeCommandSequence(commandSequence)
return
if copyVideo:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += self.generateVideoCopyAllTokens()
commandSequence += self.generateAudioProcessingTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
self.__logger.debug("FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
self.executeCommandSequence(commandSequence)
return
if videoEncoder == VideoEncoder.AV1:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens()
+ self.generateDispositionTokens())
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence += self.generateMetadataTokens()
commandSequence += denoiseTokens
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += filterTokens
commandSequence += (self.generateAudioEncodingTokens()
+ self.generateAV1Tokens(int(quality), int(preset))
+ self.generateAudioEncodingTokens())
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence += self.generateAV1Tokens(int(quality), int(preset))
if self.__context['perform_crop']:
commandSequence += FfxController.generateCropTokens()
commandSequence += self.generateAudioProcessingTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
@@ -281,7 +462,38 @@ class FfxController():
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
executeProcess(commandSequence, context = self.__context)
self.executeCommandSequence(commandSequence)
if videoEncoder == VideoEncoder.H264:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += filterTokens
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence += self.generateH264Tokens(int(quality))
commandSequence += self.generateAudioProcessingTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
self.executeCommandSequence(commandSequence)
if videoEncoder == VideoEncoder.VP9:
@@ -294,11 +506,14 @@ class FfxController():
# the required bitrate for the second run is determined and recorded
# TODO: Results seems to be slightly better with first pass omitted,
# Confirm or find better filter settings for 2-pass
# commandSequence1 += self.__context['denoiser'].generateDenoiseTokens()
# commandSequence1 += self.__context['denoiser'].generatefilterTokens()
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
if self.__context['perform_crop']:
if self.__context['perform_cut']:
commandSequence1 += self.generateCropTokens()
commandSequence1 += FfxController.NULL_TOKENS
@@ -309,20 +524,25 @@ class FfxController():
self.__logger.debug(f"FfxController.runJob(): Running command sequence 1")
if not self.__context['dry_run']:
executeProcess(commandSequence1, context = self.__context)
self.executeCommandSequence(commandSequence1)
commandSequence2 = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens()
+ self.generateDispositionTokens())
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence2 += self.generateMetadataTokens()
commandSequence2 += denoiseTokens
commandSequence2 += self.__mdcs.generateMetadataTokens()
commandSequence2 += filterTokens
commandSequence2 += self.generateVP9Pass2Tokens(int(quality)) + self.generateAudioEncodingTokens()
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence2 += self.generateVP9Pass2Tokens(int(quality))
if self.__context['perform_crop']:
commandSequence2 += self.generateAudioProcessingTokens()
if self.__context['perform_cut']:
commandSequence2 += self.generateCropTokens()
commandSequence2 += self.generateOutputTokens(targetPath,
@@ -331,9 +551,7 @@ class FfxController():
self.__logger.debug(f"FfxController.runJob(): Running command sequence 2")
if not self.__context['dry_run']:
out, err, rc = executeProcess(commandSequence2, context = self.__context)
if rc:
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
self.executeCommandSequence(commandSequence2)
@@ -358,4 +576,4 @@ class FfxController():
str(length),
path]
out, err, rc = executeProcess(commandTokens, context = self.__context)
self.executeCommandSequence(commandTokens)

View File

@@ -1,23 +1,47 @@
import os, re, json
from .constants import (
DEFAULT_CROPDETECT_DURATION_SECONDS,
DEFAULT_CROPDETECT_SEEK_SECONDS,
FFMPEG_COMMAND_TOKENS,
FFMPEG_NULL_OUTPUT_TOKENS,
)
from .media_descriptor import MediaDescriptor
from .pattern_controller import PatternController
from ffx.filter.crop_filter import CropFilter
from .process import executeProcess
from ffx.model.pattern import Pattern
class FileProperties():
_cropdetect_cache: dict[tuple[str, int, int, int, int], dict[str, str]] = {}
FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
FFPROBE_COMMAND_TOKENS = ["ffprobe", "-hide_banner", "-show_format", "-show_streams", "-of", "json"]
SE_INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
CROPDETECT_PATTERN = 'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'
DEFAULT_INDEX_DIGITS = 3
@classmethod
def extractSeasonEpisodeValues(cls, sourceText: str) -> tuple[int | None, int] | None:
    """Extract season/episode numbers from free text.

    Returns (season, episode) for an SxxEyy tag, (None, episode) for a
    bare Eyy tag, and None when neither indicator is present.
    """
    text = str(sourceText)
    seasonEpisode = re.search(cls.SEASON_EPISODE_INDICATOR_MATCH, text)
    if seasonEpisode:
        seasonValue, episodeValue = seasonEpisode.groups()
        return int(seasonValue), int(episodeValue)
    episodeOnly = re.search(cls.EPISODE_INDICATOR_MATCH, text)
    if episodeOnly:
        return None, int(episodeOnly.group(1))
    return None
def __init__(self, context, sourcePath):
self.context = context
@@ -39,10 +63,19 @@ class FileProperties():
self.__sourceFileBasename = self.__sourceFilename
self.__sourceFilenameExtension = ''
self.__pc = PatternController(context)
self.__usePattern = bool(self.context.get('use_pattern', True))
self.__pc = (
PatternController(context)
if self.__usePattern and 'database' in self.context
else None
)
# Checking if database contains matching pattern
matchResult = self.__pc.matchFilename(self.__sourceFilename)
matchResult = (
self.__pc.matchFilename(self.__sourceFilename)
if self.__pc is not None
else {}
)
self.__logger.debug(f"FileProperties.__init__(): Match result: {matchResult}")
@@ -52,26 +85,67 @@ class FileProperties():
databaseMatchedGroups = matchResult['match'].groups()
self.__logger.debug(f"FileProperties.__init__(): Matched groups: {databaseMatchedGroups}")
seIndicator = databaseMatchedGroups[0]
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, seIndicator)
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, seIndicator)
indicatorSource = databaseMatchedGroups[0]
else:
self.__logger.debug(f"FileProperties.__init__(): Checking file name for indicator {self.__sourceFilename}")
indicatorSource = self.__sourceFilename
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, self.__sourceFilename)
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, self.__sourceFilename)
if se_match is not None:
self.__season = int(se_match.group(1))
self.__episode = int(se_match.group(2))
elif e_match is not None:
self.__season = -1
self.__episode = int(e_match.group(1))
else:
seasonEpisodeValues = self.extractSeasonEpisodeValues(indicatorSource)
if seasonEpisodeValues is None:
self.__season = -1
self.__episode = -1
else:
sourceSeason, sourceEpisode = seasonEpisodeValues
self.__season = -1 if sourceSeason is None else int(sourceSeason)
self.__episode = int(sourceEpisode)
self.__ffprobeData = None
def _getCropdetectWindow(self):
    """Return the validated (seek_seconds, duration_seconds) sampling window.

    Values come from context['cropdetect'] with module defaults as fallback.
    Raises ValueError for a negative seek or a non-positive duration.
    """
    settings = self.context.get('cropdetect', {})
    seekSeconds = int(settings.get('seek_seconds', DEFAULT_CROPDETECT_SEEK_SECONDS))
    durationSeconds = int(settings.get('duration_seconds', DEFAULT_CROPDETECT_DURATION_SECONDS))
    if seekSeconds < 0:
        raise ValueError("Crop detection seek seconds must be zero or greater.")
    if durationSeconds <= 0:
        raise ValueError("Crop detection duration seconds must be greater than zero.")
    return seekSeconds, durationSeconds
def _getCropdetectCacheKey(self):
    """Build the memoisation key for cropdetect results.

    The key covers the absolute path, file mtime and size, plus the
    detection window, so any change to the file or window invalidates
    the cached result.
    """
    statResult = os.stat(self.__sourcePath)
    seekSeconds, durationSeconds = self._getCropdetectWindow()
    return (os.path.abspath(self.__sourcePath),
            statResult.st_mtime_ns,
            statResult.st_size,
            seekSeconds,
            durationSeconds)
@classmethod
def _clear_cropdetect_cache(cls):
    """Drop all cached cropdetect results shared across FileProperties instances."""
    cls._cropdetect_cache.clear()
def _getFfprobeData(self):
    """Run ffprobe once for the source file and memoise the parsed JSON.

    Raises Exception when ffprobe reports invalid stream data or exits
    with a non-zero return code.
    """
    if self.__ffprobeData is None:
        ffprobeOutput, ffprobeError, returnCode = executeProcess(
            FileProperties.FFPROBE_COMMAND_TOKENS + [self.__sourcePath]
        )
        if 'Invalid data found when processing input' in ffprobeError:
            raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
        if returnCode != 0:
            raise Exception(f"ffprobe returned with error {returnCode}")
        self.__ffprobeData = json.loads(ffprobeOutput)
    return self.__ffprobeData
def getFormatData(self):
    """Return the ffprobe 'format' section for the source file.

    Cleanup: this method still carried a dead inline ffprobe invocation
    left over from before the shared memoised probe existed; it now only
    delegates to _getFfprobeData().
    """
    return self._getFfprobeData()['format']
def getStreamData(self):
    """Return the ffprobe 'streams' section for the source file.

    Cleanup: removed the unreachable duplicate ffprobe invocation that
    remained after the switch to the shared memoised _getFfprobeData()
    helper.
    """
    return self._getFfprobeData()['streams']
def findCropArguments(self):
    """Detect black borders with ffmpeg cropdetect and return crop parameters.

    Returns a dict keyed by the CropFilter parameter keys
    (output_width/output_height/x_offset/y_offset), or {} when no crop
    was detected. Results are memoised per (path, mtime, size, window).
    Raises Exception when ffmpeg fails and produced no usable output.

    Cleanup: removed stale duplicate raise/return lines left behind by a
    previous refactoring.
    """
    cacheKey = self._getCropdetectCacheKey()
    cachedCropArguments = FileProperties._cropdetect_cache.get(cacheKey)
    if cachedCropArguments is not None:
        self.__logger.debug(
            "FileProperties.findCropArguments(): Reusing cached cropdetect result for %s",
            self.__sourcePath,
        )
        # Hand out a copy so callers cannot mutate the cached entry.
        return dict(cachedCropArguments)
    seekSeconds, durationSeconds = self._getCropdetectWindow()
    cropdetectCommand = (
        list(FFMPEG_COMMAND_TOKENS)
        + ["-ss", str(seekSeconds), "-i", self.__sourcePath, "-t", str(durationSeconds), "-vf", "cropdetect"]
        + list(FFMPEG_NULL_OUTPUT_TOKENS)
    )
    _ffmpegOutput, ffmpegError, returnCode = executeProcess(cropdetectCommand, context=self.context)
    # cropdetect reports its suggestions on stderr; tally each distinct
    # crop string and keep the most frequent (ties broken lexically).
    crops = {}
    for el in ffmpegError.split('\n'):
        cropdetect_match = re.search(FileProperties.CROPDETECT_PATTERN, el)
        if cropdetect_match is not None:
            cropParam = str(cropdetect_match.group(0))
            crops[cropParam] = crops.get(cropParam, 0) + 1
    if crops:
        cropString = max(crops.items(), key=lambda item: (item[1], item[0]))[0]
        # "crop=w:h:x:y" -> ["w", "h", "x", "y"]
        cropValues = cropString.split('=')[1].split(':')
        cropArguments = {
            CropFilter.OUTPUT_WIDTH_KEY: cropValues[0],
            CropFilter.OUTPUT_HEIGHT_KEY: cropValues[1],
            CropFilter.OFFSET_X_KEY: cropValues[2],
            CropFilter.OFFSET_Y_KEY: cropValues[3]
        }
        FileProperties._cropdetect_cache[cacheKey] = dict(cropArguments)
        return cropArguments
    # No crop found: surface hard failures, otherwise cache the negative result.
    if returnCode != 0:
        raise Exception(f"ffmpeg cropdetect returned with error {returnCode}")
    FileProperties._cropdetect_cache[cacheKey] = {}
    return {}
def getMediaDescriptor(self):

View File

@@ -0,0 +1,51 @@
import itertools
from .filter import Filter
class CropFilter(Filter):
    """Video crop filter emitting an ffmpeg ``crop=w:h:x:y`` token."""

    IDENTIFIER = 'crop'
    OUTPUT_WIDTH_KEY = 'output_width'
    OUTPUT_HEIGHT_KEY = 'output_height'
    OFFSET_X_KEY = 'x_offset'
    OFFSET_Y_KEY = 'y_offset'

    def __init__(self, **kwargs):
        # Missing keys default to 0 (an effectively empty crop).
        self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY, 0))
        self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY, 0))
        self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY, 0))
        self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY, 0))
        # NOTE(review): passing self to super().__init__ mirrors the other
        # Filter subclasses — confirm the Filter base class expects this.
        super().__init__(self)

    def setArguments(self, **kwargs):
        """Update crop parameters; omitted keys keep their current value.

        Bug fix: kwargs.get(...) without a default returned None for a
        missing key, so int(None) raised TypeError. Falling back to the
        current values is backward-compatible (previously this crashed).
        """
        self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY, self.__outputWidth))
        self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY, self.__outputHeight))
        self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY, self.__offsetX))
        self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY, self.__offsetY))

    def getPayload(self):
        """Describe this filter: identifier, parameters, variant tag and ffmpeg tokens."""
        payload = {'identifier': CropFilter.IDENTIFIER,
                   'parameters': {
                       CropFilter.OUTPUT_WIDTH_KEY: self.__outputWidth,
                       CropFilter.OUTPUT_HEIGHT_KEY: self.__outputHeight,
                       CropFilter.OFFSET_X_KEY: self.__offsetX,
                       CropFilter.OFFSET_Y_KEY: self.__offsetY
                   },
                   'suffices': [],
                   'variant': f"C{self.__outputWidth}-{self.__outputHeight}-{self.__offsetX}-{self.__offsetY}",
                   'tokens': ['crop='
                              + f"{self.__outputWidth}"
                              + f":{self.__outputHeight}"
                              + f":{self.__offsetX}"
                              + f":{self.__offsetY}"]}
        return payload

    def getYield(self):
        """Yield the single payload variant (crop has no parameter sweep)."""
        yield self.getPayload()

View File

@@ -0,0 +1,140 @@
import itertools
from .filter import Filter
class DeinterlaceFilter(Filter):
    """Deinterlacing filter based on ffmpeg's bwdif (one frame per field).

    Cleanup: removed ~100 lines of dead, commented-out code copy-pasted
    from NlmeansFilter (it even referenced NlmeansFilter attributes) and
    the pointless f-string around the constant variant tag.
    """

    IDENTIFIER = 'bwdif'

    def __init__(self, **kwargs):
        # bwdif currently takes no tunable parameters; **kwargs is accepted
        # only for interface parity with the other Filter subclasses.
        super().__init__(self)

    def getPayload(self):
        """Describe this filter: identifier, empty parameter set, and ffmpeg tokens."""
        payload = {'identifier': DeinterlaceFilter.IDENTIFIER,
                   'parameters': {},
                   'suffices': [],
                   'variant': "DEINT",
                   'tokens': ['bwdif=mode=1']}
        return payload

    def getYield(self):
        """Yield the single payload variant (bwdif has no parameter sweep)."""
        yield self.getPayload()

View File

@@ -144,11 +144,11 @@ class NlmeansFilter(Filter):
'suffices': suffices,
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
'tokens': ['-vf', f"{filterName}=s={strength}"
+ f":p={patchSize}"
+ f":pc={chromaPatchSize}"
+ f":r={researchWindow}"
+ f":rc={chromaResearchWindow}"]}
'tokens': [f"{filterName}=s={strength}"
+ f":p={patchSize}"
+ f":pc={chromaPatchSize}"
+ f":r={researchWindow}"
+ f":rc={chromaResearchWindow}"]}
return payload

View File

@@ -1,18 +1,24 @@
import itertools
import click
from .filter import Filter
from ffx.video_encoder import VideoEncoder
class QualityFilter(Filter):
IDENTIFIER = 'quality'
DEFAULT_QUALITY = 32
DEFAULT_VP9_QUALITY = 32
DEFAULT_H264_QUALITY = 17
QUALITY_KEY = 'quality'
def __init__(self, **kwargs):
context = click.get_current_context().obj
self.__qualitiesList = []
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
if qualities:
@@ -26,7 +32,9 @@ class QualityFilter(Filter):
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
self.__qualitiesList.append(qualityValue)
else:
self.__qualitiesList = [QualityFilter.DEFAULT_QUALITY]
self.__qualitiesList = [None]
super().__init__(self)
@@ -51,4 +59,4 @@ class QualityFilter(Filter):
def getYield(self):
for q in self.__qualitiesList:
yield self.getPayload(q)
yield self.getPayload(q)

View File

@@ -2,12 +2,30 @@ from textual.app import ComposeResult
from textual.screen import Screen
from textual.widgets import Footer, Placeholder
from .i18n import t
from .screen_support import build_screen_log_pane, go_back_or_exit
class HelpScreen(Screen):
    """Placeholder help screen with a log pane and an escape-to-back binding."""

    BINDINGS = [
        ("escape", "back", t("Back")),
    ]

    def __init__(self):
        super().__init__()
        # Bug fix: the context was previously assigned to a *local* variable,
        # so on_mount()'s getattr(self, 'context', {}) never found it and the
        # debug title was never applied. Store it on the instance instead.
        self.context = self.app.getContext()

    def compose(self) -> ComposeResult:
        # Row 1
        yield Placeholder(t("Help Screen"))
        yield build_screen_log_pane()
        yield Footer()

    def on_mount(self):
        # getattr fallback kept defensively in case __init__ ran without
        # an active app and context could not be captured.
        if getattr(self, 'context', {}).get('debug', False):
            self.title = f"{self.app.title} - {self.__class__.__name__}"

    def action_back(self):
        go_back_or_exit(self)

View File

@@ -1,8 +1,12 @@
import re, logging
import re
from jinja2 import Environment, Undefined
from .constants import DEFAULT_OUTPUT_FILENAME_TEMPLATE
from .configuration_controller import ConfigurationController
from .logging_utils import get_ffx_logger
from .show_descriptor import ShowDescriptor
from enum import Enum
class EmptyStringUndefined(Undefined):
@@ -10,13 +14,69 @@ class EmptyStringUndefined(Undefined):
return ''
class LogLevel(Enum):
DEBUG = 'debug'
INFO = 'info'
WARNING = 'warning'
ERROR = 'error'
CRITICAL = 'critical'
DIFF_ADDED_KEY = 'added'
DIFF_REMOVED_KEY = 'removed'
DIFF_CHANGED_KEY = 'changed'
DIFF_UNCHANGED_KEY = 'unchanged'
FILENAME_FILTER_TRANSLATION = str.maketrans(
{
"/": "-",
":": ";",
"*": "",
"'": "",
"?": "#",
"": "",
"": "",
}
)
TMDB_FILLER_MARKERS = (" (*)", "(*)")
TMDB_EPISODE_RANGE_SUFFIX_REGEX = re.compile(r"\(([0-9]+)[-/]([0-9]+)\)$")
TMDB_EPISODE_PART_SUFFIX_REGEX = re.compile(r"\(([0-9]+)\)$")
RICH_COLOR_REGEX = re.compile(r"\[[a-z_]+\](.+)\[/[a-z_]+\]")
def dictDiff(a : dict, b : dict):
def dictDiff(a: dict, b: dict, ignoreKeys: list = None, removeKeys: list = None):
    """Compute a key-wise diff between dicts *a* (old) and *b* (new).

    ignoreKeys: Ignored keys are filtered from calculating diff at all
    removeKeys: Override diff calculation to remove keys certainly
    Returns a dict with 'removed'/'added'/'changed' sections as applicable.
    """
    # Bug fix: the defaults were mutable lists ([]), which Python shares
    # across calls; use None sentinels instead. Membership tests use sets
    # for O(1) lookup.
    ignoreSet = set() if ignoreKeys is None else set(ignoreKeys)
    removeSet = set() if removeKeys is None else set(removeKeys)
    a_filtered = {k: v for k, v in a.items() if k not in ignoreSet}
    b_filtered = {k: v for k, v in b.items() if k not in ignoreSet and k not in removeSet}
    a_only = {k: v for k, v in a_filtered.items() if k not in b_filtered}
    b_only = {k: v for k, v in b_filtered.items() if k not in a_filtered}
    common = set(a_filtered.keys()) & set(b_filtered.keys())
    changed = {k: b_filtered[k] for k in common if a_filtered[k] != b_filtered[k]}
    unchanged = {k: b_filtered[k] for k in common if a_filtered[k] == b_filtered[k]}
    diffResult = {}
    if a_only:
        diffResult[DIFF_REMOVED_KEY] = a_only
        # NOTE(review): 'unchanged' is only reported when keys were removed;
        # this mirrors the original control flow — confirm it is intended.
        diffResult[DIFF_UNCHANGED_KEY] = unchanged
    if b_only:
        diffResult[DIFF_ADDED_KEY] = b_only
    if changed:
        diffResult[DIFF_CHANGED_KEY] = changed
    return diffResult
def dictKeysDiff(a : dict, b : dict):
a_keys = set(a.keys())
b_keys = set(b.keys())
@@ -40,9 +100,10 @@ def dictDiff(a : dict, b : dict):
return diffResult
def dictCache(element: dict, cache: list = []):
for index in range(len(cache)):
diff = dictDiff(cache[index], element)
diff = dictKeysDiff(cache[index], element)
if not diff:
return index, cache
cache.append(element)
@@ -53,11 +114,13 @@ def setDiff(a : set, b : set) -> set:
a_only = a - b
b_only = b - a
a_and_b = a & b
diffResult = {}
if a_only:
diffResult[DIFF_REMOVED_KEY] = a_only
diffResult[DIFF_UNCHANGED_KEY] = a_and_b
if b_only:
diffResult[DIFF_ADDED_KEY] = b_only
@@ -67,7 +130,7 @@ def setDiff(a : set, b : set) -> set:
def permutateList(inputList: list, permutation: list):
    """Reorder *inputList* by the index order in *permutation* (not implemented).

    Intended behaviour, illustrated for inputList = [A, B, C]:
    # 0,1,2: ABC
    # 0,2,1: ACB
    # 1,2,0: BCA
    """
    pass
@@ -78,47 +141,45 @@ def filterFilename(fileName: str) -> str:
"""This filter replaces charactes from TMDB responses with characters
less problemating when using in filenames or removes them"""
fileName = str(fileName).replace('/', '-')
fileName = str(fileName).replace(':', ';')
fileName = str(fileName).replace('*', '')
fileName = str(fileName).replace("'", '')
fileName = str(fileName).replace("?", '#')
return fileName.strip()
return str(fileName).translate(FILENAME_FILTER_TRANSLATION).strip()
def substituteTmdbFilename(fileName: str) -> str:
    """If chaining this method with filterFilename use this one first as the latter will destroy some patterns"""
    # Cleanup: removed the pre-refactoring duplicate lines that were left
    # interleaved with the regex-constant based implementation.
    normalizedFileName = str(fileName)
    # This indicates filler episodes in TMDB episode names
    for fillerMarker in TMDB_FILLER_MARKERS:
        normalizedFileName = normalizedFileName.replace(fillerMarker, '')
    # Multi-episode files carry a "(first-last)" or "(first/last)" suffix.
    episodeRangeMatch = TMDB_EPISODE_RANGE_SUFFIX_REGEX.search(normalizedFileName)
    if episodeRangeMatch is not None:
        partFirstIndex, partLastIndex = episodeRangeMatch.groups()
        return TMDB_EPISODE_RANGE_SUFFIX_REGEX.sub(
            f"Teil {partFirstIndex}-{partLastIndex}",
            normalizedFileName,
            count=1,
        )
    # This indicates the index of multi-episode files: "(n)" suffix.
    episodePartMatch = TMDB_EPISODE_PART_SUFFIX_REGEX.search(normalizedFileName)
    if episodePartMatch is not None:
        partIndex = episodePartMatch.group(1)
        return TMDB_EPISODE_PART_SUFFIX_REGEX.sub(
            f"Teil {partIndex}",
            normalizedFileName,
            count=1,
        )
    return normalizedFileName
def getEpisodeFileBasename(showName,
episodeName,
season,
episode,
indexSeasonDigits = 2,
indexEpisodeDigits = 2,
indicatorSeasonDigits = 2,
indicatorEpisodeDigits = 2,
indexSeasonDigits = None,
indexEpisodeDigits = None,
indicatorSeasonDigits = None,
indicatorEpisodeDigits = None,
context = None):
"""
One Piece:
@@ -150,12 +211,21 @@ def getEpisodeFileBasename(showName,
configData = cc.getData() if cc is not None else {}
outputFilenameTemplate = configData.get(ConfigurationController.OUTPUT_FILENAME_TEMPLATE_KEY,
DEFAULT_OUTPUT_FILENAME_TEMPLATE)
defaultDigitLengths = ShowDescriptor.getDefaultDigitLengths(context)
if indexSeasonDigits is None:
indexSeasonDigits = defaultDigitLengths[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
if indexEpisodeDigits is None:
indexEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
if indicatorSeasonDigits is None:
indicatorSeasonDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
if indicatorEpisodeDigits is None:
indicatorEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
if context is not None and 'logger' in context.keys():
logger = context['logger']
else:
logger = logging.getLogger('FFX')
logger.addHandler(logging.NullHandler())
logger = get_ffx_logger()
indexSeparator = ' ' if indexSeasonDigits or indexEpisodeDigits else ''
@@ -185,3 +255,16 @@ def getEpisodeFileBasename(showName,
# return ''.join(filenameTokens)
def formatRichColor(text: str, color: str = None):
    """Wrap *text* in Rich color markup, or return it unchanged when color is None."""
    return text if color is None else f"[{color}]{text}[/{color}]"
def removeRichColor(text: str):
    """Strip a [color]...[/color] Rich wrapper; return *text* unchanged if none matches."""
    colorMatch = RICH_COLOR_REGEX.search(str(text))
    return text if colorMatch is None else str(colorMatch.group(1))

158
src/ffx/i18n.py Normal file
View File

@@ -0,0 +1,158 @@
from __future__ import annotations
import json
import os
from pathlib import Path
# Language used when nothing else (CLI flag, config, environment) resolves.
DEFAULT_LANGUAGE = "de"
# Language the in-source phrases are written in; used as catalog fallback.
SOURCE_LANGUAGE = "en"
# Languages shipping a translation catalog, mapped to their display name.
SUPPORTED_LANGUAGES = {
    "de": "Deutsch",
    "en": "English",
    "fr": "Français",
    "ja": "日本語",
    "nb": "Norsk bokmål",
    "eo": "Esperanto",
    "ta": "தமிழ்",
    "pt": "Português",
    "es": "Español",
}
# Alternate spellings and ISO-639 codes accepted as input, each mapped to
# the canonical supported language code.
LANGUAGE_ALIASES = {
    "deu": "de",
    "ger": "de",
    "english": "en",
    "eng": "en",
    "fra": "fr",
    "fre": "fr",
    "french": "fr",
    "jpn": "ja",
    "japanese": "ja",
    "nor": "nb",
    "nob": "nb",
    "no": "nb",
    "nn": "nb",
    "bokmal": "nb",
    "norwegian": "nb",
    "epo": "eo",
    "esperanto": "eo",
    "tam": "ta",
    "tamil": "ta",
    "por": "pt",
    "portuguese": "pt",
    "spa": "es",
    "spanish": "es",
}
# Parsed catalogs keyed by language code; populated lazily by _load_catalog().
_catalog_cache: dict[str, dict] = {}
# Module-level current language; changed via set_current_language().
_current_language = DEFAULT_LANGUAGE
def _assets_directory() -> Path:
return Path(__file__).resolve().parents[2] / "assets" / "i18n"
def normalize_language_code(value: str | None) -> str | None:
    """Reduce a raw language string (e.g. 'de_DE.UTF-8', 'ger') to a supported code.

    Returns None for None/blank input or an unknown language.
    """
    if value is None:
        return None
    cleaned = str(value).strip().replace("-", "_")
    if not cleaned:
        return None
    # Strip encoding and region suffixes: "de_DE.UTF-8" -> "de".
    base = cleaned.split(".")[0].split("_")[0].lower()
    return base if base in SUPPORTED_LANGUAGES else LANGUAGE_ALIASES.get(base)
def detect_system_language(env: dict[str, str] | None = None) -> str | None:
    """Return the first supported language found in the locale environment
    variables (LC_ALL, LC_MESSAGES, LANG), or None."""
    environment = env or os.environ
    for variable in ("LC_ALL", "LC_MESSAGES", "LANG"):
        code = normalize_language_code(environment.get(variable))
        if code:
            return code
    return None
def get_default_config_path(home_directory: str | None = None) -> Path:
base_home = Path(home_directory or os.path.expanduser("~"))
return base_home / ".local" / "etc" / "ffx.json"
def read_configured_language(
    config_path: str | os.PathLike | None = None,
    *,
    home_directory: str | None = None,
) -> str | None:
    """Read and normalize the 'language' setting from the JSON config file.

    Returns None when the file is missing, unreadable, not valid JSON, or
    holds no recognizable language.
    """
    if config_path is None:
        resolved_path = get_default_config_path(home_directory)
    else:
        resolved_path = Path(config_path)
    if not resolved_path.is_file():
        return None
    try:
        config_data = json.loads(resolved_path.read_text(encoding="utf-8"))
    except (OSError, ValueError, TypeError):
        # Treat any read/parse failure as "no configured language".
        return None
    return normalize_language_code(config_data.get("language"))
def resolve_application_language(
    *,
    cli_language: str | None = None,
    config_language: str | None = None,
    system_language: str | None = None,
    env: dict[str, str] | None = None,
) -> str:
    """Pick the application language by priority: CLI flag, config file,
    system locale; fall back to DEFAULT_LANGUAGE."""
    candidates = (
        cli_language,
        config_language,
        system_language or detect_system_language(env),
    )
    for candidate in candidates:
        normalized = normalize_language_code(candidate)
        if normalized:
            return normalized
    return DEFAULT_LANGUAGE
def set_current_language(language_code: str | None) -> str:
    """Set the module-wide current language and return the code actually applied.

    Unknown or empty codes fall back to DEFAULT_LANGUAGE.
    """
    global _current_language
    normalized = normalize_language_code(language_code)
    _current_language = normalized if normalized else DEFAULT_LANGUAGE
    return _current_language
def get_current_language() -> str:
    """Return the language code most recently applied via set_current_language()."""
    return _current_language
def _load_catalog(language_code: str) -> dict:
    """Load (and cache) the translation catalog for *language_code*.

    A missing catalog file yields an empty catalog structure so lookups
    degrade gracefully.
    """
    code = normalize_language_code(language_code) or DEFAULT_LANGUAGE
    catalog = _catalog_cache.get(code)
    if catalog is None:
        catalog_path = _assets_directory() / f"{code}.json"
        if catalog_path.is_file():
            catalog = json.loads(catalog_path.read_text(encoding="utf-8"))
        else:
            catalog = {"phrases": {}, "iso_languages": {}}
        _catalog_cache[code] = catalog
    return catalog
def _lookup_phrase(language_code: str, source_text: str) -> str | None:
    """Return the catalog translation of *source_text*, or None if absent."""
    catalog = _load_catalog(language_code)
    return catalog.get("phrases", {}).get(source_text)
def t(source_text: str, **kwargs) -> str:
    """Translate *source_text* to the current language.

    Falls back to the source-language catalog, then to the text itself.
    Keyword arguments, if given, are applied with str.format().
    """
    for language_code in (get_current_language(), SOURCE_LANGUAGE):
        candidate = _lookup_phrase(language_code, source_text)
        if candidate:
            translated = candidate
            break
    else:
        translated = source_text
    return translated.format(**kwargs) if kwargs else translated
def translate_iso_language(member_name: str, fallback: str) -> str:
    """Return the localized ISO language name for *member_name*.

    Tries the current language's catalog, then the source language's,
    and finally returns *fallback*.
    """
    for language_code in (get_current_language(), SOURCE_LANGUAGE):
        translations = _load_catalog(language_code).get("iso_languages")
        if translations and member_name in translations:
            return str(translations[member_name])
    return str(fallback)

View File

@@ -0,0 +1,603 @@
import re
import click
from rich.text import Text
from textual.containers import Grid
from textual.widgets import Button, Footer, Header, Input, Static
from textual.widgets._data_table import CellDoesNotExist
from ffx.file_properties import FileProperties
from ffx.helper import DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.show_descriptor import ShowDescriptor
from ffx.track_descriptor import TrackDescriptor
from .i18n import t
from .media_workflow_screen_base import MediaWorkflowScreenBase
from .pattern_details_screen import PatternDetailsScreen
from .screen_support import (
add_auto_table_column,
build_screen_controllers,
build_screen_log_pane,
go_back_or_exit,
localized_column_width,
update_table_column_label,
)
from .show_details_screen import ShowDetailsScreen
class InspectDetailsScreen(MediaWorkflowScreenBase):
    """Workflow screen behind the ``inspect`` command.

    Presents the inspected media file next to the show database: a sortable
    table of shows, an editable filename pattern, the file's media tags and
    streams, and a table of differences between the file and the
    database/output state.  Patterns can be created ('n'), updated ('u')
    and edited ('e') via the key bindings.
    """

    # Grid column sizing; the first (label) column is widened at runtime to
    # fit the localized row labels (see _update_grid_layout()).
    GRID_COLUMN_LABEL_MIN = 12
    GRID_COLUMN_2 = 20
    GRID_COLUMN_3 = 40
    GRID_COLUMN_4 = "4fr"
    GRID_COLUMN_5 = 10
    GRID_COLUMN_6 = "5fr"
    # Textual CSS: a 6x8 layout grid plus styling of the embedded widgets.
    CSS = f"""
    Grid {{
        grid-size: 6 8;
        grid-rows: 9 2 2 2 2 10 2 10;
        grid-columns: {GRID_COLUMN_LABEL_MIN} {GRID_COLUMN_2} {GRID_COLUMN_3} {GRID_COLUMN_4} {GRID_COLUMN_5} {GRID_COLUMN_6};
        height: 100%;
        width: 100%;
        min-width: 120;
        padding: 1;
        overflow-x: auto;
        overflow-y: auto;
    }}
    DataTable .datatable--cursor {{
        background: darkorange;
        color: black;
    }}
    DataTable .datatable--header {{
        background: steelblue;
        color: white;
    }}
    Input {{
        border: none;
    }}
    Button {{
        border: none;
    }}
    DataTable {{
        min-height: 24;
        width: 100%;
    }}
    .two {{
        column-span: 2;
    }}
    .three {{
        column-span: 3;
    }}
    .four {{
        column-span: 4;
    }}
    .five {{
        column-span: 5;
    }}
    #differences-table {{
        row-span: 10;
    }}
    .yellow {{
        tint: yellow 40%;
    }}
    """

    @classmethod
    def _grid_columns_spec(cls, label_column_width: int | None = None) -> str:
        """Build the ``grid-columns`` style value.

        The first (label) column defaults to GRID_COLUMN_LABEL_MIN and may
        be overridden with a runtime width; the other columns are fixed.
        """
        return " ".join(
            [
                str(
                    cls.GRID_COLUMN_LABEL_MIN
                    if label_column_width is None
                    else int(label_column_width)
                ),
                str(cls.GRID_COLUMN_2),
                str(cls.GRID_COLUMN_3),
                str(cls.GRID_COLUMN_4),
                str(cls.GRID_COLUMN_5),
                str(cls.GRID_COLUMN_6),
            ]
        )

    COMMAND_NAME = "inspect"
    DIFFERENCES_COLUMN_LABEL = "Differences (file->db/output)"
    BINDINGS = [
        ("escape", "back", t("Back")),
        ("q", "app.quit", t("Quit")),
        ("n", "new_pattern", t("New Pattern")),
        ("u", "update_pattern", t("Update Pattern")),
        ("e", "edit_pattern", t("Edit Pattern")),
    ]

    def __init__(self):
        # Maps shows-table row keys to their descriptor; None marks the
        # "<New show>" placeholder row.
        self._showRowData: dict[object, ShowDescriptor | None] = {}
        # Current sort state of the shows table (column key + direction).
        self._showSortColumnKey = None
        self._showSortReverse = False
        # Base labels per column key, used when re-rendering headers with
        # a sort indicator appended.
        self._showColumnLabels: dict[object, str] = {}
        super().__init__()
        # Controllers for pattern/show/track/tag persistence.
        controllers = build_screen_controllers(
            self.context,
            pattern=True,
            show=True,
            track=True,
            tag=True,
        )
        self._pc = controllers["pattern"]
        self._sc = controllers["show"]
        self._tc = controllers["track"]
        self._tac = controllers["tag"]
        self.reloadProperties(reset_draft=True)

    def compose(self):
        """Yield the widget tree: header, the 6x8 grid of labels/tables/inputs,
        the log pane, and the footer."""
        # Build the tables up front (base-class helpers) so they can be
        # placed into the grid below.
        self._build_media_tags_table()
        self._build_tracks_table()
        self._build_differences_table()
        yield Header()
        with Grid(id="main_grid"):
            self.showsTable = self._build_shows_table()
            # Row 1
            yield Static(t("Show"))
            yield self.showsTable
            yield Static(" ")
            yield self.differencesTable
            # Row 2
            yield Static(" ", classes="five")
            # Row 3
            yield Static(" ")
            yield Button(t("Substitute"), id="pattern_button")
            yield Static(" ", classes="three")
            # Row 4
            yield Static(t("Pattern"))
            yield Input(type="text", id="pattern_input", classes="three")
            yield Static(" ")
            # Row 5
            yield Static(" ", classes="five")
            # Row 6
            yield Static(t("Media Tags"))
            yield self.mediaTagsTable
            yield Static(" ")
            # Row 7
            yield Static(" ", classes="five")
            # Row 8
            yield Static(t("Streams"))
            yield self.tracksTable
            yield Static(" ")
            yield build_screen_log_pane()
        yield Footer()

    def _update_grid_layout(self) -> None:
        """Widen the label column so the longest localized label fits."""
        leftColumnWidth = max(
            localized_column_width(t("Show"), self.GRID_COLUMN_LABEL_MIN),
            localized_column_width(t("Pattern"), self.GRID_COLUMN_LABEL_MIN),
            localized_column_width(t("Media Tags"), self.GRID_COLUMN_LABEL_MIN),
            localized_column_width(t("Streams"), self.GRID_COLUMN_LABEL_MIN),
        )
        grid = self.query_one("#main_grid", Grid)
        grid.styles.grid_columns = self._grid_columns_spec(leftColumnWidth)

    def action_back(self):
        """Leave this screen (pop it, or exit the app when it is the last)."""
        go_back_or_exit(self)

    def getDisplayedMediaDescriptor(self):
        """Return the descriptor currently shown: the target descriptor when
        a pattern matched, otherwise the raw source descriptor."""
        if self._currentPattern is not None and self._targetMediaDescriptor is not None:
            return self._targetMediaDescriptor
        return self._sourceMediaDescriptor

    def getTrackEditSourceDescriptor(self):
        """Map the selected (displayed) track back to its source-file track.

        Returns the selection unchanged when no pattern/target is active,
        and None when no source track matches the selection.
        """
        selectedTrackDescriptor = self.getSelectedTrackDescriptor()
        if (
            selectedTrackDescriptor is None
            or self._currentPattern is None
            or self._targetMediaDescriptor is None
        ):
            return selectedTrackDescriptor
        # Match by (source index, type): displayed tracks may be reordered
        # or filtered relative to the source file.
        for sourceTrackDescriptor in self._sourceMediaDescriptor.getTrackDescriptors():
            if (
                sourceTrackDescriptor.getSourceIndex()
                == selectedTrackDescriptor.getSourceIndex()
                and sourceTrackDescriptor.getType() == selectedTrackDescriptor.getType()
            ):
                return sourceTrackDescriptor
        return None

    def _build_shows_table(self):
        """Create the shows DataTable with ID/Name/Year columns."""
        from textual.widgets import DataTable
        showsTable = DataTable(classes="three")
        idLabel = t("ID")
        nameLabel = t("Name")
        yearLabel = t("Year")
        self._showColumnKeyId = add_auto_table_column(showsTable, idLabel)
        self._showColumnKeyName = add_auto_table_column(showsTable, nameLabel)
        self._showColumnKeyYear = add_auto_table_column(showsTable, yearLabel)
        # Remember the base labels so sort markers can be re-rendered later.
        self._showColumnLabels = {
            self._showColumnKeyId: idLabel,
            self._showColumnKeyName: nameLabel,
            self._showColumnKeyYear: yearLabel,
        }
        showsTable.cursor_type = "row"
        return showsTable

    def _get_selected_show_row_key(self):
        """Return the row key under the cursor, or None for an empty table."""
        try:
            row_key, _ = self.showsTable.coordinate_to_cell_key(
                self.showsTable.cursor_coordinate
            )
            return row_key
        except CellDoesNotExist:
            return None

    def _move_show_cursor_to_row_key(self, row_key):
        """Move the cursor to *row_key*; silently ignore None or stale keys."""
        if row_key is None:
            return
        try:
            row_index = int(self.showsTable.get_row_index(row_key))
        except Exception:
            # The row may have been removed since the key was captured.
            return
        self.showsTable.move_cursor(row=row_index)

    def _sort_key_for_show_column(self, column_key):
        """Return the sort key function for a column.

        ID/Year sort numerically (non-numeric cells — e.g. the placeholder
        row — sort as -1), Name sorts case-insensitively.
        """
        if column_key == self._showColumnKeyId:
            return lambda value: int(value) if str(value).strip().isdigit() else -1
        if column_key == self._showColumnKeyYear:
            return lambda value: int(value) if str(value).strip().isdigit() else -1
        if column_key == self._showColumnKeyName:
            return lambda value: str(value).casefold()
        return None

    def _update_show_header_labels(self):
        """Re-render the column headers, marking the active sort column."""
        if not hasattr(self, "showsTable"):
            return
        # NOTE(review): both direction markers are empty strings here —
        # arrow glyphs may have been lost in transit; confirm intended.
        arrow_up = ""
        arrow_down = ""
        for column_key, base_label in self._showColumnLabels.items():
            column = self.showsTable.columns.get(column_key)
            if column is None:
                continue
            label_text = base_label
            if column_key == self._showSortColumnKey:
                label_text = (
                    f"{base_label} "
                    f"{arrow_down if self._showSortReverse else arrow_up}"
                )
            update_table_column_label(self.showsTable, column_key, Text(label_text))

    def _apply_show_sort(self, *, preserve_row_key=None):
        """Apply the current sort state, optionally keeping a row selected."""
        if self._showSortColumnKey is None:
            self._update_show_header_labels()
            return
        self.showsTable.sort(
            self._showSortColumnKey,
            key=self._sort_key_for_show_column(self._showSortColumnKey),
            reverse=self._showSortReverse,
        )
        self._move_show_cursor_to_row_key(preserve_row_key)
        self._update_show_header_labels()

    def on_mount(self):
        """Populate the shows table and seed the pattern input on display."""
        if getattr(self, 'context', {}).get('debug', False):
            # Show the concrete screen class in the title while debugging.
            self.title = f"{self.app.title} - {self.__class__.__name__}"
        self._update_grid_layout()
        if self._currentPattern is None:
            # No matching pattern yet: offer the "<New show>" placeholder.
            self._add_show_row(None)
        for show in self._sc.getAllShows():
            self._add_show_row(show.getDescriptor(self.context))
        # Default sort: by show name, ascending.
        self._showSortColumnKey = self._showColumnKeyName
        self._apply_show_sort()
        if self._currentPattern is not None:
            # Pre-select the show the current pattern belongs to and show
            # the stored pattern text.
            showIdentifier = self._currentPattern.getShowId()
            showRowIndex = self.getRowIndexFromShowId(showIdentifier)
            if showRowIndex is not None:
                self.showsTable.move_cursor(row=showRowIndex)
            self.query_one("#pattern_input", Input).value = self._currentPattern.getPattern()
        else:
            # No pattern: pre-fill with the filename and highlight the input.
            self.query_one("#pattern_input", Input).value = self._mediaFilename
            self.highlightPattern(True)
        self.updateMediaTags()
        self.updateTracks()
        self.updateDifferences()

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Handle the Substitute button and the default/forced track toggles."""
        if event.button.id == "pattern_button":
            # Replace the concrete season/episode text (captured group 1)
            # with the generic SE indicator pattern — presumably so the
            # stored pattern matches any episode; confirm against
            # FileProperties.SE_INDICATOR_PATTERN semantics.
            pattern = self.query_one("#pattern_input", Input).value
            patternMatch = re.search(FileProperties.SE_INDICATOR_PATTERN, pattern)
            if patternMatch:
                self.query_one("#pattern_input", Input).value = pattern.replace(
                    patternMatch.group(1),
                    FileProperties.SE_INDICATOR_PATTERN,
                )
        if event.button.id == "select_default_button":
            if self.setSelectedTrackDefault():
                self.updateTracks()
                self.updateDifferences()
        if event.button.id == "select_forced_button":
            if self.setSelectedTrackForced():
                self.updateTracks()
                self.updateDifferences()

    def on_data_table_header_selected(self, event) -> None:
        """Sort by the clicked shows-table column; re-click flips direction."""
        if event.data_table is not self.showsTable:
            return
        selected_row_key = self._get_selected_show_row_key()
        if self._showSortColumnKey == event.column_key:
            self._showSortReverse = not self._showSortReverse
        else:
            self._showSortColumnKey = event.column_key
            self._showSortReverse = False
        self._apply_show_sort(preserve_row_key=selected_row_key)

    def removeShow(self, showId: int = -1):
        """Remove the row of the given show id.

        ``showId == -1`` removes the "<New show>" placeholder row instead.
        Only the first matching row is removed.
        """
        for row_key, show_descriptor in list(self._showRowData.items()):
            if (
                (showId == -1 and show_descriptor is None)
                or (
                    show_descriptor is not None
                    and show_descriptor.getId() == showId
                )
            ):
                self.showsTable.remove_row(row_key)
                self._showRowData.pop(row_key, None)
                return

    def getRowIndexFromShowId(self, showId: int = -1) -> int | None:
        """Return the current table row index of a show id, or of the
        placeholder row when ``showId == -1``; None when not present."""
        for row_key, show_descriptor in self._showRowData.items():
            if (
                (showId == -1 and show_descriptor is None)
                or (
                    show_descriptor is not None
                    and show_descriptor.getId() == showId
                )
            ):
                return int(self.showsTable.get_row_index(row_key))
        return None

    def _add_show_row(self, show_descriptor: ShowDescriptor | None):
        """Append a row for *show_descriptor*; None adds the placeholder row.

        Returns the new row's key (also recorded in _showRowData).
        """
        if show_descriptor is None:
            row_key = self.showsTable.add_row(" ", t("<New show>"), " ")
        else:
            row_key = self.showsTable.add_row(
                str(show_descriptor.getId()),
                str(show_descriptor.getName()),
                str(show_descriptor.getYear()),
            )
        self._showRowData[row_key] = show_descriptor
        return row_key

    def highlightPattern(self, state: bool):
        """Toggle the red background on the pattern input."""
        patternInput = self.query_one("#pattern_input", Input)
        patternInput.styles.background = "red" if state else None

    def getSelectedShowDescriptor(self) -> ShowDescriptor | None:
        """Descriptor of the row under the cursor; None for the placeholder
        row or when the table is empty/not yet built."""
        try:
            row_key, _ = self.showsTable.coordinate_to_cell_key(
                self.showsTable.cursor_coordinate
            )
            if row_key is not None:
                return self._showRowData.get(row_key)
        except (CellDoesNotExist, AttributeError):
            return None
        return None

    def getPatternObjFromInput(self):
        """Collect ``{'show_id', 'pattern'}`` from the UI state.

        Returns {} when the data is incomplete — e.g. the placeholder row is
        selected, so getSelectedShowDescriptor() is None and getId() raises.
        """
        patternObj = {}
        try:
            patternObj["show_id"] = self.getSelectedShowDescriptor().getId()
            patternObj["pattern"] = str(self.query_one("#pattern_input", Input).value)
        except Exception:
            return {}
        return patternObj

    def handle_new_pattern(self, showDescriptor: ShowDescriptor):
        """Create a pattern for *showDescriptor* seeded from the file.

        Called directly when an existing show is selected, or as the
        callback of ShowDetailsScreen when a new show was just created.

        Raises:
            TypeError: when the argument is not a ShowDescriptor.
        """
        if type(showDescriptor) is not ShowDescriptor:
            raise TypeError(
                "InspectDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor"
            )
        # Drop the placeholder row and make sure the show has a row that
        # the cursor can be moved to.
        self.removeShow()
        showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
        if showRowIndex is None:
            row_key = self._add_show_row(showDescriptor)
            self._apply_show_sort(preserve_row_key=row_key)
            showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
        if showRowIndex is not None:
            self.showsTable.move_cursor(row=showRowIndex)
        patternObj = self.getPatternObjFromInput()
        if patternObj:
            # Seed the pattern's media tags from the file, minus the keys
            # configured to be ignored or removed.
            mediaTags = {}
            for tagKey, tagValue in self._sourceMediaDescriptor.getTags().items():
                if (
                    tagKey not in self._ignoreGlobalKeys
                    and tagKey not in self._removeGlobalKeys
                ):
                    mediaTags[tagKey] = tagValue
            patternId = self._pc.savePatternSchema(
                patternObj,
                trackDescriptors=self._sourceMediaDescriptor.getTrackDescriptors(),
                mediaTags=mediaTags,
            )
            if patternId:
                self.reloadProperties(reset_draft=True)
                self.updateMediaTags()
                self.updateTracks()
                self.updateDifferences()
                self.highlightPattern(False)

    def action_new_pattern(self):
        """Create a pattern for the selected show, or open the new-show
        screen first when the placeholder row is selected."""
        selectedShowDescriptor = self.getSelectedShowDescriptor()
        if selectedShowDescriptor is None:
            self.app.push_screen(ShowDetailsScreen(), self.handle_new_pattern)
        else:
            self.handle_new_pattern(selectedShowDescriptor)

    def action_update_pattern(self):
        """Persist pattern changes.

        When the pattern text itself changed, only the pattern record is
        updated; otherwise the tag/track differences between the file and
        the stored pattern are written back so the database matches the
        file's current state.
        """
        if self._currentPattern is not None:
            patternObj = self.getPatternObjFromInput()
            if (
                patternObj
                and self._currentPattern.getPattern() != patternObj["pattern"]
            ):
                updated = self._pc.updatePattern(
                    self._currentPattern.getId(),
                    patternObj,
                )
                if updated:
                    self.reloadProperties(reset_draft=True)
                    self.updateMediaTags()
                    self.updateTracks()
                    self.updateDifferences()
                # NOTE(review): returning here skips the difference sync
                # below whenever the pattern text changed — confirm intended.
                return updated
            # --- media-level tag differences: re-align stored tags with
            # the file's current tags ---
            tagDifferences = self._mediaChangeSetObj.get(MediaDescriptorChangeSet.TAGS_KEY, {})
            for addedTagKey in tagDifferences.get(DIFF_ADDED_KEY, {}).keys():
                self._tac.deleteMediaTagByKey(self._currentPattern.getId(), addedTagKey)
            for removedTagKey in tagDifferences.get(DIFF_REMOVED_KEY, {}).keys():
                currentTags = self._sourceMediaDescriptor.getTags()
                self._tac.updateMediaTag(
                    self._currentPattern.getId(),
                    removedTagKey,
                    currentTags[removedTagKey],
                )
            for changedTagKey in tagDifferences.get(DIFF_CHANGED_KEY, {}).keys():
                currentTags = self._sourceMediaDescriptor.getTags()
                self._tac.updateMediaTag(
                    self._currentPattern.getId(),
                    changedTagKey,
                    currentTags[changedTagKey],
                )
            # --- track differences ---
            trackDifferences = self._mediaChangeSetObj.get(MediaDescriptorChangeSet.TRACKS_KEY, {})
            for trackDescriptor in trackDifferences.get(DIFF_ADDED_KEY, {}).values():
                self._tc.addTrack(trackDescriptor, patternId=self._currentPattern.getId())
            for trackDescriptor in trackDifferences.get(DIFF_REMOVED_KEY, {}).values():
                self._tc.deleteTrack(trackDescriptor.getId())
            for trackIndex, trackDiff in trackDifferences.get(DIFF_CHANGED_KEY, {}).items():
                # Resolve the changed index to the stored target track.
                targetTracks = [
                    track
                    for track in self._targetMediaDescriptor.getTrackDescriptors()
                    if track.getIndex() == trackIndex
                ]
                targetTrackId = targetTracks[0].getId() if targetTracks else None
                targetTrackIndex = targetTracks[0].getIndex() if targetTracks else None
                # Per-track tag changes.
                tagsDiff = trackDiff.get(TrackDescriptor.TAGS_KEY, {})
                for tagKey, tagValue in tagsDiff.get(DIFF_ADDED_KEY, {}).items():
                    self._tac.updateTrackTag(targetTrackId, tagKey, tagValue)
                for tagKey in tagsDiff.get(DIFF_REMOVED_KEY, {}).keys():
                    self._tac.deleteTrackTagByKey(targetTrackId, tagKey)
                for tagKey, tagValue in tagsDiff.get(DIFF_CHANGED_KEY, {}).items():
                    self._tac.updateTrackTag(targetTrackId, tagKey, tagValue)
                # Per-track disposition flag changes (e.g. default/forced).
                dispositionDiff = trackDiff.get(TrackDescriptor.DISPOSITION_SET_KEY, {})
                for changedDisposition in dispositionDiff.get(DIFF_ADDED_KEY, set()):
                    if targetTrackIndex is not None:
                        self._tc.setDispositionState(
                            self._currentPattern.getId(),
                            targetTrackIndex,
                            changedDisposition,
                            True,
                        )
                for changedDisposition in dispositionDiff.get(DIFF_REMOVED_KEY, set()):
                    if targetTrackIndex is not None:
                        self._tc.setDispositionState(
                            self._currentPattern.getId(),
                            targetTrackIndex,
                            changedDisposition,
                            False,
                        )
            self.reloadProperties(reset_draft=True)
            self.updateMediaTags()
            self.updateTracks()
            self.updateDifferences()

    def action_edit_pattern(self):
        """Open the pattern-details screen for the pattern in the input.

        Raises:
            click.ClickException: when the entered pattern cannot be
                resolved to a stored pattern id.
        """
        patternObj = self.getPatternObjFromInput()
        if patternObj.get("pattern"):
            selectedPatternId = self._pc.findPattern(patternObj)
            if selectedPatternId is None:
                raise click.ClickException(
                    "InspectDetailsScreen.action_edit_pattern(): Pattern to edit has no id"
                )
            self.app.push_screen(
                PatternDetailsScreen(
                    patternId=selectedPatternId,
                    showId=self.getSelectedShowDescriptor().getId(),
                ),
                self.handle_edit_pattern,
            )

    def handle_edit_pattern(self, screenResult):
        """Refresh all panes after the pattern-details screen closes."""
        self.reloadProperties(reset_draft=True)
        if self._currentPattern is not None:
            self.query_one("#pattern_input", Input).value = self._currentPattern.getPattern()
        self.updateMediaTags()
        self.updateTracks()
        self.updateDifferences()

View File

@@ -1,106 +1,226 @@
from enum import Enum
import difflib
from .i18n import translate_iso_language
class IsoLanguage(Enum):
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": "afr"}
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": "alb"}
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": "ara"}
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": "arm"}
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": "aze"}
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": "baq"}
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": "bel"}
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": "bul"}
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": "cat"}
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": "chi"}
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": "hrv"}
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": "cze"}
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": "dan"}
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": "dut"}
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": "eng"}
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": "est"}
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": "fin"}
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": "fre"}
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": "geo"}
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": "ger"}
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": "gre"}
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": "heb"}
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": "hin"}
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": "hun"}
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": "ice"}
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": "ind"}
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": "gle"}
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": "ita"}
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": "jpn"}
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": "kaz"}
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": "kor"}
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": "lat"}
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": "lav"}
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": "lit"}
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": "mac"}
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": "may"}
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": "mlt"}
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": "nor"}
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": "per"}
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": "pol"}
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": "por"}
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": "rum"}
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": "rus"}
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": "sme"}
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": "smo"}
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": "sag"}
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": "san"}
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": "srd"}
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": "srp"}
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": "sna"}
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": "snd"}
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": "sin"}
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": "slk"}
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": "slv"}
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": "som"}
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": "sot"}
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": "spa"}
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": "sun"}
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": "swa"}
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": "ssw"}
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": "swe"}
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": "tgl"}
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": "tam"}
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": "tha"}
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": "tur"}
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": "ukr"}
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": "urd"}
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": "vie"}
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": "wel"}
ABKHAZIAN = {"name": "Abkhazian", "iso639_1": "ab", "iso639_2": ["abk"]}
AFAR = {"name": "Afar", "iso639_1": "aa", "iso639_2": ["aar"]}
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": ["afr"]}
AKAN = {"name": "Akan", "iso639_1": "ak", "iso639_2": ["aka"]}
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": ["sqi", "alb"]}
AMHARIC = {"name": "Amharic", "iso639_1": "am", "iso639_2": ["amh"]}
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": ["ara"]}
ARAGONESE = {"name": "Aragonese", "iso639_1": "an", "iso639_2": ["arg"]}
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": ["hye", "arm"]}
ASSAMESE = {"name": "Assamese", "iso639_1": "as", "iso639_2": ["asm"]}
AVARIC = {"name": "Avaric", "iso639_1": "av", "iso639_2": ["ava"]}
AVESTAN = {"name": "Avestan", "iso639_1": "ae", "iso639_2": ["ave"]}
AYMARA = {"name": "Aymara", "iso639_1": "ay", "iso639_2": ["aym"]}
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": ["aze"]}
BAMBARA = {"name": "Bambara", "iso639_1": "bm", "iso639_2": ["bam"]}
BASHKIR = {"name": "Bashkir", "iso639_1": "ba", "iso639_2": ["bak"]}
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": ["eus", "baq"]}
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": ["bel"]}
BENGALI = {"name": "Bengali", "iso639_1": "bn", "iso639_2": ["ben"]}
BISLAMA = {"name": "Bislama", "iso639_1": "bi", "iso639_2": ["bis"]}
BOKMAL = {"name": "Bokmål", "iso639_1": "nb", "iso639_2": ["nob"]}
BOSNIAN = {"name": "Bosnian", "iso639_1": "bs", "iso639_2": ["bos"]}
BRETON = {"name": "Breton", "iso639_1": "br", "iso639_2": ["bre"]}
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": ["bul"]}
BURMESE = {"name": "Burmese", "iso639_1": "my", "iso639_2": ["mya", "bur"]}
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": ["cat"]}
CHAMORRO = {"name": "Chamorro", "iso639_1": "ch", "iso639_2": ["cha"]}
CHECHEN = {"name": "Chechen", "iso639_1": "ce", "iso639_2": ["che"]}
CHICHEWA = {"name": "Chichewa", "iso639_1": "ny", "iso639_2": ["nya"]}
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": ["zho", "chi"]}
CHURCH_SLAVIC = {"name": "Church Slavic", "iso639_1": "cu", "iso639_2": ["chu"]}
CHUVASH = {"name": "Chuvash", "iso639_1": "cv", "iso639_2": ["chv"]}
CORNISH = {"name": "Cornish", "iso639_1": "kw", "iso639_2": ["cor"]}
CORSICAN = {"name": "Corsican", "iso639_1": "co", "iso639_2": ["cos"]}
CREE = {"name": "Cree", "iso639_1": "cr", "iso639_2": ["cre"]}
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": ["hrv"]}
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": ["ces", "cze"]}
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": ["dan"]}
DIVEHI = {"name": "Divehi", "iso639_1": "dv", "iso639_2": ["div"]}
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": ["nld", "dut"]}
DZONGKHA = {"name": "Dzongkha", "iso639_1": "dz", "iso639_2": ["dzo"]}
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": ["eng"]}
ESPERANTO = {"name": "Esperanto", "iso639_1": "eo", "iso639_2": ["epo"]}
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": ["est"]}
EWE = {"name": "Ewe", "iso639_1": "ee", "iso639_2": ["ewe"]}
FAROESE = {"name": "Faroese", "iso639_1": "fo", "iso639_2": ["fao"]}
FIJIAN = {"name": "Fijian", "iso639_1": "fj", "iso639_2": ["fij"]}
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": ["fin"]}
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": ["fra", "fre"]}
FULAH = {"name": "Fulah", "iso639_1": "ff", "iso639_2": ["ful"]}
GALICIAN = {"name": "Galician", "iso639_1": "gl", "iso639_2": ["glg"]}
GANDA = {"name": "Ganda", "iso639_1": "lg", "iso639_2": ["lug"]}
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": ["kat", "geo"]}
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": ["deu", "ger"]}
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": ["ell", "gre"]}
GUARANI = {"name": "Guarani", "iso639_1": "gn", "iso639_2": ["grn"]}
GUJARATI = {"name": "Gujarati", "iso639_1": "gu", "iso639_2": ["guj"]}
HAITIAN = {"name": "Haitian", "iso639_1": "ht", "iso639_2": ["hat"]}
HAUSA = {"name": "Hausa", "iso639_1": "ha", "iso639_2": ["hau"]}
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": ["heb"]}
HERERO = {"name": "Herero", "iso639_1": "hz", "iso639_2": ["her"]}
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": ["hin"]}
HIRI_MOTU = {"name": "Hiri Motu", "iso639_1": "ho", "iso639_2": ["hmo"]}
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": ["hun"]}
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": ["isl", "ice"]}
IDO = {"name": "Ido", "iso639_1": "io", "iso639_2": ["ido"]}
IGBO = {"name": "Igbo", "iso639_1": "ig", "iso639_2": ["ibo"]}
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": ["ind"]}
INTERLINGUA = {"name": "Interlingua", "iso639_1": "ia", "iso639_2": ["ina"]}
INTERLINGUE = {"name": "Interlingue", "iso639_1": "ie", "iso639_2": ["ile"]}
INUKTITUT = {"name": "Inuktitut", "iso639_1": "iu", "iso639_2": ["iku"]}
INUPIAQ = {"name": "Inupiaq", "iso639_1": "ik", "iso639_2": ["ipk"]}
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": ["gle"]}
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": ["ita"]}
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": ["jpn"]}
JAVANESE = {"name": "Javanese", "iso639_1": "jv", "iso639_2": ["jav"]}
KALAALLISUT = {"name": "Kalaallisut", "iso639_1": "kl", "iso639_2": ["kal"]}
KANNADA = {"name": "Kannada", "iso639_1": "kn", "iso639_2": ["kan"]}
KANURI = {"name": "Kanuri", "iso639_1": "kr", "iso639_2": ["kau"]}
KASHMIRI = {"name": "Kashmiri", "iso639_1": "ks", "iso639_2": ["kas"]}
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": ["kaz"]}
KHMER = {"name": "Khmer", "iso639_1": "km", "iso639_2": ["khm"]}
KIKUYU = {"name": "Kikuyu", "iso639_1": "ki", "iso639_2": ["kik"]}
KINYARWANDA = {"name": "Kinyarwanda", "iso639_1": "rw", "iso639_2": ["kin"]}
KIRGHIZ = {"name": "Kirghiz", "iso639_1": "ky", "iso639_2": ["kir"]}
KOMI = {"name": "Komi", "iso639_1": "kv", "iso639_2": ["kom"]}
KONGO = {"name": "Kongo", "iso639_1": "kg", "iso639_2": ["kon"]}
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": ["kor"]}
KUANYAMA = {"name": "Kuanyama", "iso639_1": "kj", "iso639_2": ["kua"]}
KURDISH = {"name": "Kurdish", "iso639_1": "ku", "iso639_2": ["kur"]}
LAO = {"name": "Lao", "iso639_1": "lo", "iso639_2": ["lao"]}
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": ["lat"]}
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": ["lav"]}
LIMBURGAN = {"name": "Limburgan", "iso639_1": "li", "iso639_2": ["lim"]}
LINGALA = {"name": "Lingala", "iso639_1": "ln", "iso639_2": ["lin"]}
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": ["lit"]}
LUBA_KATANGA = {"name": "Luba-Katanga", "iso639_1": "lu", "iso639_2": ["lub"]}
LUXEMBOURGISH = {"name": "Luxembourgish", "iso639_1": "lb", "iso639_2": ["ltz"]}
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": ["mkd", "mac"]}
MALAGASY = {"name": "Malagasy", "iso639_1": "mg", "iso639_2": ["mlg"]}
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": ["msa", "may"]}
MALAYALAM = {"name": "Malayalam", "iso639_1": "ml", "iso639_2": ["mal"]}
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": ["mlt"]}
MANX = {"name": "Manx", "iso639_1": "gv", "iso639_2": ["glv"]}
MAORI = {"name": "Maori", "iso639_1": "mi", "iso639_2": ["mri", "mao"]}
MARATHI = {"name": "Marathi", "iso639_1": "mr", "iso639_2": ["mar"]}
MARSHALLESE = {"name": "Marshallese", "iso639_1": "mh", "iso639_2": ["mah"]}
MONGOLIAN = {"name": "Mongolian", "iso639_1": "mn", "iso639_2": ["mon"]}
NAURU = {"name": "Nauru", "iso639_1": "na", "iso639_2": ["nau"]}
NAVAJO = {"name": "Navajo", "iso639_1": "nv", "iso639_2": ["nav"]}
NDONGA = {"name": "Ndonga", "iso639_1": "ng", "iso639_2": ["ndo"]}
NEPALI = {"name": "Nepali", "iso639_1": "ne", "iso639_2": ["nep"]}
NORTH_NDEBELE = {"name": "North Ndebele", "iso639_1": "nd", "iso639_2": ["nde"]}
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": ["sme"]}
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": ["nor"]}
NORWEGIAN_NYNORSK = {"name": "Nynorsk", "iso639_1": "nn", "iso639_2": ["nno"]}
OCCITAN = {"name": "Occitan", "iso639_1": "oc", "iso639_2": ["oci"]}
OJIBWA = {"name": "Ojibwa", "iso639_1": "oj", "iso639_2": ["oji"]}
ORIYA = {"name": "Oriya", "iso639_1": "or", "iso639_2": ["ori"]}
OROMO = {"name": "Oromo", "iso639_1": "om", "iso639_2": ["orm"]}
OSSETIAN = {"name": "Ossetian", "iso639_1": "os", "iso639_2": ["oss"]}
PALI = {"name": "Pali", "iso639_1": "pi", "iso639_2": ["pli"]}
PANJABI = {"name": "Panjabi", "iso639_1": "pa", "iso639_2": ["pan"]}
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": ["fas", "per"]}
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": ["pol"]}
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": ["por"]}
PUSHTO = {"name": "Pushto", "iso639_1": "ps", "iso639_2": ["pus"]}
QUECHUA = {"name": "Quechua", "iso639_1": "qu", "iso639_2": ["que"]}
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": ["ron", "rum"]}
ROMANSH = {"name": "Romansh", "iso639_1": "rm", "iso639_2": ["roh"]}
RUNDI = {"name": "Rundi", "iso639_1": "rn", "iso639_2": ["run"]}
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": ["rus"]}
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": ["smo"]}
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": ["sag"]}
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": ["san"]}
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": ["srd"]}
SCOTTISH_GAELIC = {"name": "Scottish Gaelic", "iso639_1": "gd", "iso639_2": ["gla"]}
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": ["srp"]}
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": ["sna"]}
SICHUAN_YI = {"name": "Sichuan Yi", "iso639_1": "ii", "iso639_2": ["iii"]}
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": ["snd"]}
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": ["sin"]}
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": ["slk", "slo"]}
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": ["slv"]}
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": ["som"]}
SOUTH_NDEBELE = {"name": "South Ndebele", "iso639_1": "nr", "iso639_2": ["nbl"]}
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": ["sot"]}
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": ["spa"]}
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": ["sun"]}
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": ["swa"]}
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": ["ssw"]}
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": ["swe"]}
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": ["tgl"]}
TAHITIAN = {"name": "Tahitian", "iso639_1": "ty", "iso639_2": ["tah"]}
TAJIK = {"name": "Tajik", "iso639_1": "tg", "iso639_2": ["tgk"]}
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": ["tam"]}
TATAR = {"name": "Tatar", "iso639_1": "tt", "iso639_2": ["tat"]}
TELUGU = {"name": "Telugu", "iso639_1": "te", "iso639_2": ["tel"]}
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": ["tha"]}
TIBETAN = {"name": "Tibetan", "iso639_1": "bo", "iso639_2": ["bod", "tib"]}
TIGRINYA = {"name": "Tigrinya", "iso639_1": "ti", "iso639_2": ["tir"]}
TONGA = {"name": "Tonga", "iso639_1": "to", "iso639_2": ["ton"]}
TSONGA = {"name": "Tsonga", "iso639_1": "ts", "iso639_2": ["tso"]}
TSWANA = {"name": "Tswana", "iso639_1": "tn", "iso639_2": ["tsn"]}
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": ["tur"]}
TURKMEN = {"name": "Turkmen", "iso639_1": "tk", "iso639_2": ["tuk"]}
TWI = {"name": "Twi", "iso639_1": "tw", "iso639_2": ["twi"]}
UIGHUR = {"name": "Uighur", "iso639_1": "ug", "iso639_2": ["uig"]}
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": ["ukr"]}
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": ["urd"]}
UZBEK = {"name": "Uzbek", "iso639_1": "uz", "iso639_2": ["uzb"]}
VENDA = {"name": "Venda", "iso639_1": "ve", "iso639_2": ["ven"]}
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": ["vie"]}
VOLAPUK = {"name": "Volapük", "iso639_1": "vo", "iso639_2": ["vol"]}
WALLOON = {"name": "Walloon", "iso639_1": "wa", "iso639_2": ["wln"]}
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": ["cym", "wel"]}
WESTERN_FRISIAN = {"name": "Western Frisian", "iso639_1": "fy", "iso639_2": ["fry"]}
WOLOF = {"name": "Wolof", "iso639_1": "wo", "iso639_2": ["wol"]}
XHOSA = {"name": "Xhosa", "iso639_1": "xh", "iso639_2": ["xho"]}
YIDDISH = {"name": "Yiddish", "iso639_1": "yi", "iso639_2": ["yid"]}
YORUBA = {"name": "Yoruba", "iso639_1": "yo", "iso639_2": ["yor"]}
ZHUANG = {"name": "Zhuang", "iso639_1": "za", "iso639_2": ["zha"]}
ZULU = {"name": "Zulu", "iso639_1": "zu", "iso639_2": ["zul"]}
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": "und"}
FILIPINO = {"name": "Filipino", "iso639_1": "tl", "iso639_2": ["fil"]}
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": ["und"]}
@staticmethod
def find(label : str):
    """Fuzzy-match a user-supplied label to an IsoLanguage member.

    Matches against both the canonical English name and its localized
    translation; returns IsoLanguage.UNDEFINED when nothing is close enough.
    """
    # BUG FIX: merge residue computed close matches twice (once against the raw
    # English names, once against the candidate map) and left an unreachable
    # second return; only the candidate-map based lookup is kept.
    candidate_map = {}
    for language in IsoLanguage:
        candidate_map[language.value["name"]] = language
        candidate_map[translate_iso_language(language.name, language.value["name"])] = language
    closestMatches = difflib.get_close_matches(label, list(candidate_map.keys()), n=1)
    if closestMatches:
        return candidate_map.get(closestMatches[0], IsoLanguage.UNDEFINED)
    return IsoLanguage.UNDEFINED
@staticmethod
def findThreeLetter(theeLetter : str):
    """Resolve an ISO 639-2 three-letter code to its IsoLanguage member.

    Membership test covers languages with both bibliographic and
    terminological codes (e.g. 'ger' and 'deu' both resolve to German).
    Returns IsoLanguage.UNDEFINED for unknown codes.
    """
    # BUG FIX: a stale equality comparison against the whole iso639_2 list
    # (dead diff line) preceded this membership test; it is removed.
    foundLangs = [l for l in IsoLanguage if str(theeLetter) in l.value["iso639_2"]]
    return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
def label(self):
    """Return the (localized, where available) display name of the language."""
    # BUG FIX: merge residue left an earlier `return str(self.value['name'])`
    # that made the translated variant unreachable; only the translation path
    # is kept (translate_iso_language falls back to the English name).
    return str(translate_iso_language(self.name, self.value["name"]))
def twoLetter(self):
    """Return the ISO 639-1 two-letter code (e.g. 'de')."""
    # BUG FIX: merge residue duplicated this return statement; the unreachable
    # copy is removed.
    return str(self.value["iso639_1"])
def threeLetter(self):
    """Return the primary ISO 639-2 three-letter code (first list entry)."""
    # BUG FIX: merge residue left an earlier return that stringified the whole
    # list (yielding "['deu', 'ger']"); keep only the first-element form.
    return str(self.value["iso639_2"][0])

97
src/ffx/logging_utils.py Normal file
View File

@@ -0,0 +1,97 @@
import logging
import os
# Well-known names used to find/reuse the logger and its handlers.
FFX_LOGGER_NAME = "FFX"
CONSOLE_HANDLER_NAME = "ffx-console"
FILE_HANDLER_NAME = "ffx-file"
# One step above CRITICAL: no record can reach it, so it mutes console output.
MUTED_CONSOLE_LEVEL = logging.CRITICAL + 1


def get_ffx_logger(name: str = FFX_LOGGER_NAME) -> logging.Logger:
    """Fetch the shared FFX logger.

    The logger level is fixed at DEBUG (handlers do the filtering). A
    NullHandler is installed on first access so library-style use stays
    silent until configure_ffx_logger() attaches real handlers.
    """
    ffx_logger = logging.getLogger(name)
    ffx_logger.setLevel(logging.DEBUG)
    if not ffx_logger.handlers:
        ffx_logger.addHandler(logging.NullHandler())
    return ffx_logger
def configure_ffx_logger(
    log_file_path: str,
    file_level: int,
    console_level: int,
    name: str = FFX_LOGGER_NAME,
) -> logging.Logger:
    """Attach (or reuse) a console and a file handler on the FFX logger.

    Idempotent: repeated calls reuse the handlers identified by their
    well-known names and only adjust levels; a changed log file path replaces
    the previous file handler with a fresh one.

    log_file_path: destination log file (normalized to an absolute path).
    file_level / console_level: logging levels for the two handlers.
    name: logger name, defaults to the shared FFX logger.
    Returns the configured logger.
    """
    logger = get_ffx_logger(name)
    logger.propagate = False
    # Drop the NullHandler placeholder installed by get_ffx_logger().
    for handler in list(logger.handlers):
        if isinstance(handler, logging.NullHandler):
            logger.removeHandler(handler)
    # Reuse the console handler from a previous call, if any.
    console_handler = next(
        (handler for handler in logger.handlers if handler.get_name() == CONSOLE_HANDLER_NAME),
        None,
    )
    if console_handler is None:
        console_handler = logging.StreamHandler()
        console_handler.set_name(CONSOLE_HANDLER_NAME)
        logger.addHandler(console_handler)
    console_handler.setLevel(console_level)
    console_handler.setFormatter(logging.Formatter("%(message)s"))
    normalized_log_path = os.path.abspath(log_file_path)
    file_handler = next(
        (handler for handler in logger.handlers if handler.get_name() == FILE_HANDLER_NAME),
        None,
    )
    # If the target log file moved, close and discard the old handler so a
    # fresh one is opened at the new path below.
    if (
        file_handler is not None
        and os.path.abspath(file_handler.baseFilename) != normalized_log_path
    ):
        logger.removeHandler(file_handler)
        file_handler.close()
        file_handler = None
    if file_handler is None:
        file_handler = logging.FileHandler(normalized_log_path)
        file_handler.set_name(FILE_HANDLER_NAME)
        logger.addHandler(file_handler)
    file_handler.setLevel(file_level)
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    return logger
def set_ffx_console_logging_enabled(
    logger: logging.Logger | None,
    *,
    enabled: bool,
):
    """Mute or restore the FFX console handler.

    Muting raises the handler to MUTED_CONSOLE_LEVEL after stashing the
    current level on the handler; enabling restores that stashed level.
    Returns the console handler, or None when there is nothing to toggle.
    """
    if logger is None:
        return None
    console_handler = None
    for candidate in logger.handlers:
        if candidate.get_name() == CONSOLE_HANDLER_NAME:
            console_handler = candidate
            break
    if console_handler is None:
        return None
    if enabled:
        saved_level = getattr(console_handler, "_ffx_saved_level", None)
        if saved_level is not None:
            console_handler.setLevel(saved_level)
            delattr(console_handler, "_ffx_saved_level")
        return console_handler
    # Muting: remember the current level once, then lift above CRITICAL.
    if not hasattr(console_handler, "_ffx_saved_level"):
        console_handler._ffx_saved_level = console_handler.level
    console_handler.setLevel(MUTED_CONSOLE_LEVEL)
    return console_handler

View File

@@ -25,14 +25,14 @@ class MediaController():
pid = int(patternId)
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == pid)
pattern = s.query(Pattern).filter(Pattern.id == pid).first()
if q.count():
pattern = q.first
if pattern is not None:
for mediaTagKey, mediaTagValue in mediaDescriptor.getTags():
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
self.__tc.addTrack(trackDescriptor, patternId = pid)
s.commit()

View File

@@ -1,7 +1,8 @@
import os, re, click, logging
import os, re, click
from typing import List, Self
from ffx.attachment_format import AttachmentFormat
from ffx.track_type import TrackType
from ffx.iso_language import IsoLanguage
@@ -9,8 +10,7 @@ from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.track_descriptor import TrackDescriptor
from ffx.helper import dictDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
from ffx.logging_utils import get_ffx_logger
class MediaDescriptor:
@@ -22,6 +22,7 @@ class MediaDescriptor:
TRACKS_KEY = "tracks"
TRACK_DESCRIPTOR_LIST_KEY = "track_descriptors"
ATTACHMENT_DESCRIPTOR_LIST_KEY = "attachment_descriptors"
CLEAR_TAGS_FLAG_KEY = "clear_tags"
FFPROBE_DISPOSITION_KEY = "disposition"
@@ -31,7 +32,9 @@ class MediaDescriptor:
#407 remove as well
EXCLUDED_MEDIA_TAGS = ["creation_time"]
SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
STREAM_LANGUAGE_DISPOSITIONS_MATCH = '([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
SUBTITLE_FILE_EXTENSION = 'vtt'
def __init__(self, **kwargs):
@@ -45,8 +48,7 @@ class MediaDescriptor:
self.__logger = self.__context['logger']
else:
self.__context = {}
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
if MediaDescriptor.TAGS_KEY in kwargs.keys():
if type(kwargs[MediaDescriptor.TAGS_KEY]) is not dict:
@@ -69,9 +71,9 @@ class MediaDescriptor:
raise TypeError(
f"TrackDesciptor.__init__(): All elements of argument list {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} are required to be of type TrackDescriptor"
)
self.__trackDescriptors = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
self.__trackDescriptors: List[TrackDescriptor] = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
else:
self.__trackDescriptors = []
self.__trackDescriptors: List[TrackDescriptor] = []
def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
@@ -107,14 +109,16 @@ class MediaDescriptor:
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
    """Set the DEFAULT disposition on exactly one track of the given type.

    The flag is enabled on the track whose sub-index matches and cleared on
    every other track of that type.
    """
    # BUG FIX: resolved merge residue that iterated the removed
    # getAllTrackDescriptors() alongside the new getTrackDescriptors().
    for t in self.getTrackDescriptors():
        if t.getType() == trackType:
            t.setDispositionFlag(
                TrackDisposition.DEFAULT, t.getSubIndex() == int(subIndex)
            )
def setForcedSubTrack(self, trackType: TrackType, subIndex: int):
    """Set the FORCED disposition on exactly one track of the given type.

    The flag is enabled on the track whose sub-index matches and cleared on
    every other track of that type.
    """
    # BUG FIX: resolved merge residue that iterated the removed
    # getAllTrackDescriptors() alongside the new getTrackDescriptors().
    for t in self.getTrackDescriptors():
        if t.getType() == trackType:
            t.setDispositionFlag(
                TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
            )
@@ -190,7 +194,8 @@ class MediaDescriptor:
def applySourceIndices(self, sourceMediaDescriptor: Self):
sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
# sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors()
numTrackDescriptors = len(self.__trackDescriptors)
if len(sourceTrackDescriptors) != numTrackDescriptors:
@@ -203,7 +208,7 @@ class MediaDescriptor:
def rearrangeTrackDescriptors(self, newOrder: List[int]):
    """Reorder the internal track descriptor list.

    newOrder: permutation of old indices; position i of newOrder names the
    old index that becomes new position i.
    Raises ValueError when newOrder does not cover every descriptor.
    """
    if len(newOrder) != len(self.__trackDescriptors):
        raise ValueError('Length of list with reordered indices does not match number of track descriptors')
    # BUG FIX: merge residue initialized the accumulator as a dict ({}) and
    # then called .append() on it, raising AttributeError; it must be a list.
    self.__trackDescriptors = [self.__trackDescriptors[oldIndex] for oldIndex in newOrder]
@@ -285,9 +290,9 @@ class MediaDescriptor:
tdList[trackIndex].setIndex(trackIndex)
def getAllTrackDescriptors(self):
    """Returns all track descriptors sorted by type: video, audio then subtitles"""
    # NOTE(review): the diff residue also carried a fully commented-out copy of
    # this method (it is being phased out in favor of getTrackDescriptors());
    # the dead duplicate is removed, the active definition is kept as callers
    # such as compare() still use it.
    return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
def getTrackDescriptors(self,
@@ -317,82 +322,16 @@ class MediaDescriptor:
if s.getType() == TrackType.SUBTITLE
]
def compare(self, vsMediaDescriptor: Self):
    """Diff this (target) media descriptor against another (current file state).

    Returns a dict with optional TAGS_KEY (tag diff) and TRACKS_KEY
    (added/removed/changed track indices) entries; empty dict means no change.
    Raises click.Abort when the argument is not a MediaDescriptor.
    """
    if not isinstance(vsMediaDescriptor, self.__class__):
        self.__logger.error(f"MediaDescriptor.compare(): Argument is required to be of type {self.__class__}")
        raise click.Abort()
    # BUG FIX: work on copies — removing EXCLUDED_MEDIA_TAGS below must not
    # mutate the dicts owned by the descriptors.
    vsTags = dict(vsMediaDescriptor.getTags())
    tags = dict(self.getTags())
    # HINT: Some tags differ per file, for example creation_time, so these are removed before diff
    for emt in MediaDescriptor.EXCLUDED_MEDIA_TAGS:
        tags.pop(emt, None)
        vsTags.pop(emt, None)
    tagsDiff = dictDiff(vsTags, tags)
    compareResult = {}
    if tagsDiff:
        compareResult[MediaDescriptor.TAGS_KEY] = tagsDiff
    # Target track configuration (from DB)
    tracks = self.getAllTrackDescriptors()
    numTracks = len(tracks)
    # Current track configuration (of file)
    vsTracks = vsMediaDescriptor.getAllTrackDescriptors()
    numVsTracks = len(vsTracks)
    maxNumOfTracks = max(numVsTracks, numTracks)
    trackCompareResult = {}
    for tp in range(maxNumOfTracks):
        # Will trigger if tracks are missing in file
        if tp > (numVsTracks - 1):
            if DIFF_ADDED_KEY not in trackCompareResult.keys():
                trackCompareResult[DIFF_ADDED_KEY] = set()
            trackCompareResult[DIFF_ADDED_KEY].add(tracks[tp].getIndex())
            continue
        # Will trigger if tracks are missing in DB definition.
        # New tracks will be added per update via this way.
        # BUG FIX: originally `tracks[tp].getSourceIndex()` was evaluated
        # before this guard and raised IndexError whenever the file has more
        # tracks than the DB definition; the surplus file track is indexed
        # directly instead.
        if tp > (numTracks - 1):
            if DIFF_REMOVED_KEY not in trackCompareResult.keys():
                trackCompareResult[DIFF_REMOVED_KEY] = {}
            trackCompareResult[DIFF_REMOVED_KEY][
                vsTracks[tp].getIndex()
            ] = vsTracks[tp]
            continue
        # assumption is made here that the track order will not change for all files of a sequence
        vsTrackIndex = tracks[tp].getSourceIndex()
        trackDiff = tracks[tp].compare(vsTracks[vsTrackIndex])
        if trackDiff:
            if DIFF_CHANGED_KEY not in trackCompareResult.keys():
                trackCompareResult[DIFF_CHANGED_KEY] = {}
            trackCompareResult[DIFF_CHANGED_KEY][
                vsTracks[vsTrackIndex].getIndex()
            ] = trackDiff
    if trackCompareResult:
        compareResult[MediaDescriptor.TRACKS_KEY] = trackCompareResult
    return compareResult
def getAttachmentTracks(self) -> List[TrackDescriptor]:
    """Collect every track descriptor of type ATTACHMENT, in list order."""
    attachments = []
    for descriptor in self.__trackDescriptors:
        if descriptor.getType() == TrackType.ATTACHMENT:
            attachments.append(descriptor)
    return attachments
def getImportFileTokens(self, use_sub_index: bool = True):
"""Generate ffmpeg import options for external stream files"""
importFileTokens = []
@@ -415,76 +354,115 @@ class MediaDescriptor:
return importFileTokens
def getInputMappingTokens(self,
                          use_sub_index: bool = True,
                          only_video: bool = False,
                          sourceMediaDescriptor: Self = None):
    """Tracks must be reordered for source index order.

    Generates ffmpeg '-map' tokens for every track, honoring external
    single-track source files (consuming incrementing input file pointers),
    skipping PNG thumbnails (unsupported in .webm) and image-based subtitle
    codecs (PGS/VOBSUB), and finally mapping TTF font attachments.

    NOTE(review): this block carried interleaved pre-/post-merge diff lines;
    reconstructed to the post-merge variant — confirm against repository
    history.
    """
    inputMappingTokens = []
    sortedTrackDescriptors = sorted(self.__trackDescriptors, key=lambda d: d.getIndex())
    # Source lookup: either the explicit source descriptor's tracks or, for a
    # self-contained mapping, this descriptor's own sorted tracks.
    sourceTrackDescriptorsByIndex = {
        td.getIndex(): td
        for td in (
            sourceMediaDescriptor.getTrackDescriptors()
            if sourceMediaDescriptor is not None
            else sortedTrackDescriptors
        )
    }
    # Input 0 is the main media file; external per-track files start at 1.
    filePointer = 1
    for td in sortedTrackDescriptors:
        # HINT: Attached thumbnails are not supported by .webm container format
        if td.getCodec() != TrackCodec.PNG:
            sourceTrackDescriptor = sourceTrackDescriptorsByIndex.get(td.getSourceIndex())
            if sourceTrackDescriptor is None:
                raise ValueError(f"No source track descriptor found for source index {td.getSourceIndex()}")
            trackType = td.getType()
            # Attachments are mapped separately below; only_video limits the
            # regular mapping to video streams.
            if (trackType != TrackType.ATTACHMENT
                    and (trackType == TrackType.VIDEO or not only_video)):
                stdi = sourceTrackDescriptor.getIndex()
                stdsi = sourceTrackDescriptor.getSubIndex()
                importedFilePath = td.getExternalSourceFilePath()
                trackCodec = td.getCodec()
                if use_sub_index:
                    if importedFilePath:
                        inputMappingTokens += [
                            "-map",
                            f"{filePointer}:{trackType.indicator()}:0",
                        ]
                        filePointer += 1
                    else:
                        if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
                            inputMappingTokens += [
                                "-map",
                                f"0:{trackType.indicator()}:{stdsi}",
                            ]
                else:
                    if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
                        inputMappingTokens += ["-map", f"0:{stdi}"]
    # Map TTF font attachments (needed for styled ASS subtitles) last.
    if sourceMediaDescriptor:
        fontDescriptors = [ftd for ftd in sourceMediaDescriptor.getAttachmentTracks()
                           if ftd.getAttachmentFormat() == AttachmentFormat.TTF]
    else:
        fontDescriptors = [ftd for ftd in self.__trackDescriptors
                           if ftd.getType() == TrackType.ATTACHMENT
                           and ftd.getAttachmentFormat() == AttachmentFormat.TTF]
    for ad in sorted(fontDescriptors, key=lambda d: d.getIndex()):
        inputMappingTokens += ["-map", f"0:{ad.getIndex()}"]
    return inputMappingTokens
def searchSubtitleFiles(self, searchDirectory, prefix):
sesl_match = re.compile(MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_MATCH)
sesld_match = re.compile(f"{prefix}_{MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
sld_match = re.compile(f"{prefix}_{MediaDescriptor.STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
subtitleFileDescriptors = []
for subtitleFilename in os.listdir(searchDirectory):
if subtitleFilename.startswith(prefix) and subtitleFilename.endswith(
"." + MediaDescriptor.SUBTITLE_FILE_EXTENSION
):
sesl_result = sesl_match.search(subtitleFilename)
if sesl_result is not None:
sesld_result = sesld_match.search(subtitleFilename)
sld_result = None if not sesld_result is None else sld_match.search(subtitleFilename)
if not sesld_result is None:
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
if os.path.isfile(subtitleFilePath):
subtitleFileDescriptor = {}
subtitleFileDescriptor["path"] = subtitleFilePath
subtitleFileDescriptor["season"] = int(sesl_result.group(1))
subtitleFileDescriptor["episode"] = int(sesl_result.group(2))
subtitleFileDescriptor["index"] = int(sesl_result.group(3))
subtitleFileDescriptor["language"] = sesl_result.group(4)
subtitleFileDescriptor["season"] = int(sesld_result.group(1))
subtitleFileDescriptor["episode"] = int(sesld_result.group(2))
subtitleFileDescriptor["index"] = int(sesld_result.group(3))
subtitleFileDescriptor["language"] = sesld_result.group(4)
dispSet = set()
dispCaptGroups = sesl_result.groups()
dispCaptGroups = sesld_result.groups()
numCaptGroups = len(dispCaptGroups)
if numCaptGroups > 4:
for groupIndex in range(numCaptGroups - 4):
@@ -495,12 +473,42 @@ class MediaDescriptor:
subtitleFileDescriptors.append(subtitleFileDescriptor)
if not sld_result is None:
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
if os.path.isfile(subtitleFilePath):
subtitleFileDescriptor = {}
subtitleFileDescriptor["path"] = subtitleFilePath
subtitleFileDescriptor["index"] = int(sld_result.group(1))
subtitleFileDescriptor["language"] = sld_result.group(2)
dispSet = set()
dispCaptGroups = sld_result.groups()
numCaptGroups = len(dispCaptGroups)
if numCaptGroups > 2:
for groupIndex in range(numCaptGroups - 2):
disp = TrackDisposition.fromIndicator(dispCaptGroups[groupIndex + 2])
if disp is not None:
dispSet.add(disp)
subtitleFileDescriptor["disposition_set"] = dispSet
subtitleFileDescriptors.append(subtitleFileDescriptor)
self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")
return subtitleFileDescriptors
def importSubtitles(self, searchDirectory, prefix, season: int = -1, episode: int = -1):
def importSubtitles(
self,
searchDirectory,
prefix,
season: int = -1,
episode: int = -1,
preserve_dispositions: bool = False,
):
# click.echo(f"Season: {season} Episode: {episode}")
self.__logger.debug(f"importSubtitles(): Season: {season} Episode: {episode}")
@@ -518,7 +526,11 @@ class MediaDescriptor:
[
d
for d in availableFileSubtitleDescriptors
if d["season"] == int(season) and d["episode"] == int(episode)
if ((season == -1 and episode == -1)
or (
d.get("season") == int(season)
and d.get("episode") == int(episode)
))
],
key=lambda d: d["index"],
)
@@ -533,15 +545,36 @@ class MediaDescriptor:
if matchingSubtitleTrackDescriptor:
# click.echo(f"Found matching subtitle file {msfd["path"]}\n")
self.__logger.debug(f"importSubtitles(): Found matching subtitle file {msfd['path']}")
matchingSubtitleTrackDescriptor[0].setExternalSourceFilePath(msfd["path"])
matchingTrack = matchingSubtitleTrackDescriptor[0]
matchingTrack.setExternalSourceFilePath(msfd["path"])
# TODO: Check if useful
# matchingSubtitleTrackDescriptor[0].setDispositionSet(msfd["disposition_set"])
# Prefer metadata coming from the external single-track source when
# it is provided explicitly by the filename contract.
matchingTrack.getTags()["language"] = msfd["language"]
if msfd["disposition_set"] and not preserve_dispositions:
matchingTrack.setDispositionSet(msfd["disposition_set"])
def getConfiguration(self, label: str = ''):
    """Yield a human-readable dump: one header line with the media tags,
    then one line per track (index:type:subindex, dispositions, tags)."""
    # BUG FIX: resolved merge residue that still looped over the removed
    # getAllTrackDescriptors() alongside getTrackDescriptors().
    yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
    for td in self.getTrackDescriptors():
        yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
               + '|'.join([d.indicator() for d in td.getDispositionSet()])
               + ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))
def clone(self, context: dict | None = None):
    """Return a copy of this descriptor with cloned track descriptors.

    An explicitly passed context is used for the clone and its tracks;
    otherwise the current (truthy) context is propagated.
    """
    descriptorContext = context if context is not None else self.__context
    clonedTracks = [
        trackDescriptor.clone(context=descriptorContext)
        for trackDescriptor in self.__trackDescriptors
    ]
    kwargs = {
        MediaDescriptor.TAGS_KEY: dict(self.__mediaTags),
        MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY: clonedTracks,
    }
    if context is not None:
        kwargs[MediaDescriptor.CONTEXT_KEY] = context
    elif self.__context:
        kwargs[MediaDescriptor.CONTEXT_KEY] = self.__context
    return MediaDescriptor(**kwargs)

View File

@@ -0,0 +1,406 @@
import click
from ffx.iso_language import IsoLanguage
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.helper import dictDiff, setDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
from ffx.track_codec import TrackCodec
from ffx.track_disposition import TrackDisposition
from ffx.track_type import TrackType
class MediaDescriptorChangeSet():
TAGS_KEY = "tags"
TRACKS_KEY = "tracks"
DISPOSITION_SET_KEY = "disposition_set"
TRACK_DESCRIPTOR_KEY = "track_descriptor"
def __init__(self, context,
targetMediaDescriptor: MediaDescriptor = None,
sourceMediaDescriptor: MediaDescriptor = None):
self.__context = context
self.__logger = context['logger']
self.__configurationData = self.__context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
applyCleanup = bool(self.__context.get('apply_metadata_cleanup', True))
self.__applyMetadataNormalization = bool(
self.__context.get("apply_metadata_normalization", True)
)
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = (
metadataConfiguration['remove']
if applyCleanup and 'remove' in metadataConfiguration.keys()
else []
)
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (
metadataConfiguration['streams']['remove']
if (
applyCleanup
and 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys()
)
else []
)
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
self.__targetTrackDescriptors = targetMediaDescriptor.getTrackDescriptors() if targetMediaDescriptor is not None else []
self.__sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors() if sourceMediaDescriptor is not None else []
self.__targetTrackDescriptorsByIndex = {
trackDescriptor.getIndex(): trackDescriptor
for trackDescriptor in self.__targetTrackDescriptors
}
self.__sourceTrackDescriptorsByIndex = {
trackDescriptor.getIndex(): trackDescriptor
for trackDescriptor in self.__sourceTrackDescriptors
}
targetMediaTags = targetMediaDescriptor.getTags() if targetMediaDescriptor is not None else {}
sourceMediaTags = sourceMediaDescriptor.getTags() if sourceMediaDescriptor is not None else {}
self.__changeSetObj = {}
#if targetMediaDescriptor is not None:
#!!#
tagsDiff = dictDiff(sourceMediaTags,
targetMediaTags,
ignoreKeys=self.__ignoreGlobalKeys,
removeKeys=self.__removeGlobalKeys)
if tagsDiff:
self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiff
self.__numTargetTracks = len(self.__targetTrackDescriptors)
# Current track configuration (of file)
self.__numSourceTracks = len(self.__sourceTrackDescriptors)
trackCompareResult = {}
for targetTrackDescriptor in self.__targetTrackDescriptors:
sourceTrackDescriptor = self.__sourceTrackDescriptorsByIndex.get(
targetTrackDescriptor.getSourceIndex()
)
if sourceTrackDescriptor is None:
if DIFF_ADDED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_ADDED_KEY] = {}
trackCompareResult[DIFF_ADDED_KEY][targetTrackDescriptor.getIndex()] = targetTrackDescriptor
continue
trackDiff = self.compareTracks(targetTrackDescriptor, sourceTrackDescriptor)
if trackDiff:
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_CHANGED_KEY] = {}
trackCompareResult[DIFF_CHANGED_KEY][targetTrackDescriptor.getIndex()] = trackDiff
targetSourceIndices = {
targetTrackDescriptor.getSourceIndex()
for targetTrackDescriptor in self.__targetTrackDescriptors
}
for sourceTrackDescriptor in self.__sourceTrackDescriptors:
if sourceTrackDescriptor.getIndex() not in targetSourceIndices:
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_REMOVED_KEY] = {}
trackCompareResult[DIFF_REMOVED_KEY][sourceTrackDescriptor.getIndex()] = sourceTrackDescriptor
if trackCompareResult:
self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY] = trackCompareResult
def compareTracks(self,
targetTrackDescriptor: TrackDescriptor = None,
sourceTrackDescriptor: TrackDescriptor = None):
sourceTrackTags = sourceTrackDescriptor.getTags() if sourceTrackDescriptor is not None else {}
targetTrackTags = (
self.normalizeTrackTags(
targetTrackDescriptor.getTags(),
trackDescriptor=targetTrackDescriptor,
fallbackTrackTags=sourceTrackTags,
)
if targetTrackDescriptor is not None
else {}
)
trackCompareResult = {}
tagsDiffResult = dictDiff(sourceTrackTags,
targetTrackTags,
ignoreKeys=self.__ignoreTrackKeys,
removeKeys=self.__removeTrackKeys)
if tagsDiffResult:
trackCompareResult[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiffResult
sourceDispositionSet = sourceTrackDescriptor.getDispositionSet() if sourceTrackDescriptor is not None else set()
targetDispositionSet = targetTrackDescriptor.getDispositionSet() if targetTrackDescriptor is not None else set()
# if targetTrackDescriptor.getIndex() == 3:
# raise click.ClickException(f"{sourceDispositionSet} {targetDispositionSet}")
dispositionDiffResult = setDiff(sourceDispositionSet, targetDispositionSet)
if dispositionDiffResult:
trackCompareResult[MediaDescriptorChangeSet.DISPOSITION_SET_KEY] = dispositionDiffResult
return trackCompareResult
def normalizeTrackTagValue(self, tagKey, tagValue):
if not self.__applyMetadataNormalization or tagKey != "language":
return tagValue
if isinstance(tagValue, IsoLanguage):
return tagValue.threeLetter()
trackLanguage = IsoLanguage.findThreeLetter(str(tagValue))
if trackLanguage != IsoLanguage.UNDEFINED:
return trackLanguage.threeLetter()
return tagValue
def resolveTrackLanguage(self, tagValue):
if isinstance(tagValue, IsoLanguage):
return tagValue
trackLanguage = IsoLanguage.findThreeLetter(str(tagValue))
if trackLanguage != IsoLanguage.UNDEFINED:
return trackLanguage
return None
def normalizeTrackTags(
self,
trackTags: dict,
trackDescriptor: TrackDescriptor = None,
fallbackTrackTags: dict = None,
):
normalizedTrackTags = {
tagKey: self.normalizeTrackTagValue(tagKey, tagValue)
for tagKey, tagValue in trackTags.items()
}
if (
self.__applyMetadataNormalization
and trackDescriptor is not None
and trackDescriptor.getType() in (TrackType.VIDEO, TrackType.AUDIO, TrackType.SUBTITLE)
):
trackTitle = str(normalizedTrackTags.get("title", "")).strip()
fallbackTitle = str((fallbackTrackTags or {}).get("title", "")).strip()
trackLanguage = self.resolveTrackLanguage(normalizedTrackTags.get("language"))
if not trackTitle and not fallbackTitle and trackLanguage is not None:
normalizedTrackTags["title"] = trackLanguage.label()
return normalizedTrackTags
def generateDispositionTokens(self):
"""
#Example: -disposition:s:0 default -disposition:s:1 0
"""
dispositionTokens = []
# if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
#
# if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
# trackDescriptor: TrackDescriptor
# for trackDescriptor in addedTracks.values():
#
# dispositionSet = trackDescriptor.getDispositionSet()
#
# if dispositionSet:
# dispositionTokens += [f"-disposition:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
# '+'.join([d.label() for d in dispositionSet])]
#
# if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
# trackDiffObj: dict
#
#
# for trackIndex, trackDiffObj in changedTracks.items():
#
# if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
#
# dispositionDiffObj: dict = trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY]
#
# addedDispositions = dispositionDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in dispositionDiffObj.keys() else set()
# removedDispositions = dispositionDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in dispositionDiffObj.keys() else set()
# unchangedDispositions = dispositionDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in dispositionDiffObj.keys() else set()
#
# targetDispositions = addedDispositions | unchangedDispositions
#
# trackDescriptor = self.__targetTrackDescriptors[trackIndex]
# streamIndicator = trackDescriptor.getType().indicator()
# subIndex = trackDescriptor.getSubIndex()
#
# if targetDispositions:
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
# # if not targetDispositions and removedDispositions:
# else:
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
for ttd in self.__targetTrackDescriptors:
if ttd.getType() == TrackType.ATTACHMENT:
continue
targetDispositions = ttd.getDispositionSet()
streamIndicator = ttd.getType().indicator()
subIndex = ttd.getSubIndex()
if targetDispositions:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
# if not targetDispositions and removedDispositions:
else:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
return dispositionTokens
def generateMetadataTokens(self):
metadataTokens = []
if MediaDescriptorChangeSet.TAGS_KEY in self.__changeSetObj.keys():
addedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
removedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
if DIFF_REMOVED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
changedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
outputMediaTags = addedMediaTags | changedMediaTags
if (not 'no_signature' in self.__context.keys()
or not self.__context['no_signature']):
outputMediaTags = outputMediaTags | self.__signatureTags
# outputMediaTags = {k:v for k,v in outputMediaTags.items() if k not in self.__removeGlobalKeys}
for tagKey, tagValue in outputMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for tagKey, tagValue in changedMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for removeKey in removedMediaTags.keys():
metadataTokens += [f"-metadata:g",
f"{removeKey}="]
if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
trackDescriptor: TrackDescriptor
for trackDescriptor in addedTracks.values():
for tagKey, tagValue in self.normalizeTrackTags(
trackDescriptor.getTags(),
trackDescriptor=trackDescriptor,
).items():
if not tagKey in self.__removeTrackKeys:
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
trackDiffObj: dict
for trackIndex, trackDiffObj in changedTracks.items():
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
tagsDiffObj = trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY]
addedTrackTags = tagsDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in tagsDiffObj.keys() else {}
changedTrackTags = tagsDiffObj[DIFF_CHANGED_KEY] if DIFF_CHANGED_KEY in tagsDiffObj.keys() else {}
unchangedTrackTags = tagsDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in tagsDiffObj.keys() else {}
removedTrackTags = tagsDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in tagsDiffObj.keys() else {}
outputTrackTags = addedTrackTags | changedTrackTags
trackDescriptor = self.__targetTrackDescriptorsByIndex[trackIndex]
for tagKey, tagValue in self.normalizeTrackTags(
outputTrackTags,
trackDescriptor=trackDescriptor,
fallbackTrackTags=trackDescriptor.getTags(),
).items():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
if trackDescriptor.getExternalSourceFilePath():
# When a single-track external file substitutes the
# media payload, keep metadata from the regular
# source track unless the external/target side
# overrides it explicitly.
preservedTrackTags = (
{
tagKey: tagValue
for tagKey, tagValue in removedTrackTags.items()
if tagKey not in self.__removeTrackKeys
}
| unchangedTrackTags
)
for tagKey, tagValue in self.normalizeTrackTags(
preservedTrackTags,
trackDescriptor=trackDescriptor,
fallbackTrackTags=trackDescriptor.getTags(),
).items():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
else:
for removeKey in removedTrackTags.keys():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{removeKey}="]
for tagKey, tagValue in self.__context.get('encoding_metadata_tags', {}).items():
metadataTokens += [f"-metadata:g", f"{tagKey}={tagValue}"]
metadataTokens += self.generateConfiguredRemovalMetadataTokens()
return metadataTokens
def getChangeSetObj(self):
    """Return the change-set object backing this token generator."""
    changeSet = self.__changeSetObj
    return changeSet
def generateConfiguredRemovalMetadataTokens(self):
    """Build ffmpeg ``-metadata`` tokens that blank out configured keys.

    Emits a ``-metadata:g <key>=`` pair for every configured global key and
    a ``-metadata:s:<type>:<subIndex> <key>=`` pair for every configured
    per-track key on each target track descriptor.
    """
    tokens = []
    for globalKey in self.__removeGlobalKeys:
        tokens.extend(["-metadata:g", f"{globalKey}="])
    for descriptor in self.__targetTrackDescriptors:
        # Hoist the stream specifier: it is identical for every removed key
        # of the same track.
        streamSpecifier = (f"-metadata:s:{descriptor.getType().indicator()}"
                           + f":{descriptor.getSubIndex()}")
        for trackKey in self.__removeTrackKeys:
            tokens.extend([streamSpecifier, f"{trackKey}="])
    return tokens

View File

@@ -1,684 +1 @@
import os, click, re
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input, DataTable
from textual.containers import Grid
from ffx.audio_layout import AudioLayout
from .pattern_controller import PatternController
from .show_controller import ShowController
from .track_controller import TrackController
from .tag_controller import TagController
from .show_details_screen import ShowDetailsScreen
from .pattern_details_screen import PatternDetailsScreen
from ffx.track_type import TrackType
from ffx.track_codec import TrackCodec
from ffx.model.track import Track
from ffx.track_disposition import TrackDisposition
from ffx.track_descriptor import TrackDescriptor
from ffx.show_descriptor import ShowDescriptor
from textual.widgets._data_table import CellDoesNotExist
from ffx.media_descriptor import MediaDescriptor
from ffx.file_properties import FileProperties
from ffx.helper import DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
# Screen[dict[int, str, int]]
class MediaDetailsScreen(Screen):
    """Textual screen backing the ``inspect`` command.

    Shows a media file's tags, streams, and the differences between the
    on-disk file (``current``) and the pattern stored in the database
    (``target``), and lets the user create, update, or edit patterns.
    Raises click.ClickException from __init__ when the context is not an
    'inspect' invocation with an existing file argument.
    """
    CSS = """
Grid {
grid-size: 5 8;
grid-rows: 8 2 2 2 2 8 2 2 8;
grid-columns: 25 25 120 10 75;
height: 100%;
width: 100%;
padding: 1;
}
DataTable .datatable--cursor {
background: darkorange;
color: black;
}
DataTable .datatable--header {
background: steelblue;
color: white;
}
Input {
border: none;
}
Button {
border: none;
}
DataTable {
min-height: 40;
}
#toplabel {
height: 1;
}
.two {
column-span: 2;
}
.three {
column-span: 3;
}
.four {
column-span: 4;
}
.five {
column-span: 5;
}
.triple {
row-span: 3;
}
.box {
height: 100%;
border: solid green;
}
.purple {
tint: purple 40%;
}
.yellow {
tint: yellow 40%;
}
#differences-table {
row-span: 8;
/* tint: magenta 40%; */
}
/* #pattern_input {
tint: red 40%;
}*/
"""
    # Key bindings shown in the Footer.
    BINDINGS = [
        ("n", "new_pattern", "New Pattern"),
        ("u", "update_pattern", "Update Pattern"),
        ("e", "edit_pattern", "Edit Pattern"),
    ]
    def __init__(self):
        """Validate the app context for 'inspect' and load file properties."""
        super().__init__()
        self.context = self.app.getContext()
        self.Session = self.context['database']['session'] # convenience
        # Controllers for database-side pattern/show/track/tag operations.
        self.__pc = PatternController(context = self.context)
        self.__sc = ShowController(context = self.context)
        self.__tc = TrackController(context = self.context)
        self.__tac = TagController(context = self.context)
        if not 'command' in self.context.keys() or self.context['command'] != 'inspect':
            raise click.ClickException(f"MediaDetailsScreen.__init__(): Can only perform command 'inspect'")
        if not 'arguments' in self.context.keys() or not 'filename' in self.context['arguments'].keys() or not self.context['arguments']['filename']:
            raise click.ClickException(f"MediaDetailsScreen.__init__(): Argument 'filename' is required to be provided for command 'inspect'")
        self.__mediaFilename = self.context['arguments']['filename']
        if not os.path.isfile(self.__mediaFilename):
            raise click.ClickException(f"MediaDetailsScreen.__init__(): Media file {self.__mediaFilename} does not exist")
        self.loadProperties()
    def removeShow(self, showId : int = -1):
        """Remove show entry from DataTable.
        Removes the <New show> entry if showId is not set"""
        for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
            rowData = self.showsTable.get_row(rowKey)
            try:
                # Column 0 holds the show id; the placeholder row uses ' '.
                # NOTE(review): bare except silently skips rows whose id is
                # not an int — presumably intentional for the placeholder.
                if (showId == -1 and rowData[0] == ' '
                        or showId == int(rowData[0])):
                    self.showsTable.remove_row(rowKey)
                    return
            except:
                continue
    def getRowIndexFromShowId(self, showId : int = -1) -> int:
        """Find the index of the row where the value in the specified column matches the target_value.

        Returns None when no row matches.
        """
        for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
            rowData = self.showsTable.get_row(rowKey)
            try:
                if ((showId == -1 and rowData[0] == ' ')
                        or showId == int(rowData[0])):
                    return int(self.showsTable.get_row_index(rowKey))
            except:
                continue
        return None
    def loadProperties(self):
        """(Re)read the media file and compute file-vs-database differences."""
        self.__mediaFileProperties = FileProperties(self.context, self.__mediaFilename)
        self.__currentMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
        #HINT: This is None if the filename did not match anything in database
        self.__currentPattern = self.__mediaFileProperties.getPattern()
        # no tags present
        self.__targetMediaDescriptor = self.__currentPattern.getMediaDescriptor(self.context) if self.__currentPattern is not None else None
        # Enumerating differences between media descriptors
        # from file (=current) vs from stored in database (=target)
        try:
            self.__mediaDifferences = self.__targetMediaDescriptor.compare(self.__currentMediaDescriptor) if self.__currentPattern is not None else {}
        except ValueError:
            # compare() may reject incompatible descriptors; treat as "no diff".
            self.__mediaDifferences = {}
    def updateDifferences(self):
        """Re-populate the differences table from the current diff object.

        Each diff entry becomes one human-readable row; direction is
        file -> database (added = present in DB target, missing in file).
        """
        self.loadProperties()
        self.differencesTable.clear()
        # Media-level tag differences.
        if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
            currentTags = self.__currentMediaDescriptor.getTags()
            targetTags = self.__targetMediaDescriptor.getTags()
            if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
                for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
                    row = (f"added media tag: key='{addedTagKey}' value='{targetTags[addedTagKey]}'",)
                    self.differencesTable.add_row(*map(str, row))
            if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
                for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
                    row = (f"removed media tag: key='{removedTagKey}' value='{currentTags[removedTagKey]}'",)
                    self.differencesTable.add_row(*map(str, row))
            if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
                for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
                    row = (f"changed media tag: key='{changedTagKey}' value='{currentTags[changedTagKey]}'->'{targetTags[changedTagKey]}'",)
                    self.differencesTable.add_row(*map(str, row))
        # Track-level differences (added/removed/changed tracks).
        if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
            currentTracks = self.__currentMediaDescriptor.getAllTrackDescriptors() # 0,1,2,3
            targetTracks = self.__targetMediaDescriptor.getAllTrackDescriptors() # 0 <- from DB
            if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
                #raise click.ClickException(f"add track {self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]}")
                for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
                    addedTrack : Track = targetTracks[addedTrackIndex]
                    row = (f"added {addedTrack.getType().label()} track: index={addedTrackIndex} lang={addedTrack.getLanguage().threeLetter()}",)
                    self.differencesTable.add_row(*map(str, row))
            if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
                for removedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY]:
                    row = (f"removed track: index={removedTrackIndex}",)
                    self.differencesTable.add_row(*map(str, row))
            if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
                for changedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].keys():
                    changedTrack : Track = targetTracks[changedTrackIndex]
                    changedTrackDiff : dict = self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY][changedTrackIndex]
                    # Per-track tag diffs.
                    if MediaDescriptor.TAGS_KEY in changedTrackDiff.keys():
                        if DIFF_ADDED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
                            for addedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
                                addedTagValue = changedTrack.getTags()[addedTagKey]
                                row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added key={addedTagKey} value={addedTagValue}",)
                                self.differencesTable.add_row(*map(str, row))
                        if DIFF_REMOVED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
                            for removedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
                                row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed key={removedTagKey}",)
                                self.differencesTable.add_row(*map(str, row))
                    # Per-track disposition diffs (default/forced flags).
                    if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
                        if DIFF_ADDED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
                            for addedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]:
                                row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added disposition={addedDisposition.label()}",)
                                self.differencesTable.add_row(*map(str, row))
                        if DIFF_REMOVED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
                            for removedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]:
                                row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed disposition={removedDisposition.label()}",)
                                self.differencesTable.add_row(*map(str, row))
    def on_mount(self):
        """Fill tables on first display and pre-select the matching show."""
        if self.__currentPattern is None:
            # Placeholder row the user can pick to create a brand-new show.
            row = (' ', '<New show>', ' ') # Convert each element to a string before adding
            self.showsTable.add_row(*map(str, row))
        for show in self.__sc.getAllShows():
            row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
            self.showsTable.add_row(*map(str, row))
        for mediaTagKey, mediaTagValue in self.__currentMediaDescriptor.getTags().items():
            row = (mediaTagKey, mediaTagValue) # Convert each element to a string before adding
            self.mediaTagsTable.add_row(*map(str, row))
        self.updateTracks()
        if self.__currentPattern is not None:
            showIdentifier = self.__currentPattern.getShowId()
            showRowIndex = self.getRowIndexFromShowId(showIdentifier)
            if showRowIndex is not None:
                self.showsTable.move_cursor(row=showRowIndex)
            self.query_one("#pattern_input", Input).value = self.__currentPattern.getPattern()
            self.updateDifferences()
        else:
            # No pattern match: prefill with the filename and flag it red.
            self.query_one("#pattern_input", Input).value = self.__mediaFilename
            self.highlightPattern(True)
    def highlightPattern(self, state : bool):
        """Toggle the red background of the pattern input (True = highlight)."""
        if state:
            self.query_one("#pattern_input", Input).styles.background = 'red'
        else:
            self.query_one("#pattern_input", Input).styles.background = None
    def updateTracks(self):
        """Rebuild the streams table from the current media descriptor.

        Assigns per-type sub-indices (0-based per track type) on the fly.
        """
        self.tracksTable.clear()
        trackDescriptorList = self.__currentMediaDescriptor.getAllTrackDescriptors()
        typeCounter = {}
        for td in trackDescriptorList:
            trackType = td.getType()
            if not trackType in typeCounter.keys():
                typeCounter[trackType] = 0
            dispoSet = td.getDispositionSet()
            audioLayout = td.getAudioLayout()
            row = (td.getIndex(),
                   trackType.label(),
                   typeCounter[trackType],
                   td.getCodec().label(),
                   # Layout only makes sense for audio tracks with a defined layout.
                   audioLayout.label() if trackType == TrackType.AUDIO
                   and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
                   td.getLanguage().label(),
                   td.getTitle(),
                   'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
                   'Yes' if TrackDisposition.FORCED in dispoSet else 'No')
            self.tracksTable.add_row(*map(str, row))
            typeCounter[trackType] += 1
    def compose(self):
        """Build the widget tree: shows/tags/streams tables plus controls."""
        # Create the DataTable widget
        self.showsTable = DataTable(classes="two")
        # Define the columns with headers
        self.column_key_show_id = self.showsTable.add_column("ID", width=10)
        self.column_key_show_name = self.showsTable.add_column("Name", width=50)
        self.column_key_show_year = self.showsTable.add_column("Year", width=10)
        self.showsTable.cursor_type = 'row'
        self.mediaTagsTable = DataTable(classes="two")
        # Define the columns with headers
        self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=50)
        self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=100)
        self.mediaTagsTable.cursor_type = 'row'
        self.tracksTable = DataTable(classes="two")
        # Define the columns with headers
        self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
        self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
        self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
        self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
        self.column_key_track_layout = self.tracksTable.add_column("Layout", width=10)
        self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
        self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
        self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
        self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
        self.tracksTable.cursor_type = 'row'
        # Create the DataTable widget
        self.differencesTable = DataTable(id='differences-table') # classes="triple"
        # Define the columns with headers
        self.column_key_differences = self.differencesTable.add_column("Differences (file->db)", width=70)
        self.differencesTable.cursor_type = 'row'
        yield Header()
        with Grid():
            # 1
            yield Static("Show")
            yield self.showsTable
            yield Static(" ")
            yield self.differencesTable
            # 2
            yield Static(" ", classes="four")
            # 3
            yield Static(" ")
            yield Button("Substitute", id="pattern_button")
            yield Static(" ", classes="two")
            # 4
            yield Static("Pattern")
            yield Input(type="text", id='pattern_input', classes="two")
            yield Static(" ")
            # 5
            yield Static(" ", classes="four")
            # 6
            yield Static("Media Tags")
            yield self.mediaTagsTable
            yield Static(" ")
            # 7
            yield Static(" ", classes="four")
            # 8
            yield Static(" ")
            yield Button("Set Default", id="select_default_button")
            yield Button("Set Forced", id="select_forced_button")
            yield Static(" ")
            # 9
            yield Static("Streams")
            yield self.tracksTable
            yield Static(" ")
        yield Footer()
    def getPatternDescriptorFromInput(self):
        """Returns show id and pattern from corresponding inputs.

        NOTE(review): if no show row is selected the bare except returns an
        empty dict — callers indexing ['pattern'] will then raise KeyError.
        """
        patternDescriptor = {}
        try:
            patternDescriptor['show_id'] = self.getSelectedShowDescriptor().getId()
            patternDescriptor['pattern'] = str(self.query_one("#pattern_input", Input).value)
        except:
            pass
        return patternDescriptor
    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Dispatch button clicks: pattern substitution and flag toggles."""
        if event.button.id == "pattern_button":
            # Replace the matched season/episode indicator with the generic
            # SE pattern placeholder.
            pattern = self.query_one("#pattern_input", Input).value
            patternMatch = re.search(FileProperties.SE_INDICATOR_PATTERN, pattern)
            if patternMatch:
                self.query_one("#pattern_input", Input).value = pattern.replace(patternMatch.group(1), FileProperties.SE_INDICATOR_PATTERN)
        if event.button.id == "select_default_button":
            selectedTrackDescriptor = self.getSelectedTrackDescriptor()
            self.__currentMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
            self.updateTracks()
        if event.button.id == "select_forced_button":
            selectedTrackDescriptor = self.getSelectedTrackDescriptor()
            self.__currentMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
            self.updateTracks()
    def getSelectedTrackDescriptor(self):
        """Returns a partial track descriptor.

        Built from the selected streams-table row; None when nothing is
        selected or the cursor cell no longer exists.
        """
        try:
            # Fetch the currently selected row when 'Enter' is pressed
            #selected_row_index = self.table.cursor_row
            row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
            if row_key is not None:
                selected_track_data = self.tracksTable.get_row(row_key)
                kwargs = {}
                kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
                kwargs[TrackDescriptor.INDEX_KEY] = int(selected_track_data[0])
                kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(selected_track_data[1])
                kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(selected_track_data[2])
                kwargs[TrackDescriptor.CODEC_KEY] = TrackCodec.fromLabel(selected_track_data[3])
                kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(selected_track_data[4])
                return TrackDescriptor(**kwargs)
            else:
                return None
        except CellDoesNotExist:
            return None
    def getSelectedShowDescriptor(self) -> ShowDescriptor:
        """Return a ShowDescriptor for the selected show row, or None.

        The '<New show>' placeholder row has a non-numeric id and yields
        None via the ValueError branch.
        """
        try:
            row_key, col_key = self.showsTable.coordinate_to_cell_key(self.showsTable.cursor_coordinate)
            if row_key is not None:
                selected_row_data = self.showsTable.get_row(row_key)
                try:
                    kwargs = {}
                    kwargs[ShowDescriptor.ID_KEY] = int(selected_row_data[0])
                    kwargs[ShowDescriptor.NAME_KEY] = str(selected_row_data[1])
                    kwargs[ShowDescriptor.YEAR_KEY] = int(selected_row_data[2])
                    return ShowDescriptor(**kwargs)
                except ValueError:
                    return None
        except CellDoesNotExist:
            return None
    def handle_new_pattern(self, showDescriptor: ShowDescriptor):
        """Persist a new pattern for the given show and seed its tags/tracks.

        Raises TypeError when the callback result is not a ShowDescriptor.
        """
        if type(showDescriptor) is not ShowDescriptor:
            raise TypeError("MediaDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor")
        showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
        if showRowIndex is None:
            # Show was just created on the details screen: add and re-resolve.
            show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
            self.showsTable.add_row(*map(str, show))
            showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
        if showRowIndex is not None:
            self.showsTable.move_cursor(row=showRowIndex)
        # Drop the '<New show>' placeholder row, if present.
        self.removeShow()
        patternDescriptor = self.getPatternDescriptorFromInput()
        if patternDescriptor:
            patternId = self.__pc.addPattern(patternDescriptor)
            if patternId:
                self.highlightPattern(False)
                # Seed the new pattern with the file's media tags and tracks.
                for tagKey, tagValue in self.__currentMediaDescriptor.getTags().items():
                    self.__tac.updateMediaTag(patternId, tagKey, tagValue)
                for trackDescriptor in self.__currentMediaDescriptor.getAllTrackDescriptors():
                    self.__tc.addTrack(trackDescriptor, patternId = patternId)
    def action_new_pattern(self):
        """Binding 'n': create a pattern, pushing ShowDetailsScreen if needed."""
        #TODO #427: error message in TUI
        # try:
        #     self.__currentMediaDescriptor.checkConfiguration()
        # except ValueError:
        #     return
        selectedShowDescriptor = self.getSelectedShowDescriptor()
        #HINT: Callback is invoked after this method has exited. As a workaround the callback is executed directly
        # from here with a mock-up screen result containing the necessary part of keys to perform correctly.
        if selectedShowDescriptor is None:
            self.app.push_screen(ShowDetailsScreen(), self.handle_new_pattern)
        else:
            self.handle_new_pattern(selectedShowDescriptor)
    def action_update_pattern(self):
        """When updating the database the actions must reverse the difference (eq to diff db->file)"""
        if self.__currentPattern is not None:
            patternDescriptor = self.getPatternDescriptorFromInput()
            if (patternDescriptor
                    and self.__currentPattern.getPattern() != patternDescriptor['pattern']):
                # Pattern text changed: only update the pattern and bail out.
                return self.__pc.updatePattern(self.__currentPattern.getId(), patternDescriptor)
            self.loadProperties()
            # Reverse media-tag differences: 'added' in the DB target means
            # absent in the file, so delete it; 'removed'/'changed' get the
            # file's value written back.
            if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
                if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
                    for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
                        self.__tac.deleteMediaTagByKey(self.__currentPattern.getId(), addedTagKey)
                if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
                    for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
                        currentTags = self.__currentMediaDescriptor.getTags()
                        self.__tac.updateMediaTag(self.__currentPattern.getId(), removedTagKey, currentTags[removedTagKey])
                if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
                    for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
                        currentTags = self.__currentMediaDescriptor.getTags()
                        self.__tac.updateMediaTag(self.__currentPattern.getId(), changedTagKey, currentTags[changedTagKey])
            # Reverse track differences in the same spirit.
            if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
                if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
                    for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
                        targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
                        if targetTracks:
                            self.__tc.deleteTrack(targetTracks[0].getId()) # id
                if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
                    for removedTrackIndex, removedTrack in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY].items():
                        # add the track via inspect/update
                        self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
                if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
                    # [vsTracks[tp].getIndex()] = trackDiff
                    for changedTrackIndex, changedTrackDiff in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].items():
                        changedTargetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
                        # NOTE(review): 'changedTargeTrackId' looks like a typo
                        # for 'changedTargetTrackId' — kept as-is.
                        changedTargeTrackId = changedTargetTracks[0].getId() if changedTargetTracks else None
                        changedTargetTrackIndex = changedTargetTracks[0].getIndex() if changedTargetTracks else None
                        changedCurrentTracks = [t for t in self.__currentMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
                        # changedCurrentTrackId #HINT: Undefined as track descriptors do not come from file with track_id
                        if TrackDescriptor.TAGS_KEY in changedTrackDiff.keys():
                            changedTrackTagsDiff = changedTrackDiff[TrackDescriptor.TAGS_KEY]
                            if DIFF_ADDED_KEY in changedTrackTagsDiff.keys():
                                for addedTrackTagKey in changedTrackTagsDiff[DIFF_ADDED_KEY]:
                                    if changedTargetTracks:
                                        self.__tac.deleteTrackTagByKey(changedTargeTrackId, addedTrackTagKey)
                            if DIFF_REMOVED_KEY in changedTrackTagsDiff.keys():
                                for removedTrackTagKey in changedTrackTagsDiff[DIFF_REMOVED_KEY]:
                                    if changedCurrentTracks:
                                        self.__tac.updateTrackTag(changedTargeTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
                            if DIFF_CHANGED_KEY in changedTrackTagsDiff.keys():
                                for changedTrackTagKey in changedTrackTagsDiff[DIFF_CHANGED_KEY]:
                                    if changedCurrentTracks:
                                        self.__tac.updateTrackTag(changedTargeTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
                        if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
                            changedTrackDispositionDiff = changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
                            if DIFF_ADDED_KEY in changedTrackDispositionDiff.keys():
                                for changedTrackAddedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
                                    if changedTargetTrackIndex is not None:
                                        self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackAddedDisposition, False)
                            if DIFF_REMOVED_KEY in changedTrackDispositionDiff.keys():
                                for changedTrackRemovedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
                                    if changedTargetTrackIndex is not None:
                                        self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackRemovedDisposition, True)
            self.updateDifferences()
    def action_edit_pattern(self):
        """Binding 'e': open PatternDetailsScreen for the current pattern.

        Raises click.ClickException when the pattern cannot be resolved to an id.
        """
        patternDescriptor = self.getPatternDescriptorFromInput()
        if patternDescriptor['pattern']:
            selectedPatternId = self.__pc.findPattern(patternDescriptor)
            if selectedPatternId is None:
                raise click.ClickException(f"MediaDetailsScreen.action_edit_pattern(): Pattern to edit has no id")
            self.app.push_screen(PatternDetailsScreen(patternId = selectedPatternId, showId = self.getSelectedShowDescriptor().getId()), self.handle_edit_pattern) # <-
    def handle_edit_pattern(self, screenResult):
        """Callback from PatternDetailsScreen: adopt edited pattern text."""
        self.query_one("#pattern_input", Input).value = screenResult['pattern']
        self.updateDifferences()
from .inspect_details_screen import InspectDetailsScreen as MediaDetailsScreen

View File

@@ -0,0 +1,531 @@
import os
from time import monotonic
from textual import events, work
from textual.containers import Grid
from textual.worker import Worker, WorkerState
from textual.widgets import Button, Footer, Header, Static
from ffx.metadata_editor import apply_metadata_edits
from ffx.track_descriptor import TrackDescriptor
from .i18n import t
from .confirm_screen import ConfirmScreen
from .media_workflow_screen_base import MediaWorkflowScreenBase
from .screen_support import build_screen_log_pane, localized_column_width
from .tag_delete_screen import TagDeleteScreen
from .tag_details_screen import TagDetailsScreen
from .track_details_screen import TrackDetailsScreen
from .helper import LogLevel
class MediaEditScreen(MediaWorkflowScreenBase):
GRID_COLUMN_LABEL_MIN = 12
GRID_COLUMN_2 = 20
GRID_COLUMN_3 = 25
GRID_COLUMN_4 = "4fr"
GRID_COLUMN_5 = 12
GRID_COLUMN_6 = "5fr"
CSS = f"""
Grid {{
grid-size: 6 10;
grid-rows: 2 2 2 8 2 2 8 2 8 2 2;
grid-columns: {GRID_COLUMN_LABEL_MIN} {GRID_COLUMN_2} {GRID_COLUMN_3} {GRID_COLUMN_4} {GRID_COLUMN_5} {GRID_COLUMN_6};
height: 100%;
width: 100%;
min-width: 120;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}}
DataTable .datatable--cursor {{
background: darkorange;
color: black;
}}
DataTable .datatable--header {{
background: steelblue;
color: white;
}}
Input {{
border: none;
}}
Button {{
border: none;
}}
DataTable {{
min-height: 24;
width: 100%;
}}
.two {{
column-span: 2;
}}
.three {{
column-span: 3;
}}
.four {{
column-span: 4;
}}
.five {{
column-span: 5;
}}
#differences-table {{
row-span: 10;
}}
#file_label {{
width: 100%;
}}
"""
@classmethod
def _grid_columns_spec(cls, label_column_width: int | None = None) -> str:
return " ".join(
[
str(
cls.GRID_COLUMN_LABEL_MIN
if label_column_width is None
else int(label_column_width)
),
str(cls.GRID_COLUMN_2),
str(cls.GRID_COLUMN_3),
str(cls.GRID_COLUMN_4),
str(cls.GRID_COLUMN_5),
str(cls.GRID_COLUMN_6),
]
)
COMMAND_NAME = "edit"
EDIT_MODE = True
DIFFERENCES_COLUMN_LABEL = "Planned Changes (file->edited output)"
BINDINGS = [
("escape", "back", t("Back")),
("q", "quit_screen", t("Quit")),
("a", "apply_changes", t("Apply")),
("r", "revert_changes", t("Revert")),
]
def compose(self):
self._build_media_tags_table()
self._build_tracks_table()
self._build_differences_table()
yield Header()
with Grid(id="main_grid"):
# Row 1
yield Static(t("File"))
yield Static(self._mediaFilename, id="file_label", classes="three", markup=False)
yield Static(" ")
yield self.differencesTable
# Row 2
yield Static(" ")
yield Button(t("Cleanup"), id="cleanup_toggle_button")
yield Button(t("Normalize"), id="normalize_toggle_button")
yield Static(" ", classes="two")
# Row 3
yield Static(t("Media Tags"))
yield Button(t("Add"), id="button_add_tag")
yield Button(t("Edit"), id="button_edit_tag")
yield Button(t("Delete"), id="button_delete_tag")
yield Static(" ")
# Row 4
yield Static(" ")
yield self.mediaTagsTable
yield Static(" ")
# Row 5
yield Static("", classes="five")
# Row 6
yield Static(t("Streams"))
yield Button(t("Edit"), id="button_edit_track")
yield Button(t("Set Default"), id="select_default_button")
yield Button(t("Set Forced"), id="select_forced_button")
yield Static(" ")
# Row 7
yield Static(" ")
yield self.tracksTable
yield Static(" ")
# Row 8
yield Static("", classes="five")
# Row 9
yield Static(" ")
yield Button(t("Apply"), id="apply_button")
yield Button(t("Revert"), id="revert_button")
yield Button(t("Quit"), id="quit_button")
yield Static(" ")
yield build_screen_log_pane()
yield Footer()
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
self._update_grid_layout()
self.updateMediaTags()
self.updateTracks()
self.updateDifferences()
self.updateToggleButtons()
self._applyChangesWorker = None
def on_screen_resume(self, _event: events.ScreenResume) -> None:
if not hasattr(self, "tracksTable"):
return
self.refreshAfterDraftChange()
self.updateToggleButtons()
def _update_grid_layout(self) -> None:
leftColumnWidth = max(
localized_column_width(t("File"), self.GRID_COLUMN_LABEL_MIN),
localized_column_width(t("Media Tags"), self.GRID_COLUMN_LABEL_MIN),
localized_column_width(t("Streams"), self.GRID_COLUMN_LABEL_MIN),
)
grid = self.query_one("#main_grid", Grid)
grid.styles.grid_columns = self._grid_columns_spec(leftColumnWidth)
def action_back(self):
self.action_quit_screen()
def setMessage(self, message: str):
self._messageText = str(message)
if self._messageText:
self.notify(self._messageText)
def workerLoggingHandler(self,
message: str,
level: LogLevel = LogLevel.INFO) -> None:
if level == LogLevel.DEBUG:
self.context["logger"].debug(str(message))
elif level == LogLevel.INFO:
self.context["logger"].info(str(message))
elif level == LogLevel.WARNING:
self.context["logger"].warning(str(message))
elif level == LogLevel.ERROR:
self.context["logger"].error(str(message))
elif level == LogLevel.CRITICAL:
self.context["logger"].critical(str(message))
else:
raise Exception(f"Undefined Logging Level (msg={message})")
def _report_apply_timings(self, applyResult: dict, reloadSeconds: float = 0.0) -> None:
timings = dict(applyResult.get("timings", {}))
ffmpegSeconds = float(timings.get("ffmpeg_seconds", 0.0))
replaceSeconds = float(timings.get("replace_seconds", 0.0))
writeSeconds = float(timings.get("write_seconds", ffmpegSeconds + replaceSeconds))
reloadSeconds = float(reloadSeconds)
totalSeconds = writeSeconds + reloadSeconds
timingSummary = (
f"ffx edit timings: ffmpeg={ffmpegSeconds:.2f}s "
+ f"replace={replaceSeconds:.2f}s "
+ f"reload={reloadSeconds:.2f}s "
+ f"total={totalSeconds:.2f}s"
)
self.context["logger"].info(timingSummary)
def updateToggleButtons(self):
self._set_toggle_button_state(
"#cleanup_toggle_button",
t("Cleanup"),
self._applyCleanup,
)
self._set_toggle_button_state(
"#normalize_toggle_button",
t("Normalize"),
self._applyNormalization,
)
def _set_toggle_button_state(self, selector: str, label: str, enabled: bool):
try:
button = self.query_one(selector, Button)
except Exception:
return
button.label = label
button.styles.color = "black" if enabled else "white"
button.styles.background = "darkorange" if enabled else "black"
def refreshAfterDraftChange(self):
self.updateMediaTags()
self.updateTracks()
self.updateDifferences()
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button.id == "select_default_button":
if self.setSelectedTrackDefault():
self.refreshAfterDraftChange()
if event.button.id == "select_forced_button":
if self.setSelectedTrackForced():
self.refreshAfterDraftChange()
if event.button.id == "button_add_tag":
self.app.push_screen(TagDetailsScreen(), self.handle_update_media_tag)
if event.button.id == "button_edit_tag":
selectedTag = self.getSelectedMediaTag()
if selectedTag is not None:
self.app.push_screen(
TagDetailsScreen(key=selectedTag[0], value=selectedTag[1]),
self.handle_update_media_tag,
)
if event.button.id == "button_delete_tag":
selectedTag = self.getSelectedMediaTag()
if selectedTag is not None:
self.app.push_screen(
TagDeleteScreen(key=selectedTag[0], value=selectedTag[1]),
self.handle_delete_media_tag,
)
if event.button.id == "button_edit_track":
self.action_edit_selected_track()
if event.button.id == "cleanup_toggle_button":
self.action_toggle_cleanup()
if event.button.id == "normalize_toggle_button":
self.action_toggle_normalization()
if event.button.id == "apply_button":
self.action_apply_changes()
if event.button.id == "revert_button":
self.action_revert_changes()
if event.button.id == "quit_button":
self.action_quit_screen()
def action_edit_selected_track(self):
selectedTrack = self.getSelectedTrackDescriptor()
if selectedTrack is None:
self.setMessage(t("Select a stream first."))
return
self.app.push_screen(
TrackDetailsScreen(
trackDescriptor=selectedTrack,
patternLabel=os.path.basename(self._mediaFilename),
siblingTrackDescriptors=self._sourceMediaDescriptor.getTrackDescriptors(),
metadata_only=True,
),
self.handle_edit_track,
)
def action_toggle_cleanup(self):
self.setApplyCleanup(not self._applyCleanup)
self.updateToggleButtons()
self.updateMediaTags()
self.updateDifferences()
self.setMessage(
t("Cleanup enabled.") if self._applyCleanup else t("Cleanup disabled.")
)
def action_toggle_normalization(self):
self.setApplyNormalization(not self._applyNormalization)
self.updateToggleButtons()
self.updateTracks()
self.updateDifferences()
self.setMessage(
t("Normalization enabled.")
if self._applyNormalization
else t("Normalization disabled.")
)
def handle_update_media_tag(self, tag):
if tag is None:
return
self._sourceMediaDescriptor.getTags()[str(tag[0])] = str(tag[1])
self.setMessage(t("Updated media tag {tag!r}.", tag=tag[0]))
self.refreshAfterDraftChange()
def handle_delete_media_tag(self, tag):
if tag is None:
return
self._sourceMediaDescriptor.getTags().pop(str(tag[0]), None)
self.setMessage(t("Deleted media tag {tag!r}.", tag=tag[0]))
self.refreshAfterDraftChange()
def handle_edit_track(self, trackDescriptor: TrackDescriptor):
    """Splice an edited track into a cloned draft descriptor.

    The match is found either by (source index, type) or by the visible
    (index, sub-index) pair; the first hit wins. On success the draft
    descriptor is swapped in and dependent views are refreshed.
    """
    if trackDescriptor is None:
        # Editor was dismissed without changes.
        return
    draftDescriptor = self._sourceMediaDescriptor.clone(context=self.context)
    draftTracks = draftDescriptor.getTrackDescriptors()
    editedTrack = trackDescriptor.clone(context=self.context)
    matchPosition = None
    for position, candidate in enumerate(draftTracks):
        matchesSource = (
            candidate.getSourceIndex() == editedTrack.getSourceIndex()
            and candidate.getType() == editedTrack.getType()
        )
        matchesVisible = (
            candidate.getIndex() == editedTrack.getIndex()
            and candidate.getSubIndex() == editedTrack.getSubIndex()
        )
        if matchesSource or matchesVisible:
            matchPosition = position
            break
    if matchPosition is None:
        self.setMessage(t("Unable to update selected stream."))
        return
    draftTracks[matchPosition] = editedTrack
    self._sourceMediaDescriptor = draftDescriptor
    self.setMessage(
        t(
            "Updated stream #{index} ({track_type}).",
            index=editedTrack.getIndex(),
            track_type=t(editedTrack.getType().label()),
        )
    )
    self.refreshAfterDraftChange()
def action_apply_changes(self):
    """Kick off the background worker that writes the pending metadata edits.

    No-ops (with a status message) when there are no pending changes or a
    previous apply worker is still running.
    """
    if not self.hasPendingChanges():
        self.setMessage(t("No changes to apply."))
        return
    if self._applyChangesWorker is not None and self._applyChangesWorker.is_running:
        self.setMessage(t("Apply already running."))
        return
    # BUG FIX: the message previously hard-coded "(unknown)" while still
    # passing filename= — restore the placeholder so the filename is logged.
    self.context["logger"].info(
        t("Starting metadata apply for {filename}.", filename=self._mediaFilename)
    )
    self._applyChangesWorker = self.run_apply_changes_worker()
@work(
    thread=True,
    exclusive=True,
    group="media-edit-apply",
    exit_on_error=False,
)
def run_apply_changes_worker(self):
    # Runs in a background thread via Textual's @work decorator; the
    # return value and any error are delivered asynchronously through
    # on_worker_state_changed, not to the caller of this method.
    # exclusive=True cancels any previous worker in the same group.
    return apply_metadata_edits(
        self.context,
        self._mediaFilename,
        self._baselineMediaDescriptor,
        self._sourceMediaDescriptor,
        loggingHandler = self.workerLoggingHandler,
    )
def on_worker_state_changed(self, event: Worker.StateChanged) -> None:
    """Handle completion of the apply worker: errors, dry-runs, or reload."""
    # Ignore state changes from workers other than our apply worker.
    if event.worker is not self._applyChangesWorker:
        return
    if event.state == WorkerState.ERROR:
        error = event.worker.error
        if error is not None:
            # Log with full traceback, then surface a short message in the UI.
            self.context["logger"].error(
                "Failed to apply metadata edits for %s",
                self._mediaFilename,
                exc_info=(type(error), error, error.__traceback__),
            )
            self.setMessage(t("Apply failed: {error}", error=error))
        self._applyChangesWorker = None
        return
    # Intermediate states (pending/running/cancelled) need no handling here.
    if event.state != WorkerState.SUCCESS:
        return
    applyResult = event.worker.result or {}
    if applyResult.get("dry_run", False):
        # Dry-run: nothing was written to disk, so skip the reload step.
        self._report_apply_timings(applyResult, reloadSeconds=0.0)
        self.context["logger"].info(
            t(
                "Dry-run prepared temporary output {target_path}.",
                target_path=applyResult["target_path"],
            ),
        )
        self.setMessage(
            t(
                "Dry-run: would rewrite via temporary file {target_path}",
                target_path=applyResult["target_path"],
            )
        )
        self._applyChangesWorker = None
        return
    # Real write succeeded: re-probe the file and reset the draft state.
    reloadStart = monotonic()
    self.context["logger"].info(t("Reloading file after metadata write."))
    self.reloadProperties(reset_draft=True)
    self.refreshAfterDraftChange()
    reloadSeconds = monotonic() - reloadStart
    self._report_apply_timings(applyResult, reloadSeconds=reloadSeconds)
    self.context["logger"].info(t("Changes applied and file reloaded."))
    self.setMessage(t("Changes applied and file reloaded."))
    self._applyChangesWorker = None
def action_revert_changes(self):
    """Ask the user to confirm discarding the current draft edits."""
    if not self.hasPendingChanges():
        self.setMessage(t("No changes to revert."))
        return
    confirmScreen = ConfirmScreen(
        t("Discard pending metadata changes and reload the file state?"),
        confirm_label=t("Discard"),
        cancel_label=t("Keep Editing"),
    )
    self.app.push_screen(confirmScreen, self.handle_revert_confirmation)
def handle_revert_confirmation(self, confirmed):
    """Reload from disk when confirmed, otherwise keep the current draft."""
    if confirmed:
        self.reloadProperties(reset_draft=True)
        self.refreshAfterDraftChange()
        self.setMessage(t("Reverted pending changes."))
    else:
        self.setMessage(t("Keeping pending changes."))
def action_quit_screen(self):
    """Quit immediately, or ask for confirmation when edits would be lost."""
    if not self.hasPendingChanges():
        self.app.exit()
        return
    confirmScreen = ConfirmScreen(
        t("Discard pending metadata changes and quit?"),
        confirm_label=t("Discard"),
        cancel_label=t("Stay"),
    )
    self.app.push_screen(confirmScreen, self.handle_quit_confirmation)
def handle_quit_confirmation(self, confirmed):
    """Exit the app when confirmed; otherwise stay in the edit session."""
    if not confirmed:
        self.setMessage(t("Continuing edit session."))
        return
    self.app.exit()

View File

@@ -0,0 +1,425 @@
import os
import click
from textual.screen import Screen
from textual.widgets import DataTable
from textual.widgets._data_table import CellDoesNotExist
from ffx.audio_layout import AudioLayout
from ffx.file_properties import FileProperties
from ffx.helper import DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
from ffx.iso_language import IsoLanguage
from ffx.media_descriptor import MediaDescriptor
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.track_descriptor import TrackDescriptor
from ffx.track_disposition import TrackDisposition
from ffx.track_type import TrackType
from .i18n import t
from .screen_support import add_auto_table_column, build_screen_bootstrap, populate_tag_table
class MediaWorkflowScreenBase(Screen):
    """Shared base for media screens that show tags, tracks and a change diff.

    Subclasses set COMMAND_NAME (the CLI command they serve) and EDIT_MODE:
    True compares an editable draft descriptor against the probed baseline;
    False compares the probed file against a stored pattern target.
    """
    # Column labels; translated through t() when the tables are built.
    TRACKS_TABLE_INDEX_COLUMN_LABEL = "Index"
    TRACKS_TABLE_TYPE_COLUMN_LABEL = "Type"
    TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL = "SubIndex"
    TRACKS_TABLE_CODEC_COLUMN_LABEL = "Codec"
    TRACKS_TABLE_LAYOUT_COLUMN_LABEL = "Layout"
    TRACKS_TABLE_LANGUAGE_COLUMN_LABEL = "Language"
    TRACKS_TABLE_TITLE_COLUMN_LABEL = "Title"
    TRACKS_TABLE_DEFAULT_COLUMN_LABEL = "Default"
    TRACKS_TABLE_FORCED_COLUMN_LABEL = "Forced"
    DIFFERENCES_COLUMN_LABEL = "Differences"
    COMMAND_NAME = ""
    EDIT_MODE = False
    def __init__(self):
        """Validate command/filename from the app context and probe the media file.

        Raises:
            click.ClickException: wrong command, missing filename argument,
                or the file does not exist.
        """
        super().__init__()
        bootstrap = build_screen_bootstrap(self.app.getContext())
        self.context = bootstrap.context
        self._applyCleanup = False
        self._applyNormalization = bool(self.context.get("apply_metadata_normalization", True))
        self._removeGlobalKeys = []
        self._ignoreGlobalKeys = []
        self._apply_bootstrap_settings(bootstrap)
        command = self.context.get("command")
        if command != self.COMMAND_NAME:
            raise click.ClickException(
                f"{type(self).__name__}.__init__(): Can only perform command '{self.COMMAND_NAME}'"
            )
        arguments = self.context.get("arguments", {})
        self._mediaFilename = arguments.get("filename", "")
        if not self._mediaFilename:
            raise click.ClickException(
                f"{type(self).__name__}.__init__(): Argument 'filename' is required"
            )
        if not os.path.isfile(self._mediaFilename):
            raise click.ClickException(
                f"{type(self).__name__}.__init__(): Media file {self._mediaFilename} does not exist"
            )
        # Descriptor state: baseline = as probed, source = shown/edited,
        # target = comparison target (draft in edit mode, pattern otherwise).
        self._baselineMediaDescriptor = None
        self._sourceMediaDescriptor = None
        self._targetMediaDescriptor = None
        self._currentPattern = None
        self._mediaChangeSetObj = {}
        self._messageText = ""
        # Row-key -> payload maps so table selections resolve to objects.
        self._trackRowData: dict[object, TrackDescriptor] = {}
        self._sourceMediaTagRowData: dict[object, tuple[str, str]] = {}
        self.reloadProperties(reset_draft=True)
    def _apply_bootstrap_settings(self, bootstrap) -> None:
        # Copy cleanup-related settings from a freshly built bootstrap.
        self._applyCleanup = bootstrap.apply_cleanup
        self._removeGlobalKeys = bootstrap.remove_global_keys
        self._ignoreGlobalKeys = bootstrap.ignore_global_keys
    def refreshCleanupSettings(self) -> None:
        """Re-derive cleanup settings from the current context."""
        self._apply_bootstrap_settings(build_screen_bootstrap(self.context))
    def setApplyCleanup(self, enabled: bool) -> None:
        """Persist the cleanup flag into the context and refresh derived state."""
        self.context["apply_metadata_cleanup"] = bool(enabled)
        self.refreshCleanupSettings()
    def refreshNormalizationSettings(self) -> None:
        """Re-read the normalization flag from the context (defaults to True)."""
        self._applyNormalization = bool(
            self.context.get("apply_metadata_normalization", True)
        )
    def setApplyNormalization(self, enabled: bool) -> None:
        """Persist the normalization flag into the context and refresh it."""
        self.context["apply_metadata_normalization"] = bool(enabled)
        self.refreshNormalizationSettings()
    def _build_media_tags_table(self):
        # Two-column key/value table with row-level selection.
        self.mediaTagsTable = DataTable(classes="three")
        add_auto_table_column(self.mediaTagsTable, t("Key"))
        add_auto_table_column(self.mediaTagsTable, t("Value"))
        self.mediaTagsTable.cursor_type = "row"
    def _build_tracks_table(self):
        # Tracks table; columns are (re)configured separately so updateTracks
        # can rebuild them after clear(columns=True).
        self.tracksTable = DataTable(classes="three")
        self._configure_tracks_table_columns()
        self.tracksTable.cursor_type = "row"
    def _configure_tracks_table_columns(self):
        # Column order must match the row tuple built in updateTracks().
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_INDEX_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_TYPE_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_CODEC_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_LAYOUT_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_LANGUAGE_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_TITLE_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_DEFAULT_COLUMN_LABEL))
        add_auto_table_column(self.tracksTable, t(self.TRACKS_TABLE_FORCED_COLUMN_LABEL))
    def _build_differences_table(self):
        # Single-column list of human-readable change descriptions.
        self.differencesTable = DataTable(id="differences-table")
        add_auto_table_column(self.differencesTable, t(self.DIFFERENCES_COLUMN_LABEL))
        self.differencesTable.cursor_type = "row"
    def _track_codec_cell_value(self, trackDescriptor: TrackDescriptor) -> str:
        # Attachments have no codec; render a blank cell instead.
        if trackDescriptor.getType() == TrackType.ATTACHMENT:
            return " "
        return trackDescriptor.getFormatDescriptor().label()
    def _track_disposition_cell_value(
        self,
        trackDescriptor: TrackDescriptor,
        disposition: TrackDisposition,
    ) -> str:
        # Attachments carry no dispositions; render a blank cell.
        if trackDescriptor.getType() == TrackType.ATTACHMENT:
            return " "
        return (
            t("Yes")
            if disposition in trackDescriptor.getDispositionSet()
            else t("No")
        )
    def reloadProperties(self, reset_draft: bool = True):
        """Re-probe the media file and rebuild descriptor state.

        In edit mode the probed descriptor becomes the baseline; the draft
        (source) is re-cloned from it only when reset_draft is True or no
        draft exists yet. Outside edit mode the target is derived from the
        matched pattern, if any.
        """
        self._mediaFileProperties = FileProperties(self.context, self._mediaFilename)
        probedMediaDescriptor = self._mediaFileProperties.getMediaDescriptor()
        if self.EDIT_MODE:
            self._baselineMediaDescriptor = probedMediaDescriptor
            if reset_draft or self._sourceMediaDescriptor is None:
                self._sourceMediaDescriptor = probedMediaDescriptor.clone(context=self.context)
            self._targetMediaDescriptor = self._sourceMediaDescriptor
            self._currentPattern = None
        else:
            self._baselineMediaDescriptor = probedMediaDescriptor
            self._sourceMediaDescriptor = probedMediaDescriptor
            self._currentPattern = self._mediaFileProperties.getPattern()
            self._targetMediaDescriptor = (
                self._currentPattern.getMediaDescriptor(self.context)
                if self._currentPattern is not None
                else None
            )
        self.rebuildChangeSet()
    def rebuildChangeSet(self):
        """Recompute the diff object; on failure the diff is cleared.

        Edit mode diffs draft-vs-baseline; otherwise target-vs-source
        (empty when no pattern target exists).
        """
        try:
            if self.EDIT_MODE:
                mdcs = MediaDescriptorChangeSet(
                    self.context,
                    self._sourceMediaDescriptor,
                    self._baselineMediaDescriptor,
                )
            else:
                if self._targetMediaDescriptor is None:
                    self._mediaChangeSetObj = {}
                    return
                mdcs = MediaDescriptorChangeSet(
                    self.context,
                    self._targetMediaDescriptor,
                    self._sourceMediaDescriptor,
                )
            self._mediaChangeSetObj = mdcs.getChangeSetObj()
        except ValueError:
            # Incompatible descriptors: treat as "no computable changes".
            self._mediaChangeSetObj = {}
    def hasPendingChanges(self) -> bool:
        """True when the last computed change set is non-empty."""
        return bool(self._mediaChangeSetObj)
    def getDisplayedMediaDescriptor(self) -> MediaDescriptor | None:
        """Descriptor currently rendered in the tables (the source/draft)."""
        return self._sourceMediaDescriptor
    def getTrackEditSourceDescriptor(self) -> TrackDescriptor | None:
        """Track that edit operations act on; defaults to the table selection."""
        return self.getSelectedTrackDescriptor()
    def updateMediaTags(self):
        """Repopulate the media-tags table from the displayed descriptor."""
        displayedMediaDescriptor = self.getDisplayedMediaDescriptor()
        self._sourceMediaTagRowData = populate_tag_table(
            self.mediaTagsTable,
            displayedMediaDescriptor.getTags() if displayedMediaDescriptor is not None else {},
            ignore_keys=self._ignoreGlobalKeys,
            remove_keys=self._removeGlobalKeys,
        )
    def updateTracks(self):
        """Rebuild the tracks table from the displayed descriptor.

        SubIndex is a per-type running counter. When normalization is on,
        empty titles of video/audio/subtitle tracks are displayed as the
        track language label (display only; the descriptor is unchanged).
        """
        self.tracksTable.clear(columns=True)
        self._configure_tracks_table_columns()
        self._trackRowData = {}
        displayedMediaDescriptor = self.getDisplayedMediaDescriptor()
        trackDescriptorList = (
            displayedMediaDescriptor.getTrackDescriptors()
            if displayedMediaDescriptor is not None
            else []
        )
        typeCounter = {}
        applyNormalization = bool(getattr(self, "_applyNormalization", False))
        for trackDescriptor in trackDescriptorList:
            trackType = trackDescriptor.getType()
            if trackType not in typeCounter:
                typeCounter[trackType] = 0
            dispositionSet = trackDescriptor.getDispositionSet()
            audioLayout = trackDescriptor.getAudioLayout()
            trackTitle = trackDescriptor.getTitle()
            if (
                applyNormalization
                and not str(trackTitle).strip()
                and trackType in (TrackType.VIDEO, TrackType.AUDIO, TrackType.SUBTITLE)
            ):
                trackLanguage = trackDescriptor.getLanguage()
                if trackLanguage != IsoLanguage.UNDEFINED:
                    trackTitle = trackLanguage.label()
            row = (
                trackDescriptor.getIndex(),
                t(trackType.label()),
                typeCounter[trackType],
                self._track_codec_cell_value(trackDescriptor),
                t(audioLayout.label())
                if trackType == TrackType.AUDIO
                and audioLayout != AudioLayout.LAYOUT_UNDEFINED
                else " ",
                trackDescriptor.getLanguage().label(),
                trackTitle,
                self._track_disposition_cell_value(
                    trackDescriptor,
                    TrackDisposition.DEFAULT,
                ),
                self._track_disposition_cell_value(
                    trackDescriptor,
                    TrackDisposition.FORCED,
                ),
            )
            row_key = self.tracksTable.add_row(*map(str, row))
            self._trackRowData[row_key] = trackDescriptor
            typeCounter[trackType] += 1
    def updateDifferences(self):
        """Recompute the change set and render it as human-readable rows.

        Outside edit mode nothing is rendered unless a pattern matched.
        Ignored global keys are filtered everywhere; outside edit mode,
        keys scheduled for removal are also hidden from "remove" rows.
        """
        self.rebuildChangeSet()
        self.differencesTable.clear()
        if not self.EDIT_MODE and self._currentPattern is None:
            return
        targetDescriptor = (
            self._sourceMediaDescriptor
            if self.EDIT_MODE
            else self._targetMediaDescriptor
        )
        # Index lookup so per-track diffs can show type/sub-index context.
        targetTrackDescriptorsByIndex = {
            trackDescriptor.getIndex(): trackDescriptor
            for trackDescriptor in (
                targetDescriptor.getTrackDescriptors()
                if targetDescriptor is not None
                else []
            )
        }
        tagDifferences = self._mediaChangeSetObj.get(MediaDescriptorChangeSet.TAGS_KEY, {})
        for tagKey, tagValue in tagDifferences.get(DIFF_ADDED_KEY, {}).items():
            if tagKey not in self._ignoreGlobalKeys:
                self.differencesTable.add_row(
                    t("add media tag: key='{key}' value='{value}'", key=tagKey, value=tagValue)
                )
        for tagKey, tagValue in tagDifferences.get(DIFF_REMOVED_KEY, {}).items():
            if tagKey in self._ignoreGlobalKeys:
                continue
            if not self.EDIT_MODE and tagKey in self._removeGlobalKeys:
                continue
            self.differencesTable.add_row(
                t("remove media tag: key='{key}' value='{value}'", key=tagKey, value=tagValue)
            )
        for tagKey, tagValue in tagDifferences.get(DIFF_CHANGED_KEY, {}).items():
            if tagKey not in self._ignoreGlobalKeys:
                self.differencesTable.add_row(
                    t("change media tag: key='{key}' value='{value}'", key=tagKey, value=tagValue)
                )
        trackDifferences = self._mediaChangeSetObj.get(MediaDescriptorChangeSet.TRACKS_KEY, {})
        for trackDescriptor in trackDifferences.get(DIFF_ADDED_KEY, {}).values():
            self.differencesTable.add_row(
                t(
                    "add {track_type} track: index={index} lang={language}",
                    track_type=t(trackDescriptor.getType().label()),
                    index=trackDescriptor.getIndex(),
                    language=trackDescriptor.getLanguage().threeLetter(),
                )
            )
        for trackIndex in trackDifferences.get(DIFF_REMOVED_KEY, {}).keys():
            self.differencesTable.add_row(t("remove stream #{index}", index=trackIndex))
        for trackIndex, trackDiffObj in trackDifferences.get(DIFF_CHANGED_KEY, {}).items():
            targetTrackDescriptor = targetTrackDescriptorsByIndex.get(trackIndex)
            if targetTrackDescriptor is None:
                # Changed track no longer present in the target; skip it.
                continue
            tagsDiff = trackDiffObj.get(MediaDescriptorChangeSet.TAGS_KEY, {})
            for tagKey, tagValue in tagsDiff.get(DIFF_REMOVED_KEY, {}).items():
                self.differencesTable.add_row(
                    t(
                        "change stream #{index} ({track_type}:{sub_index}) remove key={key} value={value}",
                        index=targetTrackDescriptor.getIndex(),
                        track_type=t(targetTrackDescriptor.getType().label()),
                        sub_index=targetTrackDescriptor.getSubIndex(),
                        key=tagKey,
                        value=tagValue,
                    )
                )
            for tagKey, tagValue in tagsDiff.get(DIFF_ADDED_KEY, {}).items():
                self.differencesTable.add_row(
                    t(
                        "change stream #{index} ({track_type}:{sub_index}) add key={key} value={value}",
                        index=targetTrackDescriptor.getIndex(),
                        track_type=t(targetTrackDescriptor.getType().label()),
                        sub_index=targetTrackDescriptor.getSubIndex(),
                        key=tagKey,
                        value=tagValue,
                    )
                )
            for tagKey, tagValue in tagsDiff.get(DIFF_CHANGED_KEY, {}).items():
                self.differencesTable.add_row(
                    t(
                        "change stream #{index} ({track_type}:{sub_index}) change key={key} value={value}",
                        index=targetTrackDescriptor.getIndex(),
                        track_type=t(targetTrackDescriptor.getType().label()),
                        sub_index=targetTrackDescriptor.getSubIndex(),
                        key=tagKey,
                        value=tagValue,
                    )
                )
            dispositionDiff = trackDiffObj.get(MediaDescriptorChangeSet.DISPOSITION_SET_KEY, {})
            for addedDisposition in dispositionDiff.get(DIFF_ADDED_KEY, set()):
                self.differencesTable.add_row(
                    t(
                        "change stream #{index} ({track_type}:{sub_index}) add disposition={disposition}",
                        index=targetTrackDescriptor.getIndex(),
                        track_type=t(targetTrackDescriptor.getType().label()),
                        sub_index=targetTrackDescriptor.getSubIndex(),
                        disposition=t(addedDisposition.label()),
                    )
                )
            for removedDisposition in dispositionDiff.get(DIFF_REMOVED_KEY, set()):
                self.differencesTable.add_row(
                    t(
                        "change stream #{index} ({track_type}:{sub_index}) remove disposition={disposition}",
                        index=targetTrackDescriptor.getIndex(),
                        track_type=t(targetTrackDescriptor.getType().label()),
                        sub_index=targetTrackDescriptor.getSubIndex(),
                        disposition=t(removedDisposition.label()),
                    )
                )
    def getSelectedMediaTag(self):
        """Return the (key, value) pair under the tags-table cursor, or None."""
        try:
            row_key, _ = self.mediaTagsTable.coordinate_to_cell_key(
                self.mediaTagsTable.cursor_coordinate
            )
            if row_key is not None:
                return self._sourceMediaTagRowData.get(row_key)
            return None
        except CellDoesNotExist:
            # Empty table / stale cursor position.
            return None
    def getSelectedTrackDescriptor(self):
        """Return the TrackDescriptor under the tracks-table cursor, or None."""
        try:
            row_key, _ = self.tracksTable.coordinate_to_cell_key(
                self.tracksTable.cursor_coordinate
            )
            if row_key is not None:
                return self._trackRowData.get(row_key)
            return None
        except CellDoesNotExist:
            # Empty table / stale cursor position.
            return None
    def setSelectedTrackDefault(self):
        """Mark the selected track as the default of its type.

        Returns True when a track was selected and updated, else False.
        """
        selectedTrackDescriptor = self.getTrackEditSourceDescriptor()
        if selectedTrackDescriptor is None:
            return False
        self._sourceMediaDescriptor.setDefaultSubTrack(
            selectedTrackDescriptor.getType(),
            selectedTrackDescriptor.getSubIndex(),
        )
        return True
    def setSelectedTrackForced(self):
        """Mark the selected track as forced for its type.

        Returns True when a track was selected and updated, else False.
        """
        selectedTrackDescriptor = self.getTrackEditSourceDescriptor()
        if selectedTrackDescriptor is None:
            return False
        self._sourceMediaDescriptor.setForcedSubTrack(
            selectedTrackDescriptor.getType(),
            selectedTrackDescriptor.getSubIndex(),
        )
        return True

177
src/ffx/metadata_editor.py Normal file
View File

@@ -0,0 +1,177 @@
from __future__ import annotations
import click
import os
import tempfile
from time import monotonic
from .constants import (
DEFAULT_AC3_BANDWIDTH,
DEFAULT_DTS_BANDWIDTH,
DEFAULT_STEREO_BANDWIDTH,
FFMPEG_COMMAND_TOKENS,
)
from .media_descriptor import MediaDescriptor
from .media_descriptor_change_set import MediaDescriptorChangeSet
from .process import executeProcess, formatCommandSequence
from .video_encoder import VideoEncoder
from .helper import LogLevel
def create_temporary_output_path(source_path: str) -> str:
    """Reserve a unique sibling path for the rewritten media file.

    A hidden temp file prefixed with the source stem is created next to
    *source_path* purely to claim a unique name, then deleted so ffmpeg can
    create the output itself. Returns the reserved path.
    """
    absoluteDirectory = os.path.dirname(os.path.abspath(source_path)) or "."
    stem, extension = os.path.splitext(os.path.basename(source_path))
    handle, reservedPath = tempfile.mkstemp(
        prefix=f".{stem}.ffx-edit-",
        suffix=extension or ".tmp",
        dir=absoluteDirectory,
    )
    os.close(handle)
    os.unlink(reservedPath)
    return reservedPath
def build_metadata_edit_context(context: dict) -> dict:
    """Derive a shallow-copied context configured for a copy-only remux.

    Forces stream copy (no re-encode, no cut), defensively copies nested
    mutable settings, and clears encoding metadata tags.
    """
    editContext = dict(context)
    editContext["video_encoder"] = VideoEncoder.COPY
    editContext["perform_cut"] = False
    editContext["no_signature"] = bool(editContext.get("no_signature", True))
    editContext["resource_limits"] = dict(editContext.get("resource_limits", {}))
    fallbackBitrates = {
        "stereo": f"{DEFAULT_STEREO_BANDWIDTH}k",
        "ac3": f"{DEFAULT_AC3_BANDWIDTH}k",
        "dts": f"{DEFAULT_DTS_BANDWIDTH}k",
    }
    editContext["bitrates"] = dict(editContext.get("bitrates", fallbackBitrates))
    editContext["encoding_metadata_tags"] = {}
    return editContext
def build_metadata_edit_command(
    context: dict,
    source_path: str,
    target_path: str,
    baseline_descriptor: MediaDescriptor,
    draft_descriptor: MediaDescriptor,
) -> list[str]:
    """Assemble the stream-copy ffmpeg command that carries the metadata edits."""
    changeSet = MediaDescriptorChangeSet(context, draft_descriptor, baseline_descriptor)
    commandSequence = list(FFMPEG_COMMAND_TOKENS)
    # Map every input stream and copy all codecs; only metadata changes.
    commandSequence += ["-i", source_path, "-map", "0", "-c", "copy"]
    commandSequence += changeSet.generateMetadataTokens()
    commandSequence += changeSet.generateDispositionTokens()
    commandSequence.append(target_path)
    return commandSequence
def notify_ffmpeg_invocation(
    context: dict,
    command_sequence: list[str],
    *,
    loggingHandler = None,
    dry_run: bool = False,
) -> None:
    """Emit a log line describing the upcoming (or simulated) ffmpeg call.

    The explicit loggingHandler wins over the context's logging_handler;
    without a callable handler nothing is emitted. Verbose mode logs the
    full command at DEBUG level, otherwise a short status line is sent.
    """
    loggingCallback = loggingHandler or context.get("logging_handler")
    if not callable(loggingCallback):
        return
    verbosity = int(context.get("verbosity", 0) or 0)
    if verbosity > 0:
        prefix = "ffmpeg dry-run" if dry_run else "ffmpeg"
        loggingCallback(f"{prefix}: {formatCommandSequence(command_sequence)}", level = LogLevel.DEBUG)
        return
    if dry_run:
        loggingCallback("ffmpeg dry-run prepared.")
    else:
        loggingCallback("ffmpeg metadata write started.")
def apply_metadata_edits(
    context: dict,
    source_path: str,
    baseline_descriptor: MediaDescriptor,
    draft_descriptor: MediaDescriptor,
    *,
    loggingHandler = None,
) -> dict[str, object]:
    """Rewrite source_path with the draft's metadata via a temp file.

    Builds a copy-only ffmpeg command from the baseline/draft diff, writes
    to a temporary sibling path, then atomically replaces the source file.
    When context['dry_run'] is truthy nothing is executed or written.

    Returns a result dict with 'applied', 'dry_run', 'target_path',
    'command_sequence' and per-phase 'timings' (seconds).

    Raises:
        click.ClickException: ffmpeg exited with a non-zero return code.
    """
    temporaryOutputPath = create_temporary_output_path(source_path)
    editContext = build_metadata_edit_context(context)
    commandSequence = build_metadata_edit_command(
        editContext,
        source_path,
        temporaryOutputPath,
        baseline_descriptor,
        draft_descriptor,
    )
    ffmpegSeconds = 0.0
    replaceSeconds = 0.0
    try:
        if editContext.get("dry_run", False):
            notify_ffmpeg_invocation(
                editContext,
                commandSequence,
                loggingHandler = loggingHandler,
                dry_run=True,
            )
            # Dry run: report the prepared command without touching disk.
            return {
                "applied": False,
                "dry_run": True,
                "target_path": temporaryOutputPath,
                "command_sequence": commandSequence,
                "timings": {
                    "ffmpeg_seconds": ffmpegSeconds,
                    "replace_seconds": replaceSeconds,
                    "write_seconds": ffmpegSeconds + replaceSeconds,
                },
            }
        notify_ffmpeg_invocation(editContext,
            commandSequence,
            loggingHandler = loggingHandler)
        ffmpegStart = monotonic()
        _out, err, rc = executeProcess(commandSequence, context=editContext)
        ffmpegSeconds = monotonic() - ffmpegStart
        if rc:
            raise click.ClickException(f"ffmpeg edit failed: rc={rc} error={err}")
        # Atomic on POSIX: the original is only replaced on ffmpeg success.
        replaceStart = monotonic()
        os.replace(temporaryOutputPath, source_path)
        replaceSeconds = monotonic() - replaceStart
        return {
            "applied": True,
            "dry_run": False,
            "target_path": source_path,
            "command_sequence": commandSequence,
            "timings": {
                "ffmpeg_seconds": ffmpegSeconds,
                "replace_seconds": replaceSeconds,
                "write_seconds": ffmpegSeconds + replaceSeconds,
            },
        }
    except Exception:
        # Best-effort cleanup of a partially written temp file, then re-raise.
        if os.path.exists(temporaryOutputPath):
            os.remove(temporaryOutputPath)
        raise

View File

@@ -0,0 +1,20 @@
"""Load ORM model modules so SQLAlchemy relationship strings can resolve."""
from .show import Base, Show
from .pattern import Pattern
from .track import Track
from .track_tag import TrackTag
from .media_tag import MediaTag
from .shifted_season import ShiftedSeason
from .property import Property
# Public re-export surface. Importing this module registers every ORM model
# with the shared declarative Base so string-based relationship targets
# (e.g. relationship('Pattern')) can resolve.
__all__ = [
    'Base',
    'Show',
    'Pattern',
    'Track',
    'TrackTag',
    'MediaTag',
    'ShiftedSeason',
    'Property',
]

View File

@@ -1,47 +0,0 @@
import os, sys, importlib, inspect, glob, re
from ffx.configuration_controller import ConfigurationController
from ffx.database import databaseContext
from sqlalchemy import Engine
from sqlalchemy.orm import sessionmaker
class Conversion():
    """Base class for database schema conversions between version pairs.

    Subclasses live in sibling modules named conversion_<from>_<to>.py and
    are discovered/loaded via the static helpers below.
    """
    def __init__(self):
        # Build a minimal context with config and an open database handle.
        self._context = {}
        self._context['config'] = ConfigurationController()
        self._context['database'] = databaseContext(databasePath=self._context['config'].getDatabaseFilePath())
        # NOTE: double-underscore names are name-mangled to
        # _Conversion__databaseSession / _Conversion__databaseEngine;
        # subclasses cannot reach them as self.__databaseSession.
        self.__databaseSession: sessionmaker = self._context['database']['session']
        self.__databaseEngine: Engine = self._context['database']['engine']
    @staticmethod
    def list():
        """Return (from, to) version string tuples found next to this module."""
        basePath = os.path.dirname(__file__)
        filenamePattern = re.compile("conversion_([0-9]+)_([0-9]+)\\.py")
        # assumes glob paths compare equal to __file__ for self-exclusion — TODO confirm
        filenameList = [os.path.basename(fp) for fp in glob.glob(f"{ basePath }/*.py") if fp != __file__]
        versionTupleList = [(fm.group(1), fm.group(2)) for fn in filenameList if (fm := filenamePattern.search(fn))]
        return versionTupleList
    @staticmethod
    def getClassReference(versionFrom, versionTo):
        """Import the conversion module and return its Conversion_* class."""
        importlib.import_module(f"ffx.model.conversions.conversion_{ versionFrom }_{ versionTo }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.model.conversions.conversion_{ versionFrom }_{ versionTo }"]):
            #HINT: Excluding DispositionCombination as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'Conversion' and name.startswith('Conversion'):
                return obj
    @staticmethod
    def getAllClassReferences():
        """Resolve the class object for every discovered conversion module."""
        return [Conversion.getClassReference(verFrom, verTo) for verFrom, verTo in Conversion.list()]

View File

@@ -1,17 +0,0 @@
import os, sys, importlib, inspect, glob, re
from .conversion import Conversion
class Conversion_2_3(Conversion):
    """Schema conversion from database version 2 to 3."""
    def __init__(self):
        super().__init__()
    def applyConversion(self):
        """Add the user.email column using the inherited session/engine.

        BUG FIX: the parent stores these handles on double-underscore
        attributes, which Python mangles to _Conversion__databaseSession /
        _Conversion__databaseEngine. The original `self.__databaseSession`
        mangled to _Conversion_2_3__databaseSession here and raised
        AttributeError at runtime.
        """
        s = self._Conversion__databaseSession()
        e = self._Conversion__databaseEngine
        with e.connect() as c:
            # NOTE(review): SQLAlchemy 2.x rejects raw SQL strings on
            # Connection.execute (needs text(...)) — confirm the pinned
            # SQLAlchemy version before relying on this call.
            c.execute("ALTER TABLE user ADD COLUMN email VARCHAR(255)")

View File

@@ -1,7 +0,0 @@
import os, sys, importlib, inspect, glob, re
from .conversion import Conversion
class Conversion_3_4(Conversion):
    """Conversion from database version 3 to 4; no schema work defined here."""
    pass

View File

@@ -0,0 +1,82 @@
from __future__ import annotations
from dataclasses import dataclass
import importlib
import importlib.util
class DatabaseVersionException(Exception):
    """Raised when a migration step is missing or malformed for a version pair."""
    def __init__(self, errorMessage):
        super().__init__(errorMessage)
@dataclass(frozen=True)
class MigrationStep:
    """Immutable description of one single-increment migration step."""
    versionFrom: int      # schema version the step starts from
    versionTo: int        # schema version the step produces (versionFrom + 1)
    moduleName: str       # dotted path of the step module
    modulePresent: bool   # whether that module is importable right now
def getMigrationStepModuleName(versionFrom: int, versionTo: int) -> str:
    """Return the dotted module path for the step migrating versionFrom -> versionTo."""
    return "ffx.model.migration.step_{}_{}".format(int(versionFrom), int(versionTo))
def migrationStepModuleExists(versionFrom: int, versionTo: int) -> bool:
    """Return True when the step module for this version pair is importable."""
    # Inlined form of getMigrationStepModuleName(versionFrom, versionTo).
    moduleName = f"ffx.model.migration.step_{int(versionFrom)}_{int(versionTo)}"
    try:
        moduleSpec = importlib.util.find_spec(moduleName)
    except ModuleNotFoundError:
        # A missing parent package also means "no such step module".
        return False
    return moduleSpec is not None
def getMigrationPlan(currentVersion: int, targetVersion: int) -> list[MigrationStep]:
    """Build the ordered single-increment steps from current to target version.

    Returns an empty list when currentVersion >= targetVersion.
    """
    start = int(currentVersion)
    end = int(targetVersion)
    return [
        MigrationStep(
            versionFrom=stepFrom,
            versionTo=stepFrom + 1,
            moduleName=getMigrationStepModuleName(stepFrom, stepFrom + 1),
            modulePresent=migrationStepModuleExists(stepFrom, stepFrom + 1),
        )
        for stepFrom in range(start, end)
    ]
def loadMigrationStep(versionFrom: int, versionTo: int):
    """Import the step module and return its applyMigration callable.

    Raises DatabaseVersionException when the module itself is missing or
    does not define applyMigration; unrelated import errors propagate.
    """
    moduleName = getMigrationStepModuleName(versionFrom, versionTo)
    try:
        stepModule = importlib.import_module(moduleName)
    except ModuleNotFoundError as ex:
        if ex.name != moduleName:
            # Some other module failed to import; not a missing step.
            raise
        raise DatabaseVersionException(
            f"No migration path from database version {versionFrom} to {versionTo}"
        ) from ex
    applyCallable = getattr(stepModule, "applyMigration", None)
    if applyCallable is not None:
        return applyCallable
    raise DatabaseVersionException(
        f"Migration module {moduleName} does not define applyMigration()"
    )
def migrateDatabase(databaseContext, currentVersion: int, targetVersion: int, setDatabaseVersion):
    """Run each pending migration step in order, persisting the version after each.

    setDatabaseVersion is invoked after every successful step so an
    interrupted run resumes from the last completed version.
    """
    for step in getMigrationPlan(currentVersion, targetVersion):
        applyStep = loadMigrationStep(step.versionFrom, step.versionTo)
        applyStep(databaseContext)
        setDatabaseVersion(databaseContext, step.versionTo)

View File

@@ -0,0 +1,84 @@
from sqlalchemy import inspect, text
def applyMigration(databaseContext):
    """Migrate the SQLite schema: rebuild shifted_seasons and extend shows.

    Idempotent: each change is guarded by a column-existence check, so
    re-running on an already-migrated database is a no-op. SQLite cannot
    add a foreign key to an existing table, hence the create-copy-drop-
    rename dance for shifted_seasons.
    """
    engine = databaseContext['engine']
    inspector = inspect(engine)
    # Current column sets used to detect what still needs migrating.
    shiftedSeasonColumns = {
        column['name']
        for column in inspector.get_columns('shifted_seasons')
    }
    showColumns = {
        column['name']
        for column in inspector.get_columns('shows')
    }
    # engine.begin() wraps everything below in a single transaction.
    with engine.begin() as connection:
        if 'pattern_id' not in shiftedSeasonColumns:
            # Disable FK enforcement while the referenced table is rebuilt.
            connection.execute(text("PRAGMA foreign_keys=OFF"))
            connection.execute(
                text(
                    """
                    CREATE TABLE shifted_seasons_v3 (
                    id INTEGER PRIMARY KEY,
                    show_id INTEGER,
                    pattern_id INTEGER,
                    original_season INTEGER,
                    first_episode INTEGER DEFAULT -1,
                    last_episode INTEGER DEFAULT -1,
                    season_offset INTEGER DEFAULT 0,
                    episode_offset INTEGER DEFAULT 0,
                    FOREIGN KEY(show_id) REFERENCES shows(id) ON DELETE CASCADE,
                    FOREIGN KEY(pattern_id) REFERENCES patterns(id) ON DELETE CASCADE,
                    CHECK (
                    (show_id IS NOT NULL AND pattern_id IS NULL)
                    OR (show_id IS NULL AND pattern_id IS NOT NULL)
                    )
                    )
                    """
                )
            )
            # Copy existing rows; pattern_id starts out NULL everywhere.
            connection.execute(
                text(
                    """
                    INSERT INTO shifted_seasons_v3 (
                    id,
                    show_id,
                    pattern_id,
                    original_season,
                    first_episode,
                    last_episode,
                    season_offset,
                    episode_offset
                    )
                    SELECT
                    id,
                    show_id,
                    NULL,
                    original_season,
                    first_episode,
                    last_episode,
                    season_offset,
                    episode_offset
                    FROM shifted_seasons
                    """
                )
            )
            # Swap the rebuilt table into place and restore indexes.
            connection.execute(text("DROP TABLE shifted_seasons"))
            connection.execute(text("ALTER TABLE shifted_seasons_v3 RENAME TO shifted_seasons"))
            connection.execute(
                text("CREATE INDEX ix_shifted_seasons_show_id ON shifted_seasons(show_id)")
            )
            connection.execute(
                text("CREATE INDEX ix_shifted_seasons_pattern_id ON shifted_seasons(pattern_id)")
            )
            connection.execute(text("PRAGMA foreign_keys=ON"))
        # New optional columns on shows, matching the ORM model defaults.
        if 'quality' not in showColumns:
            connection.execute(
                text("ALTER TABLE shows ADD COLUMN quality INTEGER DEFAULT 0")
            )
        if 'notes' not in showColumns:
            connection.execute(
                text("ALTER TABLE shows ADD COLUMN notes TEXT DEFAULT ''")
            )

View File

@@ -1,6 +1,6 @@
import click
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy import Column, Integer, String, Text, ForeignKey, UniqueConstraint
from sqlalchemy.orm import relationship
from .show import Base, Show
@@ -12,6 +12,9 @@ from ffx.show_descriptor import ShowDescriptor
class Pattern(Base):
__tablename__ = 'patterns'
__table_args__ = (
UniqueConstraint('show_id', 'pattern', name='uq_patterns_show_id_pattern'),
)
# v1.x
id = Column(Integer, primary_key=True)
@@ -31,8 +34,13 @@ class Pattern(Base):
tracks = relationship('Track', back_populates='pattern', cascade="all, delete", lazy='joined')
media_tags = relationship('MediaTag', back_populates='pattern', cascade="all, delete", lazy='joined')
shifted_seasons = relationship('ShiftedSeason', back_populates='pattern', cascade="all, delete", lazy='joined')
quality = Column(Integer, default=0)
notes = Column(Text, default='')
def getId(self):

View File

@@ -1,6 +1,6 @@
import click
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy import CheckConstraint, Column, ForeignKey, Index, Integer
from sqlalchemy.orm import relationship
from .show import Base, Show
@@ -9,6 +9,14 @@ from .show import Base, Show
class ShiftedSeason(Base):
__tablename__ = 'shifted_seasons'
__table_args__ = (
CheckConstraint(
"(show_id IS NOT NULL AND pattern_id IS NULL) OR (show_id IS NULL AND pattern_id IS NOT NULL)",
name="ck_shifted_seasons_single_owner",
),
Index("ix_shifted_seasons_show_id", "show_id"),
Index("ix_shifted_seasons_pattern_id", "pattern_id"),
)
# v1.x
id = Column(Integer, primary_key=True)
@@ -19,9 +27,12 @@ class ShiftedSeason(Base):
# pattern: Mapped[str] = mapped_column(String, nullable=False)
# v1.x
show_id = Column(Integer, ForeignKey('shows.id', ondelete="CASCADE"))
show_id = Column(Integer, ForeignKey('shows.id', ondelete="CASCADE"), nullable=True)
show = relationship(Show, back_populates='shifted_seasons', lazy='joined')
pattern_id = Column(Integer, ForeignKey('patterns.id', ondelete="CASCADE"), nullable=True)
pattern = relationship('Pattern', back_populates='shifted_seasons', lazy='joined')
# v2.0
# show_id: Mapped[int] = mapped_column(ForeignKey("shows.id", ondelete="CASCADE"))
# show: Mapped["Show"] = relationship(back_populates="patterns")
@@ -39,6 +50,12 @@ class ShiftedSeason(Base):
def getId(self):
return self.id
def getShowId(self):
return self.show_id
def getPatternId(self):
return self.pattern_id
def getOriginalSeason(self):
return self.original_season
@@ -61,6 +78,8 @@ class ShiftedSeason(Base):
shiftedSeasonObj = {}
shiftedSeasonObj['show_id'] = self.getShowId()
shiftedSeasonObj['pattern_id'] = self.getPatternId()
shiftedSeasonObj['original_season'] = self.getOriginalSeason()
shiftedSeasonObj['first_episode'] = self.getFirstEpisode()
shiftedSeasonObj['last_episode'] = self.getLastEpisode()
@@ -68,4 +87,3 @@ class ShiftedSeason(Base):
shiftedSeasonObj['episode_offset'] = self.getEpisodeOffset()
return shiftedSeasonObj

View File

@@ -1,5 +1,5 @@
# from typing import List
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy import create_engine, Column, Integer, String, Text, ForeignKey
from sqlalchemy.orm import relationship, declarative_base, sessionmaker
from ffx.show_descriptor import ShowDescriptor
@@ -45,6 +45,8 @@ class Show(Base):
index_episode_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS)
indicator_season_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS)
indicator_episode_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS)
quality = Column(Integer, default=0)
notes = Column(Text, default='')
def getDescriptor(self, context):
@@ -58,5 +60,7 @@ class Show(Base):
kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY] = int(self.index_episode_digits)
kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY] = int(self.indicator_season_digits)
kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY] = int(self.indicator_episode_digits)
kwargs[ShowDescriptor.QUALITY_KEY] = int(self.quality or 0)
kwargs[ShowDescriptor.NOTES_KEY] = str(self.notes or '')
return ShowDescriptor(**kwargs)

View File

@@ -4,6 +4,7 @@ from sqlalchemy.orm import relationship, declarative_base, sessionmaker
from .show import Base
from ffx.attachment_format import AttachmentFormat
from ffx.track_type import TrackType
from ffx.iso_language import IsoLanguage
@@ -132,9 +133,16 @@ class Track(Base):
if trackType in [t.label() for t in TrackType]:
if trackType == TrackType.ATTACHMENT.label():
storedFormatIdentifier = AttachmentFormat.identifyFfprobeStream(streamObj).identifier()
else:
storedFormatIdentifier = TrackCodec.identify(
streamObj.get(TrackDescriptor.FFPROBE_CODEC_KEY)
).identifier()
return cls(pattern_id = patternId,
track_type = trackType,
codec_name = streamObj[TrackDescriptor.FFPROBE_CODEC_NAME_KEY],
codec_name = storedFormatIdentifier,
disposition_flags = sum([2**t.index() for (k,v) in streamObj[TrackDescriptor.FFPROBE_DISPOSITION_KEY].items()
if v and (t := TrackDisposition.find(k)) is not None]),
audio_layout = AudioLayout.identify(streamObj))
@@ -153,8 +161,20 @@ class Track(Base):
return TrackType.fromIndex(self.track_type)
def getCodec(self) -> TrackCodec:
if self.getType() == TrackType.ATTACHMENT:
return TrackCodec.UNKNOWN
return TrackCodec.identify(self.codec_name)
def getAttachmentFormat(self) -> AttachmentFormat:
if self.getType() != TrackType.ATTACHMENT:
return AttachmentFormat.UNKNOWN
return AttachmentFormat.identify(self.codec_name)
def getFormatDescriptor(self):
if self.getType() == TrackType.ATTACHMENT:
return self.getAttachmentFormat()
return self.getCodec()
def getIndex(self):
return int(self.index) if self.index is not None else -1
@@ -206,7 +226,10 @@ class Track(Base):
kwargs[TrackDescriptor.SUB_INDEX_KEY] = subIndex
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = self.getType()
kwargs[TrackDescriptor.CODEC_KEY] = self.getCodec()
if self.getType() == TrackType.ATTACHMENT:
kwargs[TrackDescriptor.ATTACHMENT_FORMAT_KEY] = self.getAttachmentFormat()
else:
kwargs[TrackDescriptor.CODEC_KEY] = self.getCodec()
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = self.getDispositionSet()
kwargs[TrackDescriptor.TAGS_KEY] = self.getTags()

View File

@@ -1,156 +1,411 @@
import click, re
import re
import click
from ffx.model.media_tag import MediaTag
from ffx.model.pattern import Pattern
from ffx.model.track import Track
from ffx.model.track_tag import TrackTag
from ffx.track_descriptor import TrackDescriptor
from ffx.track_disposition import TrackDisposition
class PatternController():
class DuplicatePatternMatchError(click.ClickException):
pass
class InvalidPatternSchemaError(click.ClickException):
pass
class PatternController:
_compiled_regex_cache: dict[str, re.Pattern] = {}
def __init__(self, context):
self.context = context
self.Session = self.context['database']['session'] # convenience
self.Session = self.context["database"]["session"]
self.__configurationData = self.context["config"].getData()
def addPattern(self, patternDescriptor):
metadataConfiguration = (
self.__configurationData["metadata"]
if "metadata" in self.__configurationData.keys()
else {}
)
self.__removeTrackKeys = (
metadataConfiguration["streams"]["remove"]
if "streams" in metadataConfiguration.keys()
and "remove" in metadataConfiguration["streams"].keys()
else []
)
self.__ignoreTrackKeys = (
metadataConfiguration["streams"]["ignore"]
if "streams" in metadataConfiguration.keys()
and "ignore" in metadataConfiguration["streams"].keys()
else []
)
@classmethod
def _clear_regex_cache(cls):
cls._compiled_regex_cache.clear()
@classmethod
def _compile_pattern_expression(cls, pattern_id: int, expression: str) -> re.Pattern:
expression_text = str(expression)
compiled = cls._compiled_regex_cache.get(expression_text)
if compiled is None:
try:
compiled = re.compile(expression_text)
except re.error as ex:
raise click.ClickException(
f"Pattern #{pattern_id} contains an invalid regex {expression_text!r}: {ex}"
)
cls._compiled_regex_cache[expression_text] = compiled
return compiled
def _coerce_pattern_fields(self, patternObj):
return {
"show_id": int(patternObj["show_id"]),
"pattern": str(patternObj["pattern"]),
"quality": int(patternObj.get("quality", 0) or 0),
"notes": str(patternObj.get("notes", "")),
}
def _coerce_media_tags(self, mediaTags):
return {
str(tagKey): str(tagValue)
for tagKey, tagValue in (mediaTags or {}).items()
}
def _normalize_track_descriptors(self, trackDescriptors):
if trackDescriptors is None:
raise InvalidPatternSchemaError(
"Patterns must define at least one track before they can be stored."
)
normalized_descriptors = []
for trackDescriptor in trackDescriptors:
if type(trackDescriptor) is not TrackDescriptor:
raise TypeError(
"PatternController: All track descriptors are required to be of type TrackDescriptor"
)
normalized_descriptors.append(trackDescriptor)
if not normalized_descriptors:
raise InvalidPatternSchemaError(
"Patterns must define at least one track before they can be stored."
)
normalized_descriptors = sorted(
normalized_descriptors, key=lambda descriptor: descriptor.getIndex()
)
index_set = {descriptor.getIndex() for descriptor in normalized_descriptors}
expected_indexes = set(range(len(normalized_descriptors)))
if index_set != expected_indexes:
raise click.ClickException(
"Pattern tracks must use a contiguous zero-based index order."
)
return normalized_descriptors
def _ensure_unique_pattern_definition(
self,
session,
show_id: int,
pattern_expression: str,
exclude_pattern_id: int | None = None,
):
query = session.query(Pattern).filter(
Pattern.show_id == show_id,
Pattern.pattern == pattern_expression,
)
if exclude_pattern_id is not None:
query = query.filter(Pattern.id != int(exclude_pattern_id))
existing_pattern = query.first()
if existing_pattern is not None:
raise click.ClickException(
f"Pattern {pattern_expression!r} already exists for show #{show_id}."
)
def _build_track_row(self, trackDescriptor: TrackDescriptor) -> Track:
track = Track(
track_type=int(trackDescriptor.getType().index()),
codec_name=str(trackDescriptor.getFormatDescriptor().identifier()),
index=int(trackDescriptor.getIndex()),
source_index=int(trackDescriptor.getSourceIndex()),
disposition_flags=int(
TrackDisposition.toFlags(trackDescriptor.getDispositionSet())
),
audio_layout=trackDescriptor.getAudioLayout().index(),
)
for tagKey, tagValue in trackDescriptor.getTags().items():
if tagKey in self.__ignoreTrackKeys or tagKey in self.__removeTrackKeys:
continue
track.track_tags.append(TrackTag(key=str(tagKey), value=str(tagValue)))
return track
def _replace_pattern_schema(
self,
session,
pattern: Pattern,
mediaTags: dict[str, str],
trackDescriptors: list[TrackDescriptor],
):
for mediaTag in list(pattern.media_tags):
session.delete(mediaTag)
for track in list(pattern.tracks):
session.delete(track)
session.flush()
for tagKey, tagValue in mediaTags.items():
pattern.media_tags.append(MediaTag(key=str(tagKey), value=str(tagValue)))
for trackDescriptor in trackDescriptors:
pattern.tracks.append(self._build_track_row(trackDescriptor))
def _validate_persisted_pattern(self, pattern: Pattern):
if not pattern.tracks:
raise InvalidPatternSchemaError(
f"Pattern #{pattern.getId()} ({pattern.getPattern()!r}) is invalid because it has no tracks."
)
def savePatternSchema(
self,
patternObj,
trackDescriptors,
mediaTags=None,
patternId: int | None = None,
) -> int:
fields = self._coerce_pattern_fields(patternObj)
normalized_tracks = self._normalize_track_descriptors(trackDescriptors)
normalized_tags = self._coerce_media_tags(mediaTags)
session = None
try:
session = self.Session()
self._ensure_unique_pattern_definition(
session,
fields["show_id"],
fields["pattern"],
exclude_pattern_id=patternId,
)
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']),
Pattern.pattern == str(patternDescriptor['pattern']))
if not q.count():
pattern = Pattern(show_id = int(patternDescriptor['show_id']),
pattern = str(patternDescriptor['pattern']))
s.add(pattern)
s.commit()
return pattern.getId()
if patternId is None:
pattern = Pattern(
show_id=fields["show_id"],
pattern=fields["pattern"],
quality=fields["quality"],
notes=fields["notes"],
)
session.add(pattern)
session.flush()
else:
return 0
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
if pattern is None:
raise click.ClickException(
f"PatternController.savePatternSchema(): Pattern #{patternId} not found"
)
pattern.show_id = fields["show_id"]
pattern.pattern = fields["pattern"]
pattern.quality = fields["quality"]
pattern.notes = fields["notes"]
self._replace_pattern_schema(
session,
pattern,
normalized_tags,
normalized_tracks,
)
session.commit()
self._clear_regex_cache()
return pattern.getId()
except click.ClickException:
raise
except Exception as ex:
raise click.ClickException(f"PatternController.addPattern(): {repr(ex)}")
raise click.ClickException(
f"PatternController.savePatternSchema(): {repr(ex)}"
)
finally:
s.close()
if session is not None:
session.close()
def addPattern(self, patternObj, trackDescriptors=None, mediaTags=None):
return self.savePatternSchema(
patternObj,
trackDescriptors=trackDescriptors,
mediaTags=mediaTags,
)
def updatePattern(self, patternId, patternDescriptor):
def updatePattern(self, patternId, patternObj):
fields = self._coerce_pattern_fields(patternObj)
session = None
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == int(patternId))
session = self.Session()
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
if q.count():
if pattern is not None:
self._ensure_unique_pattern_definition(
session,
fields["show_id"],
fields["pattern"],
exclude_pattern_id=patternId,
)
self._validate_persisted_pattern(pattern)
pattern = q.first()
pattern.show_id = fields["show_id"]
pattern.pattern = fields["pattern"]
pattern.quality = fields["quality"]
pattern.notes = fields["notes"]
pattern.show_id = int(patternDescriptor['show_id'])
pattern.pattern = str(patternDescriptor['pattern'])
s.commit()
session.commit()
self._clear_regex_cache()
return True
else:
return False
return False
except click.ClickException:
raise
except Exception as ex:
raise click.ClickException(f"PatternController.updatePattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def findPattern(self, patternObj):
session = None
def findPattern(self, patternDescriptor):
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']), Pattern.pattern == str(patternDescriptor['pattern']))
session = self.Session()
pattern = (
session.query(Pattern)
.filter(
Pattern.show_id == int(patternObj["show_id"]),
Pattern.pattern == str(patternObj["pattern"]),
)
.first()
)
if q.count():
pattern = q.first()
if pattern is not None:
return int(pattern.id)
else:
return None
return None
except Exception as ex:
raise click.ClickException(f"PatternController.findPattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def getPatternsForShow(self, showId: int) -> list[Pattern]:
def getPattern(self, patternId : int):
if type(showId) is not int:
raise ValueError(
"PatternController.getPatternsForShow(): Argument showId is required to be of type int"
)
session = None
try:
session = self.Session()
return (
session.query(Pattern)
.filter(Pattern.show_id == int(showId))
.order_by(Pattern.id)
.all()
)
except Exception as ex:
raise click.ClickException(f"PatternController.getPatternsForShow(): {repr(ex)}")
finally:
if session is not None:
session.close()
def getPattern(self, patternId: int):
if type(patternId) is not int:
raise ValueError(f"PatternController.getPattern(): Argument patternId is required to be of type int")
raise ValueError(
"PatternController.getPattern(): Argument patternId is required to be of type int"
)
session = None
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == int(patternId))
return q.first() if q.count() else None
session = self.Session()
return session.query(Pattern).filter(Pattern.id == int(patternId)).first()
except Exception as ex:
raise click.ClickException(f"PatternController.getPattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def deletePattern(self, patternId):
session = None
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == int(patternId))
session = self.Session()
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
if q.count():
#DAFUQ: https://stackoverflow.com/a/19245058
# q.delete()
pattern = q.first()
s.delete(pattern)
s.commit()
if pattern is not None:
session.delete(pattern)
session.commit()
self._clear_regex_cache()
return True
return False
except Exception as ex:
raise click.ClickException(f"PatternController.deletePattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def matchFilename(self, filename : str) -> dict:
"""Returns dict {'match': <a regex match obj>, 'pattern': <ffx pattern obj>} or empty dict of no pattern was found"""
def matchFilename(self, filename: str) -> dict:
"""Return {'match': regex match, 'pattern': Pattern} or {} when unmatched."""
session = None
try:
s = self.Session()
q = s.query(Pattern)
session = self.Session()
matches = []
query = session.query(Pattern).order_by(Pattern.show_id, Pattern.id)
matchResult = {}
for pattern in q.all():
patternMatch = re.search(str(pattern.pattern), str(filename))
if patternMatch is not None:
matchResult['match'] = patternMatch
matchResult['pattern'] = pattern
for pattern in query.all():
compiled = self._compile_pattern_expression(
pattern.getId(),
pattern.getPattern(),
)
patternMatch = compiled.search(str(filename))
if patternMatch is None:
continue
return matchResult
self._validate_persisted_pattern(pattern)
matches.append({"match": patternMatch, "pattern": pattern})
if not matches:
return {}
if len(matches) > 1:
duplicateDescriptions = ", ".join(
[
f"show #{match['pattern'].getShowId()} pattern #{match['pattern'].getId()} {match['pattern'].getPattern()!r}"
for match in matches
]
)
raise DuplicatePatternMatchError(
f"Filename {filename!r} matched more than one pattern: {duplicateDescriptions}"
)
return matches[0]
except click.ClickException:
raise
except Exception as ex:
raise click.ClickException(f"PatternController.matchFilename(): {repr(ex)}")
finally:
s.close()
# def getMediaDescriptor(self, context, patternId):
#
# try:
# s = self.Session()
# q = s.query(Pattern).filter(Pattern.id == int(patternId))
#
# if q.count():
# return q.first().getMediaDescriptor(context)
# else:
# return None
#
# except Exception as ex:
# raise click.ClickException(f"PatternController.getMediaDescriptor(): {repr(ex)}")
# finally:
# s.close()
if session is not None:
session.close()

View File

@@ -4,8 +4,10 @@ from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button
from textual.containers import Grid
from .i18n import t
from .show_controller import ShowController
from .pattern_controller import PatternController
from .screen_support import build_screen_log_pane, go_back_or_exit
from ffx.model.pattern import Pattern
@@ -13,15 +15,22 @@ from ffx.model.pattern import Pattern
# Screen[dict[int, str, int]]
class PatternDeleteScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 2;
grid-rows: 2 auto;
grid-columns: 30 330;
grid-columns: 18 5fr;
height: 100%;
width: 100%;
min-width: 90;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -59,6 +68,10 @@ class PatternDeleteScreen(Screen):
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
if self.__showDescriptor:
self.query_one("#showlabel", Static).update(f"{self.__showDescriptor.getId()} - {self.__showDescriptor.getName()} ({self.__showDescriptor.getYear()})")
if not self.__pattern is None:
@@ -70,24 +83,31 @@ class PatternDeleteScreen(Screen):
yield Header()
with Grid():
# Row 1
yield Static(t("Are you sure to delete the following filename pattern?"), id="toplabel", classes="two")
yield Static("Are you sure to delete the following filename pattern?", id="toplabel", classes="two")
# Row 2
yield Static("", classes="two")
yield Static("Pattern")
# Row 3
yield Static(t("Pattern"))
yield Static("", id="patternlabel")
# Row 4
yield Static("", classes="two")
yield Static("from show")
# Row 5
yield Static(t("from show"))
yield Static("", id="showlabel")
# Row 6
yield Static("", classes="two")
yield Button("Delete", id="delete_button")
yield Button("Cancel", id="cancel_button")
# Row 7
yield Button(t("Delete"), id="delete_button")
yield Button(t("Cancel"), id="cancel_button")
yield build_screen_log_pane()
yield Footer()
@@ -109,3 +129,5 @@ class PatternDeleteScreen(Screen):
if event.button.id == "cancel_button":
self.app.pop_screen()
def action_back(self):
go_back_or_exit(self)

View File

@@ -1,23 +1,28 @@
import click, re
from typing import List
from textual import events
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input, DataTable
from textual.widgets import Header, Footer, Static, Button, Input, DataTable, TextArea
from textual.containers import Grid
from ffx.model.pattern import Pattern
from ffx.model.track import Track
from .pattern_controller import PatternController
from .show_controller import ShowController
from .track_controller import TrackController
from .tag_controller import TagController
from .track_details_screen import TrackDetailsScreen
from .track_delete_screen import TrackDeleteScreen
from .shifted_season_delete_screen import ShiftedSeasonDeleteScreen
from .shifted_season_details_screen import ShiftedSeasonDetailsScreen
from .tag_details_screen import TagDetailsScreen
from .tag_delete_screen import TagDeleteScreen
from .screen_support import (
add_auto_table_column,
build_screen_bootstrap,
build_screen_controllers,
build_screen_log_pane,
go_back_or_exit,
populate_tag_table,
)
from ffx.track_type import TrackType
@@ -29,20 +34,29 @@ from textual.widgets._data_table import CellDoesNotExist
from ffx.file_properties import FileProperties
from ffx.iso_language import IsoLanguage
from ffx.audio_layout import AudioLayout
from ffx.model.shifted_season import ShiftedSeason
from .i18n import t
# Screen[dict[int, str, int]]
class PatternDetailsScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 7 13;
grid-rows: 2 2 2 2 2 8 2 2 8 2 2 2 2;
grid-columns: 25 25 25 25 25 25 25;
grid-size: 7 20;
grid-rows: 2 2 2 2 2 2 6 2 2 8 2 2 8 2 2 8 2 2 2 2;
grid-columns: 18 1fr 1fr 1fr 1fr 1fr 1fr;
height: 100%;
width: 100%;
min-width: 140;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -54,6 +68,7 @@ class PatternDetailsScreen(Screen):
DataTable {
min-height: 6;
width: 100%;
}
DataTable .datatable--cursor {
@@ -87,6 +102,12 @@ class PatternDetailsScreen(Screen):
column-span: 7;
}
.four_box {
min-height: 6;
}
.box {
height: 100%;
border: solid green;
@@ -100,54 +121,49 @@ class PatternDetailsScreen(Screen):
def __init__(self, patternId = None, showId = None):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
bootstrap = build_screen_bootstrap(self.app.getContext())
self.context = bootstrap.context
self.__pc = PatternController(context = self.context)
self.__sc = ShowController(context = self.context)
self.__tc = TrackController(context = self.context)
self.__tac = TagController(context = self.context)
self.__removeGlobalKeys = bootstrap.remove_global_keys
self.__ignoreGlobalKeys = bootstrap.ignore_global_keys
controllers = build_screen_controllers(
self.context,
pattern=True,
show=True,
track=True,
tag=True,
shifted_season=True,
)
self.__pc = controllers['pattern']
self.__sc = controllers['show']
self.__tc = controllers['track']
self.__tac = controllers['tag']
self.__ssc = controllers['shifted_season']
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else None
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
#TODO: per controller
def loadTracks(self, show_id):
try:
tracks = {}
tracks['audio'] = {}
tracks['subtitle'] = {}
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(show_id))
return [{'id': int(p.id), 'pattern': p.pattern} for p in q.all()]
except Exception as ex:
raise click.ClickException(f"loadTracks(): {repr(ex)}")
finally:
s.close()
self.__draftTracks : List[TrackDescriptor] = []
self.__draftTags : dict[str, str] = {}
self.__trackRowData: dict[object, TrackDescriptor] = {}
self.__tagRowData: dict[object, tuple[str, str]] = {}
self.__shiftedSeasonRowData: dict[object, dict[str, int | None]] = {}
def updateTracks(self):
self.tracksTable.clear()
self.__trackRowData = {}
if self.__pattern is not None:
tracks = self.getCurrentTrackDescriptors()
tracks = self.__tc.findTracks(self.__pattern.getId())
typeCounter = {}
typeCounter = {}
td: TrackDescriptor
for td in tracks:
tr: Track
for tr in tracks:
if (trackType := td.getType()) != TrackType.ATTACHMENT:
td : TrackDescriptor = tr.getDescriptor(self.context)
trackType = td.getType()
if not trackType in typeCounter.keys():
typeCounter[trackType] = 0
@@ -155,28 +171,66 @@ class PatternDetailsScreen(Screen):
trackLanguage = td.getLanguage()
audioLayout = td.getAudioLayout()
row = (td.getIndex(),
trackType.label(),
t(trackType.label()),
typeCounter[trackType],
td.getCodec().label(),
audioLayout.label() if trackType == TrackType.AUDIO
td.getFormatDescriptor().label(),
t(audioLayout.label()) if trackType == TrackType.AUDIO
and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
trackLanguage.label() if trackLanguage != IsoLanguage.UNDEFINED else ' ',
td.getTitle(),
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
t('Yes') if TrackDisposition.DEFAULT in dispoSet else t('No'),
t('Yes') if TrackDisposition.FORCED in dispoSet else t('No'),
td.getSourceIndex())
self.tracksTable.add_row(*map(str, row))
row_key = self.tracksTable.add_row(*map(str, row))
self.__trackRowData[row_key] = td
typeCounter[trackType] += 1
def getCurrentTrackDescriptors(self) -> List[TrackDescriptor]:
if self.__pattern is not None:
return self.__tc.findSiblingDescriptors(self.__pattern.getId())
return list(self.__draftTracks)
def normalizeDraftTracks(self):
typeCounter = {}
for index, trackDescriptor in enumerate(self.__draftTracks):
trackDescriptor.setIndex(index)
trackType = trackDescriptor.getType()
subIndex = typeCounter.get(trackType, 0)
trackDescriptor.setSubIndex(subIndex)
typeCounter[trackType] = subIndex + 1
if trackDescriptor.getSourceIndex() < 0:
trackDescriptor.setSourceIndex(index)
def swapTracks(self, trackIndex1: int, trackIndex2: int):
ti1 = int(trackIndex1)
ti2 = int(trackIndex2)
if self.__pattern is None:
numSiblings = len(self.__draftTracks)
if ti1 < 0 or ti1 >= numSiblings:
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex1 ({ti1}) is out of range ({numSiblings})")
if ti2 < 0 or ti2 >= numSiblings:
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex2 ({ti2}) is out of range ({numSiblings})")
self.__draftTracks[ti1], self.__draftTracks[ti2] = self.__draftTracks[ti2], self.__draftTracks[ti1]
self.normalizeDraftTracks()
self.updateTracks()
return
siblingDescriptors: List[TrackDescriptor] = self.__tc.findSiblingDescriptors(self.__pattern.getId())
numSiblings = len(siblingDescriptors)
@@ -209,20 +263,72 @@ class PatternDetailsScreen(Screen):
def updateTags(self):
tags = (
self.__tac.findAllMediaTags(self.__pattern.getId())
if self.__pattern is not None
else self.__draftTags
)
self.tagsTable.clear()
self.__tagRowData = populate_tag_table(
self.tagsTable,
tags,
ignore_keys=self.__ignoreGlobalKeys,
remove_keys=self.__removeGlobalKeys,
)
if self.__pattern is not None:
def updateShiftedSeasons(self):
tags = self.__tac.findAllMediaTags(self.__pattern.getId())
self.shiftedSeasonsTable.clear()
self.__shiftedSeasonRowData = {}
for tagKey, tagValue in tags.items():
row = (tagKey, tagValue)
self.tagsTable.add_row(*map(str, row))
if self.__pattern is None:
return
shiftedSeason: ShiftedSeason
for shiftedSeason in self.__ssc.getShiftedSeasonSiblings(patternId=self.__pattern.getId()):
shiftedSeasonObj = shiftedSeason.getObj()
shiftedSeasonObj['id'] = shiftedSeason.getId()
firstEpisode = shiftedSeasonObj['first_episode']
firstEpisodeStr = str(firstEpisode) if firstEpisode != -1 else ''
lastEpisode = shiftedSeasonObj['last_episode']
lastEpisodeStr = str(lastEpisode) if lastEpisode != -1 else ''
row = (
shiftedSeasonObj['original_season'],
firstEpisodeStr,
lastEpisodeStr,
shiftedSeasonObj['season_offset'],
shiftedSeasonObj['episode_offset'],
)
row_key = self.shiftedSeasonsTable.add_row(*map(str, row))
self.__shiftedSeasonRowData[row_key] = shiftedSeasonObj
def getSelectedShiftedSeasonObjFromInput(self):
shiftedSeasonObj = {}
try:
row_key, col_key = self.shiftedSeasonsTable.coordinate_to_cell_key(
self.shiftedSeasonsTable.cursor_coordinate
)
if row_key is not None:
shiftedSeasonObj = dict(self.__shiftedSeasonRowData.get(row_key, {}))
except CellDoesNotExist:
pass
return shiftedSeasonObj
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
if not self.__showDescriptor is None:
self.query_one("#showlabel", Static).update(f"{self.__showDescriptor.getId()} - {self.__showDescriptor.getName()} ({self.__showDescriptor.getYear()})")
@@ -230,8 +336,25 @@ class PatternDetailsScreen(Screen):
self.query_one("#pattern_input", Input).value = str(self.__pattern.getPattern())
if self.__pattern and self.__pattern.quality:
self.query_one("#quality_input", Input).value = str(self.__pattern.quality)
if self.__pattern and self.__pattern.notes:
self.query_one("#notes_textarea", TextArea).text = str(self.__pattern.notes)
self.updateTags()
self.updateTracks()
self.updateShiftedSeasons()
def on_screen_resume(self, _event: events.ScreenResume) -> None:
if not hasattr(self, "tracksTable") or not hasattr(self, "tagsTable"):
return
self.updateTags()
self.updateTracks()
if self.__pattern is not None and hasattr(self, "shiftedSeasonsTable"):
self.updateShiftedSeasons()
def compose(self):
@@ -239,54 +362,83 @@ class PatternDetailsScreen(Screen):
self.tagsTable = DataTable(classes="seven")
# Define the columns with headers
self.column_key_tag_key = self.tagsTable.add_column("Key", width=50)
self.column_key_tag_value = self.tagsTable.add_column("Value", width=100)
self.column_key_tag_key = add_auto_table_column(self.tagsTable, t("Key"))
self.column_key_tag_value = add_auto_table_column(self.tagsTable, t("Value"))
self.tagsTable.cursor_type = 'row'
self.tracksTable = DataTable(id="tracks_table", classes="seven")
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
self.column_key_track_audio_layout = self.tracksTable.add_column("Layout", width=10)
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
self.column_key_track_source_index = self.tracksTable.add_column("SrcIndex", width=8)
self.column_key_track_index = add_auto_table_column(self.tracksTable, t("Index"))
self.column_key_track_type = add_auto_table_column(self.tracksTable, t("Type"))
self.column_key_track_sub_index = add_auto_table_column(self.tracksTable, t("SubIndex"))
self.column_key_track_codec = add_auto_table_column(self.tracksTable, t("Codec"))
self.column_key_track_audio_layout = add_auto_table_column(self.tracksTable, t("Layout"))
self.column_key_track_language = add_auto_table_column(self.tracksTable, t("Language"))
self.column_key_track_title = add_auto_table_column(self.tracksTable, t("Title"))
self.column_key_track_default = add_auto_table_column(self.tracksTable, t("Default"))
self.column_key_track_forced = add_auto_table_column(self.tracksTable, t("Forced"))
self.column_key_track_source_index = add_auto_table_column(self.tracksTable, t("SrcIndex"))
self.tracksTable.cursor_type = 'row'
self.shiftedSeasonsTable = DataTable(classes="seven")
self.column_key_original_season = add_auto_table_column(self.shiftedSeasonsTable, t("Source Season"))
self.column_key_first_episode = add_auto_table_column(self.shiftedSeasonsTable, t("First Episode"))
self.column_key_last_episode = add_auto_table_column(self.shiftedSeasonsTable, t("Last Episode"))
self.column_key_season_offset = add_auto_table_column(self.shiftedSeasonsTable, t("Season Offset"))
self.column_key_episode_offset = add_auto_table_column(self.shiftedSeasonsTable, t("Episode Offset"))
self.shiftedSeasonsTable.cursor_type = 'row'
yield Header()
with Grid():
# 1
yield Static("Edit filename pattern" if self.__pattern is not None else "New filename pattern", id="toplabel")
# Row 1
yield Static(t("Edit filename pattern") if self.__pattern is not None else t("New filename pattern"), id="toplabel")
yield Input(type="text", id="pattern_input", classes="six")
# 2
yield Static("from show")
# Row 2
yield Static(t("from show"))
yield Static("", id="showlabel", classes="five")
yield Button("Substitute pattern", id="pattern_button")
yield Button(t("Substitute pattern"), id="pattern_button")
# 3
yield Static(" ", classes="seven")
# 4
# Row 3
yield Static(" ", classes="seven")
# 5
yield Static("Media Tags")
# Row 4
yield Static(t("Quality"))
yield Input(type="integer", id="quality_input")
yield Static(' ', classes="five")
# Row 5
yield Static(" ", classes="seven")
# Row 6
yield Static(t("Notes"))
yield Static(" ", classes="six")
# Row 7
yield TextArea(id="notes_textarea", classes="four_box seven")
# Row 8
yield Static(" ", classes="seven")
# Row 9
yield Static(t("Numbering Mapping"))
if self.__pattern is not None:
yield Button("Add", id="button_add_tag")
yield Button("Edit", id="button_edit_tag")
yield Button("Delete", id="button_delete_tag")
yield Button(t("Add"), id="button_add_shifted_season")
yield Button(t("Edit"), id="button_edit_shifted_season")
yield Button(t("Delete"), id="button_delete_shifted_season")
else:
yield Static(" ")
yield Static(" ")
@@ -296,75 +448,82 @@ class PatternDetailsScreen(Screen):
yield Static(" ")
yield Static(" ")
# 6
# Row 10
yield self.shiftedSeasonsTable
# Row 11
yield Static(" ", classes="seven")
# Row 12
yield Static(t("Media Tags"))
yield Button(t("Add"), id="button_add_tag")
yield Button(t("Edit"), id="button_edit_tag")
yield Button(t("Delete"), id="button_delete_tag")
yield Static(" ")
yield Static(" ")
yield Static(" ")
# Row 13
yield self.tagsTable
# 7
# Row 14
yield Static(" ", classes="seven")
# 8
yield Static("Streams")
if self.__pattern is not None:
yield Button("Add", id="button_add_track")
yield Button("Edit", id="button_edit_track")
yield Button("Delete", id="button_delete_track")
else:
yield Static(" ")
yield Static(" ")
yield Static(" ")
# Row 15
yield Static(t("Streams"))
yield Button(t("Add"), id="button_add_track")
yield Button(t("Edit"), id="button_edit_track")
yield Button(t("Delete"), id="button_delete_track")
yield Static(" ")
yield Button("Up", id="button_track_up")
yield Button("Down", id="button_track_down")
yield Button(t("Up"), id="button_track_up")
yield Button(t("Down"), id="button_track_down")
# 9
# Row 16
yield self.tracksTable
# 10
# Row 17
yield Static(" ", classes="seven")
# 11
# Row 18
yield Static(" ", classes="seven")
# 12
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
# Row 19
yield Button(t("Save"), id="save_button")
yield Button(t("Cancel"), id="cancel_button")
yield Static(" ", classes="five")
# 13
# Row 20
yield Static(" ", classes="seven")
yield build_screen_log_pane()
yield Footer()
    def getPatternFromInput(self):
        """Return the pattern text currently entered in the #pattern_input field."""
        return str(self.query_one("#pattern_input", Input).value)
def getQualityFromInput(self):
try:
return int(self.query_one("#quality_input", Input).value)
except ValueError:
return 0
    def getNotesFromInput(self):
        """Return the free-form notes text from the #notes_textarea widget."""
        return str(self.query_one("#notes_textarea", TextArea).text)
def getSelectedTrackDescriptor(self):
if not self.__pattern:
return None
try:
# Fetch the currently selected row when 'Enter' is pressed
#selected_row_index = self.table.cursor_row
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
if row_key is not None:
selected_track_data = self.tracksTable.get_row(row_key)
return self.__trackRowData.get(row_key)
trackIndex = int(selected_track_data[0])
trackSubIndex = int(selected_track_data[2])
return self.__tc.getTrack(self.__pattern.getId(), trackIndex).getDescriptor(self.context, subIndex=trackSubIndex)
else:
return None
return None
except CellDoesNotExist:
return None
@@ -380,12 +539,7 @@ class PatternDetailsScreen(Screen):
row_key, col_key = self.tagsTable.coordinate_to_cell_key(self.tagsTable.cursor_coordinate)
if row_key is not None:
selected_tag_data = self.tagsTable.get_row(row_key)
tagKey = str(selected_tag_data[0])
tagValue = str(selected_tag_data[1])
return tagKey, tagValue
return self.__tagRowData.get(row_key)
else:
return None
@@ -403,6 +557,8 @@ class PatternDetailsScreen(Screen):
patternDescriptor = {}
patternDescriptor['show_id'] = self.__showDescriptor.getId()
patternDescriptor['pattern'] = self.getPatternFromInput()
patternDescriptor['quality'] = self.getQualityFromInput()
patternDescriptor['notes'] = self.getNotesFromInput()
if self.__pattern is not None:
@@ -413,7 +569,11 @@ class PatternDetailsScreen(Screen):
self.app.pop_screen()
else:
patternId = self.__pc.addPattern(patternDescriptor)
patternId = self.__pc.savePatternSchema(
patternDescriptor,
trackDescriptors=self.__draftTracks,
mediaTags=self.__draftTags,
)
if patternId:
self.dismiss(patternDescriptor)
else:
@@ -424,34 +584,82 @@ class PatternDetailsScreen(Screen):
if event.button.id == "cancel_button":
self.app.pop_screen()
if event.button.id == "button_add_shifted_season":
if self.__pattern is not None:
self.app.push_screen(
ShiftedSeasonDetailsScreen(patternId=self.__pattern.getId()),
self.handle_update_shifted_season,
)
# Save pattern when just created before adding streams
if self.__pattern is not None:
if event.button.id == "button_edit_shifted_season":
selectedShiftedSeasonObj = self.getSelectedShiftedSeasonObjFromInput()
if 'id' in selectedShiftedSeasonObj.keys():
self.app.push_screen(
ShiftedSeasonDetailsScreen(
patternId=self.__pattern.getId(),
shiftedSeasonId=selectedShiftedSeasonObj['id'],
),
self.handle_update_shifted_season,
)
numTracks = len(self.tracksTable.rows)
if event.button.id == "button_delete_shifted_season":
selectedShiftedSeasonObj = self.getSelectedShiftedSeasonObjFromInput()
if 'id' in selectedShiftedSeasonObj.keys():
self.app.push_screen(
ShiftedSeasonDeleteScreen(
patternId=self.__pattern.getId(),
shiftedSeasonId=selectedShiftedSeasonObj['id'],
),
self.handle_delete_shifted_season,
)
if event.button.id == "button_add_track":
self.app.push_screen(TrackDetailsScreen(patternId = self.__pattern.getId(), index = numTracks), self.handle_add_track)
selectedTrack = self.getSelectedTrackDescriptor()
if selectedTrack is not None:
if event.button.id == "button_edit_track":
self.app.push_screen(TrackDetailsScreen(trackDescriptor = selectedTrack), self.handle_edit_track)
if event.button.id == "button_delete_track":
self.app.push_screen(TrackDeleteScreen(trackDescriptor = selectedTrack), self.handle_delete_track)
numTracks = len(self.getCurrentTrackDescriptors())
if event.button.id == "button_add_track":
self.app.push_screen(
TrackDetailsScreen(
patternId=self.__pattern.getId() if self.__pattern is not None else None,
patternLabel=self.getPatternFromInput(),
siblingTrackDescriptors=self.getCurrentTrackDescriptors(),
index=numTracks,
),
self.handle_add_track,
)
selectedTrack = self.getSelectedTrackDescriptor()
if selectedTrack is not None:
if event.button.id == "button_edit_track":
self.app.push_screen(
TrackDetailsScreen(
trackDescriptor=selectedTrack,
patternId=self.__pattern.getId() if self.__pattern is not None else None,
patternLabel=self.getPatternFromInput(),
siblingTrackDescriptors=self.getCurrentTrackDescriptors(),
),
self.handle_edit_track,
)
if event.button.id == "button_delete_track":
self.app.push_screen(
TrackDeleteScreen(trackDescriptor = selectedTrack),
self.handle_delete_track,
)
if event.button.id == "button_add_tag":
if self.__pattern is not None:
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
if event.button.id == "button_edit_tag":
tagKey, tagValue = self.getSelectedTag()
self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)
selectedTag = self.getSelectedTag()
if selectedTag is not None:
tagKey, tagValue = selectedTag
self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)
if event.button.id == "button_delete_tag":
tagKey, tagValue = self.getSelectedTag()
self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)
selectedTag = self.getSelectedTag()
if selectedTag is not None:
tagKey, tagValue = selectedTag
self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)
if event.button.id == "pattern_button":
@@ -468,81 +676,117 @@ class PatternDetailsScreen(Screen):
if event.button.id == "button_track_up":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
selectedTrackIndex = selectedTrackDescriptor.getIndex()
if selectedTrackDescriptor is not None:
selectedTrackIndex = selectedTrackDescriptor.getIndex()
if selectedTrackIndex > 0 and selectedTrackIndex < self.tracksTable.row_count:
correspondingTrackIndex = selectedTrackIndex - 1
self.swapTracks(selectedTrackIndex, correspondingTrackIndex)
if selectedTrackIndex > 0 and selectedTrackIndex < self.tracksTable.row_count:
correspondingTrackIndex = selectedTrackIndex - 1
self.swapTracks(selectedTrackIndex, correspondingTrackIndex)
if event.button.id == "button_track_down":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
selectedTrackIndex = selectedTrackDescriptor.getIndex()
if selectedTrackDescriptor is not None:
selectedTrackIndex = selectedTrackDescriptor.getIndex()
if selectedTrackIndex >= 0 and selectedTrackIndex < (self.tracksTable.row_count - 1):
correspondingTrackIndex = selectedTrackIndex + 1
self.swapTracks(selectedTrackIndex, correspondingTrackIndex)
if selectedTrackIndex >= 0 and selectedTrackIndex < (self.tracksTable.row_count - 1):
correspondingTrackIndex = selectedTrackIndex + 1
self.swapTracks(selectedTrackIndex, correspondingTrackIndex)
def handle_add_track(self, trackDescriptor : TrackDescriptor):
if trackDescriptor is None:
return
dispoSet = trackDescriptor.getDispositionSet()
trackType = trackDescriptor.getType()
index = trackDescriptor.getIndex()
subIndex = trackDescriptor.getSubIndex()
codec = trackDescriptor.getCodec()
language = trackDescriptor.getLanguage()
title = trackDescriptor.getTitle()
if self.__pattern is not None:
self.__tc.addTrack(trackDescriptor, patternId=self.__pattern.getId())
else:
self.__draftTracks.append(trackDescriptor)
self.normalizeDraftTracks()
row = (index,
trackType.label(),
subIndex,
codec.label(),
language.label(),
title,
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
'Yes' if TrackDisposition.FORCED in dispoSet else 'No')
self.tracksTable.add_row(*map(str, row))
self.updateTracks()
def handle_edit_track(self, trackDescriptor : TrackDescriptor):
if trackDescriptor is None:
return
try:
if self.__pattern is not None:
if not self.__tc.updateTrack(trackDescriptor.getId(), trackDescriptor):
raise click.ClickException("PatternDetailsScreen.handle_edit_track(): track update failed")
else:
selectedTrack = self.getSelectedTrackDescriptor()
for index, currentTrack in enumerate(self.__draftTracks):
if (selectedTrack is not None
and currentTrack.getIndex() == selectedTrack.getIndex()
and currentTrack.getSubIndex() == selectedTrack.getSubIndex()):
self.__draftTracks[index] = trackDescriptor
break
self.normalizeDraftTracks()
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
self.tracksTable.update_cell(row_key, self.column_key_track_audio_layout,
trackDescriptor.getAudioLayout().label()
if trackDescriptor.getType() == TrackType.AUDIO else ' ')
self.tracksTable.update_cell(row_key, self.column_key_track_language, trackDescriptor.getLanguage().label())
self.tracksTable.update_cell(row_key, self.column_key_track_title, trackDescriptor.getTitle())
self.tracksTable.update_cell(row_key, self.column_key_track_default, 'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
self.tracksTable.update_cell(row_key, self.column_key_track_forced, 'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')
except CellDoesNotExist:
pass
self.updateTracks()
def handle_delete_track(self, trackDescriptor : TrackDescriptor):
if trackDescriptor is None:
return
if self.__pattern is not None:
track = self.__tc.getTrack(trackDescriptor.getPatternId(), trackDescriptor.getIndex())
if track is None:
raise click.ClickException(
f"Track is none: patternId={trackDescriptor.getPatternId()} type={trackDescriptor.getType()} subIndex={trackDescriptor.getSubIndex()}"
)
self.__tc.deleteTrack(track.getId())
else:
self.__draftTracks = [
currentTrack
for currentTrack in self.__draftTracks
if not (
currentTrack.getIndex() == trackDescriptor.getIndex()
and currentTrack.getSubIndex() == trackDescriptor.getSubIndex()
)
]
self.normalizeDraftTracks()
self.updateTracks()
def handle_update_tag(self, tag):
if tag is None:
return
if self.__pattern is None:
raise click.ClickException(f"PatternDetailsScreen.handle_update_tag: pattern not set")
self.__draftTags[str(tag[0])] = str(tag[1])
else:
if self.__tac.updateMediaTag(self.__pattern.getId(), tag[0], tag[1]) is None:
raise click.ClickException("PatternDetailsScreen.handle_update_tag(): tag update failed")
if self.__tac.updateMediaTag(self.__pattern.getId(), tag[0], tag[1]) is not None:
self.updateTags()
self.updateTags()
def handle_delete_tag(self, tag):
if tag is None:
return
if self.__pattern is None:
raise click.ClickException(f"PatternDetailsScreen.handle_delete_tag: pattern not set")
self.__draftTags.pop(str(tag[0]), None)
self.updateTags()
return
if self.__tac.deleteMediaTagByKey(self.__pattern.getId(), tag[0]):
self.updateTags()
self.updateTags()
else:
raise click.ClickException('tag delete failed')
    def handle_update_shifted_season(self, screenResult):
        """Refresh the shifted-seasons table after the child screen closes."""
        self.updateShiftedSeasons()
    def action_back(self):
        """Escape binding: pop this screen, or exit when it is the last one."""
        go_back_or_exit(self)
    def handle_delete_shifted_season(self, screenResult):
        """Refresh the shifted-seasons table after a deletion screen closes."""
        self.updateShiftedSeasons()

View File

@@ -1,39 +1,343 @@
import subprocess, logging
from typing import List
import os
import shlex
import signal
import subprocess
import threading
import time
from typing import Callable, Iterable, List
def executeProcess(commandSequence: List[str], directory: str = None, context: dict = None):
from .logging_utils import get_ffx_logger
COMMAND_TIMED_OUT_RETURN_CODE = 124
COMMAND_NOT_FOUND_RETURN_CODE = 127
MIN_NICENESS = -20
MAX_NICENESS = 19
DISABLED_NICENESS_SENTINEL = 99
DISABLED_CPU_PERCENT_SENTINEL = 0
MIN_CPU_PERCENT = 1
MAX_CPU_PERCENT = 100
def formatCommandSequence(commandSequence: Iterable[str]) -> str:
return shlex.join([str(token) for token in commandSequence])
def normalizeNiceness(niceness) -> int | None:
if niceness is None:
return None
niceness = int(niceness)
if niceness == DISABLED_NICENESS_SENTINEL:
return None
if niceness < MIN_NICENESS or niceness > MAX_NICENESS:
raise ValueError(
f"Niceness must be between {MIN_NICENESS} and {MAX_NICENESS}, "
+ f"or {DISABLED_NICENESS_SENTINEL} to disable."
)
return niceness
def getPresentCpuCount() -> int:
if hasattr(os, 'sched_getaffinity'):
affinity = os.sched_getaffinity(0)
if affinity:
return len(affinity)
cpuCount = os.cpu_count()
return cpuCount if cpuCount and cpuCount > 0 else 1
def normalizeCpuPercent(cpuPercent) -> int | None:
if cpuPercent is None:
return None
cpuPercent = str(cpuPercent).strip()
if cpuPercent.endswith('%'):
percentValue = int(cpuPercent[:-1].strip())
if percentValue == DISABLED_CPU_PERCENT_SENTINEL:
return None
if percentValue < MIN_CPU_PERCENT or percentValue > MAX_CPU_PERCENT:
raise ValueError(
f"CPU percentage must be between {MIN_CPU_PERCENT}% and {MAX_CPU_PERCENT}%, "
+ f"or {DISABLED_CPU_PERCENT_SENTINEL} to disable."
)
return percentValue * getPresentCpuCount()
cpuPercent = int(cpuPercent)
if cpuPercent == DISABLED_CPU_PERCENT_SENTINEL:
return None
if cpuPercent < MIN_CPU_PERCENT:
raise ValueError(
"CPU limit must be a positive absolute value such as 200, "
+ f"a percentage such as 25%, or {DISABLED_CPU_PERCENT_SENTINEL} to disable."
)
return cpuPercent
def getWrappedCommandSequence(commandSequence: List[str], context: dict = None) -> List[str]:
    """Wrap a command with the configured `nice` and `cpulimit` limiters.

    niceness: -20 to 19, disabled when unset
    cpu limit: positive absolute cpulimit value, or a machine-wide percentage

    When both limits are configured, cpulimit wraps a nice-adjusted command:
        cpulimit -l <cpu> -- nice -n <niceness> <command>

    Fixed: merge residue left the legacy niceness/cpu_percent computation
    and an unused logger in place, plus unreachable statements after the
    return; the normalized helpers are the single source of truth now.
    """
    resourceLimits = (context or {}).get('resource_limits', {})
    niceness = normalizeNiceness(resourceLimits.get('niceness'))
    # 'cpu_limit' is the current key; fall back to the legacy 'cpu_percent'.
    cpu_percent = normalizeCpuPercent(
        resourceLimits.get('cpu_limit', resourceLimits.get('cpu_percent'))
    )
    wrappedCommandSequence = [str(token) for token in commandSequence]
    if niceness is not None:
        wrappedCommandSequence = ['nice', '-n', str(niceness)] + wrappedCommandSequence
    if cpu_percent is not None:
        wrappedCommandSequence = ['cpulimit', '-l', str(cpu_percent), '--'] + wrappedCommandSequence
    return wrappedCommandSequence
def getProcessTimeoutSeconds(context: dict = None, timeoutSeconds: float = None):
    """Resolve the effective process timeout in seconds.

    An explicit timeoutSeconds argument wins; otherwise the value is read
    from context['resource_limits']['timeout_seconds'].  Returns None when
    no timeout is configured or the configured value is not positive.

    Fixed: merge residue interleaved old executeProcess lines (a debug log
    and a Popen/communicate sequence) into this helper's body.
    """
    if timeoutSeconds is None:
        timeoutSeconds = (context or {}).get('resource_limits', {}).get('timeout_seconds')
    if timeoutSeconds is None:
        return None
    timeoutSeconds = float(timeoutSeconds)
    return timeoutSeconds if timeoutSeconds > 0 else None
def executeProcess(
    commandSequence: List[str],
    directory: str = None,
    context: dict = None,
    timeoutSeconds: float = None,
    stdoutLineHandler: Callable[[str], bool] | None = None,
    stderrLineHandler: Callable[[str], bool] | None = None,
):
    """Run an external command and return ``(stdout, stderr, returncode)``.

    The command is wrapped with the configured nice/cpulimit limiters and an
    optional timeout (explicit argument or context['resource_limits']).  When
    a line handler is supplied, execution is delegated to
    executeStreamingProcess() so output can be observed incrementally.

    Failure conventions: a missing executable yields return code
    COMMAND_NOT_FOUND_RETURN_CODE (127) and a timeout yields
    COMMAND_TIMED_OUT_RETURN_CODE (124), with the error text in the stderr
    slot of the returned tuple.
    """
    # Prefer a caller-provided logger; fall back to the package logger.
    logger = context['logger'] if context is not None and 'logger' in context else get_ffx_logger()
    wrappedCommandSequence = getWrappedCommandSequence(commandSequence, context=context)
    timeoutSeconds = getProcessTimeoutSeconds(context=context, timeoutSeconds=timeoutSeconds)
    logger.debug(
        "executeProcess() cwd=%s timeout=%s command=%s",
        directory or '.',
        timeoutSeconds if timeoutSeconds is not None else 'none',
        formatCommandSequence(wrappedCommandSequence),
    )
    # Streaming mode: line handlers need incremental reads, not communicate().
    if stdoutLineHandler is not None or stderrLineHandler is not None:
        return executeStreamingProcess(
            wrappedCommandSequence,
            directory=directory,
            logger=logger,
            timeoutSeconds=timeoutSeconds,
            stdoutLineHandler=stdoutLineHandler,
            stderrLineHandler=stderrLineHandler,
        )
    try:
        completed = subprocess.run(
            wrappedCommandSequence,
            capture_output=True,
            text=True,
            cwd=directory,
            timeout=timeoutSeconds,
            check=False,  # non-zero exit is reported via the tuple, not raised
        )
    except FileNotFoundError as ex:
        error = (
            "Command not found while running "
            + f"{formatCommandSequence(wrappedCommandSequence)}: {ex.filename or ex}"
        )
        logger.error(error)
        return '', error, COMMAND_NOT_FOUND_RETURN_CODE
    except subprocess.TimeoutExpired as ex:
        # TimeoutExpired carries whatever partial output was captured.
        stdout = ex.stdout or ''
        stderr = ex.stderr or ''
        error = (
            f"Command timed out after {timeoutSeconds} seconds while running "
            + formatCommandSequence(wrappedCommandSequence)
        )
        if stderr:
            error = f"{error}\n{stderr}"
        logger.error(error)
        return stdout, error, COMMAND_TIMED_OUT_RETURN_CODE
    if completed.returncode != 0:
        logger.warning(
            "executeProcess() rc=%s command=%s",
            completed.returncode,
            formatCommandSequence(wrappedCommandSequence),
        )
    return completed.stdout, completed.stderr, completed.returncode
def terminateProcess(process: subprocess.Popen, *, killAfterSeconds: float = 1.0) -> None:
    """Stop *process*: SIGTERM first, then SIGKILL after *killAfterSeconds*.

    Uses os.killpg so the whole process group is signalled — assumes the
    child was started in its own session (executeStreamingProcess passes
    start_new_session=True); TODO confirm for any other caller.  Falls back
    to terminate()/kill() on platforms without killpg.
    """
    if process.poll() is not None:
        return  # already exited
    try:
        if hasattr(os, "killpg"):
            os.killpg(process.pid, signal.SIGTERM)
        else:
            process.terminate()
    except ProcessLookupError:
        return  # exited between poll() and the signal
    # Grace period: poll until the process exits or the deadline passes.
    deadline = time.monotonic() + killAfterSeconds
    while process.poll() is None and time.monotonic() < deadline:
        time.sleep(0.05)
    if process.poll() is not None:
        return
    # Still alive after the grace period: escalate to SIGKILL.
    try:
        if hasattr(os, "killpg"):
            os.killpg(process.pid, signal.SIGKILL)
        else:
            process.kill()
    except ProcessLookupError:
        return
def readProcessStream(
stream,
outputParts: list[str],
lineHandler: Callable[[str], bool] | None,
stopRequested: threading.Event,
logger,
) -> None:
try:
for line in iter(stream.readline, ''):
outputParts.append(line)
if lineHandler is None:
continue
try:
if lineHandler(line):
stopRequested.set()
except Exception:
logger.exception("Process line handler raised an exception")
finally:
stream.close()
def executeStreamingProcess(
    commandSequence: List[str],
    *,
    directory: str = None,
    logger = None,
    timeoutSeconds: float = None,
    stdoutLineHandler: Callable[[str], bool] | None = None,
    stderrLineHandler: Callable[[str], bool] | None = None,
):
    """Run a command while feeding each output line to optional handlers.

    Returns ``(stdout, stderr, returncode)`` like executeProcess().  A
    handler returning True requests early termination; an elapsed timeout
    terminates the process and yields COMMAND_TIMED_OUT_RETURN_CODE (124).
    A missing executable yields COMMAND_NOT_FOUND_RETURN_CODE (127).
    """
    logger = logger or get_ffx_logger()
    try:
        process = subprocess.Popen(
            commandSequence,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            cwd=directory,
            bufsize=1,  # line-buffered so handlers see lines promptly
            start_new_session=True,  # own group, so terminateProcess can killpg
        )
    except FileNotFoundError as ex:
        error = (
            "Command not found while running "
            + f"{formatCommandSequence(commandSequence)}: {ex.filename or ex}"
        )
        logger.error(error)
        return '', error, COMMAND_NOT_FOUND_RETURN_CODE
    stdoutParts: list[str] = []
    stderrParts: list[str] = []
    stopRequested = threading.Event()  # set by a handler returning True
    timedOut = False
    # One reader thread per pipe avoids deadlock when either buffer fills.
    stdoutThread = threading.Thread(
        target=readProcessStream,
        args=(
            process.stdout,
            stdoutParts,
            stdoutLineHandler,
            stopRequested,
            logger,
        ),
        daemon=True,
    )
    stderrThread = threading.Thread(
        target=readProcessStream,
        args=(
            process.stderr,
            stderrParts,
            stderrLineHandler,
            stopRequested,
            logger,
        ),
        daemon=True,
    )
    stdoutThread.start()
    stderrThread.start()
    deadline = (
        time.monotonic() + float(timeoutSeconds)
        if timeoutSeconds is not None
        else None
    )
    terminationRequested = False
    # Supervision loop: stop on handler request or timeout, else poll.
    while process.poll() is None:
        if stopRequested.is_set():
            terminationRequested = True
            terminateProcess(process)
            break
        if deadline is not None and time.monotonic() >= deadline:
            timedOut = True
            terminationRequested = True
            terminateProcess(process)
            break
        time.sleep(0.05)
    # Reap the process, then let the readers drain the remaining output.
    returnCode = process.wait()
    stdoutThread.join()
    stderrThread.join()
    stdout = ''.join(stdoutParts)
    stderr = ''.join(stderrParts)
    if timedOut:
        error = (
            f"Command timed out after {timeoutSeconds} seconds while running "
            + formatCommandSequence(commandSequence)
        )
        if stderr:
            error = f"{error}\n{stderr}"
        logger.error(error)
        return stdout, error, COMMAND_TIMED_OUT_RETURN_CODE
    # A non-zero exit caused by our own termination request is expected.
    if returnCode != 0 and not terminationRequested:
        logger.warning(
            "executeProcess() rc=%s command=%s",
            returnCode,
            formatCommandSequence(commandSequence),
        )
    return stdout, stderr, returnCode

393
src/ffx/screen_support.py Normal file
View File

@@ -0,0 +1,393 @@
from __future__ import annotations
import logging
import weakref
from collections.abc import Mapping
from dataclasses import dataclass
from rich.cells import cell_len
from rich.measure import measure_renderables
from rich.text import Text
from textual import events
from textual.widgets import Collapsible, RichLog, Static
from .helper import formatRichColor
from .i18n import t
from .pattern_controller import PatternController
from .show_controller import ShowController
from .shifted_season_controller import ShiftedSeasonController
from .tag_controller import TagController
from .tmdb_controller import TmdbController
from .track_controller import TrackController
SCREEN_LOG_PANE_ID = "screen_log_pane"
SCREEN_LOG_VIEW_ID = "screen_log_view"
SCREEN_LOG_RESIZE_HANDLE_ID = "screen_log_resize_handle"
SCREEN_LOG_HANDLER_NAME = "ffx-screen-log"
SCREEN_LOG_DEFAULT_HEIGHT = 8
SCREEN_LOG_MIN_HEIGHT = 4
SCREEN_LOG_COMPONENT_WIDTH = 16
SCREEN_LOG_LEVEL_WIDTH = 8
_SCREEN_LOG_PANE_ENABLED = False
class ScreenLogHandler(logging.Handler):
    """Mirror logger output into the active screen log pane when available."""

    def __init__(self, app) -> None:
        # Accept every record; filtering is governed by the logger itself.
        super().__init__(level=logging.DEBUG)
        self.set_name(SCREEN_LOG_HANDLER_NAME)
        self.set_app(app)

    def set_app(self, app) -> None:
        # Hold the app weakly so the handler never keeps a closed app alive;
        # a None app is modeled as a ref that always dereferences to None.
        self._app_ref = weakref.ref(app) if app is not None else lambda: None

    def emit(self, record: logging.LogRecord) -> None:
        """Format *record* and write it to the current screen's log pane."""
        app = self._app_ref()
        if app is None:
            return
        try:
            message = str(self.format(record)).strip()
        except Exception:
            self.handleError(record)
            return
        if not message:
            return
        try:
            # Marshal onto the UI thread; on RuntimeError (presumably raised
            # when already on the UI thread — confirm) write directly.
            app.call_from_thread(write_screen_log, app.screen, message)
        except RuntimeError:
            write_screen_log(app.screen, message)
        except Exception:
            self.handleError(record)
class ScreenLogResizeHandle(Static):
    """One-row draggable handle that resizes the surrounding log pane."""

    DEFAULT_CSS = """
    ScreenLogResizeHandle {
        width: 100%;
        height: 1;
        content-align: center middle;
        color: $text-muted;
        background: $panel-lighten-1;
    }
    ScreenLogResizeHandle:hover {
        color: $text;
        background: $panel-lighten-2;
    }
    """

    def __init__(self) -> None:
        super().__init__(" drag to resize ", id=SCREEN_LOG_RESIZE_HANDLE_ID)
        # Drag state: active flag plus pointer y and pane height at drag start.
        self._drag_active = False
        self._drag_origin_screen_y = 0
        self._drag_origin_height = SCREEN_LOG_DEFAULT_HEIGHT

    def _get_log_pane(self):
        # Two levels up — assumes the handle is mounted inside the pane's
        # contents container (ResizableScreenLogPane); TODO confirm against
        # the widget tree.
        return self.parent.parent if self.parent is not None else None

    def on_mouse_down(self, event: events.MouseDown) -> None:
        """Begin a drag on left-button press and capture the mouse."""
        if event.button != 1:
            return
        log_pane = self._get_log_pane()
        if log_pane is None:
            return
        self._drag_active = True
        self._drag_origin_screen_y = event.screen_y
        self._drag_origin_height = log_pane.get_log_height()
        self.capture_mouse()
        event.stop()

    def on_mouse_move(self, event: events.MouseMove) -> None:
        """While dragging, resize the pane by the vertical pointer delta."""
        if not self._drag_active:
            return
        log_pane = self._get_log_pane()
        if log_pane is None:
            return
        # Moving the pointer up (smaller screen_y) grows the pane.
        next_height = self._drag_origin_height + (
            self._drag_origin_screen_y - event.screen_y
        )
        log_pane.set_log_height(next_height)
        event.stop()

    def on_mouse_up(self, event: events.MouseUp) -> None:
        """End the drag and release mouse capture."""
        if not self._drag_active:
            return
        self._drag_active = False
        self.release_mouse()
        event.stop()
class ResizableScreenLogPane(Collapsible):
    """Collapsible RichLog pane with a drag handle to adjust its height."""

    def __init__(self) -> None:
        self._log_view = RichLog(
            id=SCREEN_LOG_VIEW_ID,
            wrap=True,
            markup=False,  # log lines are plain text, not Rich markup
            highlight=False,
            auto_scroll=True,  # follow the newest output
        )
        self._log_height = SCREEN_LOG_DEFAULT_HEIGHT
        self._apply_log_height()
        super().__init__(
            ScreenLogResizeHandle(),
            self._log_view,
            title=t("Log"),
            collapsed=True,  # start folded; expanded on demand
            id=SCREEN_LOG_PANE_ID,
        )
        self.styles.width = "100%"

    def _apply_log_height(self) -> None:
        # Push the tracked height onto the inner RichLog's styles.
        self._log_view.styles.height = self._log_height
        self._log_view.styles.width = "100%"

    def get_log_height(self) -> int:
        """Current log view height in rows."""
        return int(self._log_height)

    def set_log_height(self, height: int) -> None:
        """Clamp *height* to [SCREEN_LOG_MIN_HEIGHT, app height - 8] and apply."""
        next_height = max(SCREEN_LOG_MIN_HEIGHT, int(height))
        try:
            # Reserve roughly 8 rows for the rest of the screen chrome.
            available_height = int(self.app.size.height) - 8
        except Exception:
            available_height = next_height  # no app yet: skip the upper clamp
        if available_height > 0:
            next_height = min(next_height, available_height)
        self._log_height = next_height
        self._apply_log_height()
@dataclass(frozen=True)
class ScreenBootstrap:
    """Immutable bundle of configuration shared by the TUI screens.

    Built by build_screen_bootstrap() from the application context.
    """
    # application context (config, logger, controllers, ...)
    context: dict
    # raw configuration mapping from context['config'].getData()
    configuration_data: dict
    # metadata 'signature' tag mapping
    signature_tags: dict
    # whether metadata cleanup is active (context['apply_metadata_cleanup'])
    apply_cleanup: bool
    # global metadata keys to remove (empty when cleanup is disabled)
    remove_global_keys: list
    # global metadata keys to ignore
    ignore_global_keys: list
    # per-stream metadata keys to remove (empty when cleanup is disabled)
    remove_track_keys: list
    # per-stream metadata keys to ignore
    ignore_track_keys: list
def build_screen_bootstrap(context: dict) -> ScreenBootstrap:
    """Collect the metadata-related configuration a screen needs at startup."""
    configuration = context['config'].getData()
    metadataSection = configuration.get('metadata', {})
    streamSection = metadataSection.get('streams', {})
    cleanupActive = bool(context.get('apply_metadata_cleanup', True))
    # Removal lists only apply while cleanup is enabled; ignore lists always do.
    globalRemove = metadataSection.get('remove', []) if cleanupActive else []
    trackRemove = streamSection.get('remove', []) if cleanupActive else []
    return ScreenBootstrap(
        context=context,
        configuration_data=configuration,
        signature_tags=metadataSection.get('signature', {}),
        apply_cleanup=cleanupActive,
        remove_global_keys=globalRemove,
        ignore_global_keys=metadataSection.get('ignore', []),
        remove_track_keys=trackRemove,
        ignore_track_keys=streamSection.get('ignore', []),
    )
def set_screen_log_pane_enabled(enabled: bool) -> None:
    """Globally switch construction of per-screen log panes on or off."""
    global _SCREEN_LOG_PANE_ENABLED
    _SCREEN_LOG_PANE_ENABLED = True if enabled else False


def is_screen_log_pane_enabled() -> bool:
    """Report whether screens should build a real log pane."""
    return True if _SCREEN_LOG_PANE_ENABLED else False
def configure_screen_log_handler(logger, app, *, enabled: bool):
    """Attach, retarget, or remove the per-screen log handler on *logger*.

    Returns the active ScreenLogHandler, or None when disabled or when no
    logger is given.
    """
    if logger is None:
        return None
    # Locate a previously installed handler by its well-known name.
    screen_log_handler = next(
        (handler for handler in logger.handlers if handler.get_name() == SCREEN_LOG_HANDLER_NAME),
        None,
    )
    if not enabled:
        if screen_log_handler is not None:
            logger.removeHandler(screen_log_handler)
            screen_log_handler.close()
        return None
    if screen_log_handler is None:
        screen_log_handler = ScreenLogHandler(app)
        logger.addHandler(screen_log_handler)
    elif isinstance(screen_log_handler, ScreenLogHandler):
        # Reuse the existing handler, pointing it at the (possibly new) app.
        screen_log_handler.set_app(app)
    screen_log_handler.setLevel(logging.DEBUG)
    screen_log_handler.setFormatter(
        logging.Formatter(
            f"%(name)-{SCREEN_LOG_COMPONENT_WIDTH}s "
            + f"%(levelname)-{SCREEN_LOG_LEVEL_WIDTH}s "
            + "%(asctime)s | %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
    )
    return screen_log_handler
def build_screen_controllers(
    context: dict,
    *,
    pattern: bool = False,
    show: bool = False,
    track: bool = False,
    tag: bool = False,
    tmdb: bool = False,
    shifted_season: bool = False,
) -> dict[str, object]:
    """Instantiate only the controllers a screen opts into via its flags."""
    # Lazy factories so controller classes are touched only when requested.
    specification = (
        (pattern, 'pattern', lambda: PatternController(context=context)),
        (show, 'show', lambda: ShowController(context=context)),
        (track, 'track', lambda: TrackController(context=context)),
        (tag, 'tag', lambda: TagController(context=context)),
        (tmdb, 'tmdb', lambda: TmdbController()),
        (shifted_season, 'shifted_season', lambda: ShiftedSeasonController(context=context)),
    )
    controllers: dict[str, object] = {}
    for requested, name, factory in specification:
        if requested:
            controllers[name] = factory()
    return controllers
def populate_tag_table(
    table,
    tags: Mapping[str, object],
    *,
    ignore_keys: list[str],
    remove_keys: list[str],
) -> dict[object, tuple[str, str]]:
    """Render display rows while keeping raw tag data addressable by row key."""
    table.clear()
    row_data: dict[object, tuple[str, str]] = {}
    for tag_key, tag_value in tags.items():
        key_text, value_text = str(tag_key), str(tag_value)
        # Removal wins over ignore when a key appears in both lists.
        if key_text in remove_keys:
            color = "red"
        elif key_text in ignore_keys:
            color = "blue"
        else:
            color = None
        row_key = table.add_row(
            str(formatRichColor(key_text, color)),
            str(formatRichColor(value_text, color)),
        )
        row_data[row_key] = (key_text, value_text)
    return row_data
def localized_column_width(label: str, minimum: int, *, padding: int = 2) -> int:
    """Ensure translated table headers fit within their visible column width."""
    text = str(label)
    pad = int(padding)
    # Consider both Python string length and terminal cell width (wide glyphs).
    candidates = (int(minimum), len(text) + pad, int(cell_len(text)) + pad)
    return max(candidates)
def add_auto_table_column(table, label, *, key=None, default=None):
    """Add a DataTable column that sizes itself from header and cell content.

    Thin wrapper over ``table.add_column`` so call sites share one code path;
    returns whatever ``add_column`` returns (the column key).
    """
    return table.add_column(label, key=key, default=default)
def update_table_column_label(table, column_key, label) -> None:
    """Update a column label and keep auto-width columns in sync with it."""
    column = table.columns.get(column_key)
    if column is None:
        return  # unknown column key: nothing to do
    text_label = Text.from_markup(label) if isinstance(label, str) else label
    column.label = text_label
    if column.auto_width:
        # Re-measure the new label and widen content_width if needed —
        # presumably the DataTable does not re-measure labels set after
        # creation; confirm against the textual DataTable implementation.
        measured = measure_renderables(
            table.app.console,
            table.app.console.options,
            [text_label],
        ).maximum
        column.content_width = max(column.content_width, measured)
    table.refresh()
def build_screen_log_pane() -> ResizableScreenLogPane | Static:
    """Create a shared collapsible log pane for screen-local diagnostics."""
    if is_screen_log_pane_enabled():
        return ResizableScreenLogPane()
    # Feature disabled: yield an invisible placeholder so layouts stay stable.
    placeholder = Static("", id=f"{SCREEN_LOG_PANE_ID}_disabled")
    placeholder.display = False
    return placeholder
def toggle_screen_log_pane(screen) -> bool:
    """Toggle the current screen log pane when present; report success."""
    try:
        pane = screen.query_one(f"#{SCREEN_LOG_PANE_ID}", Collapsible)
    except Exception:
        return False  # pane absent or disabled on this screen
    pane.collapsed = False if pane.collapsed else True
    return True
def write_screen_log(screen, message: str) -> bool:
    """Append a line to the current screen log pane when present."""
    if message is None:
        return False
    stripped = str(message).strip()
    if not stripped:
        return False  # nothing meaningful to log
    try:
        view = screen.query_one(f"#{SCREEN_LOG_VIEW_ID}", RichLog)
    except Exception:
        return False  # pane absent or disabled on this screen
    view.write(stripped)
    return True
def go_back_or_exit(screen) -> None:
    """Pop the current screen when possible, otherwise exit the app."""
    app = screen.app
    stack = getattr(app, "screen_stack", ())
    # A stack of two or fewer means this is the last user-facing screen.
    if len(stack) <= 2:
        app.exit()
        return
    app.pop_screen()

View File

@@ -2,11 +2,30 @@ from textual.app import ComposeResult
from textual.screen import Screen
from textual.widgets import Footer, Placeholder
from .i18n import t
from .screen_support import build_screen_log_pane, go_back_or_exit
class SettingsScreen(Screen):
    """Placeholder settings screen with the shared log pane and back binding."""

    BINDINGS = [
        ("escape", "back", t("Back")),
    ]

    def __init__(self):
        super().__init__()
        # Fixed: the context was bound to a local variable instead of the
        # instance, so on_mount()'s getattr(self, 'context', {}) always saw {}.
        self.context = self.app.getContext()

    def compose(self) -> ComposeResult:
        # Fixed: merge residue yielded the untranslated Placeholder as well.
        yield Placeholder(t("Settings Screen"))
        yield build_screen_log_pane()
        yield Footer()

    def on_mount(self):
        # In debug mode, surface the screen class name in the window title.
        if getattr(self, 'context', {}).get('debug', False):
            self.title = f"{self.app.title} - {self.__class__.__name__}"

    def action_back(self):
        """Escape binding: pop this screen, or exit when it is the last one."""
        go_back_or_exit(self)

View File

@@ -6,218 +6,445 @@ from ffx.model.shifted_season import ShiftedSeason
class EpisodeOrderException(Exception):
    """Raised when a rule's last_episode precedes its first_episode."""
    pass
class RangeOverlapException(Exception):
    """Signals conflicting shifted-season rules.

    NOTE(review): the raise site is outside this view; the name suggests
    overlapping episode ranges between rules — confirm.
    """
    pass
class ShiftedSeasonController():
class ShiftedSeasonOwnerException(Exception):
    """Raised when a rule is not owned by exactly one of show or pattern."""
    pass
class ShiftedSeasonController:
def __init__(self, context):
self.context = context
self.Session = self.context['database']['session'] # convenience
self.Session = self.context['database']['session'] # convenience
def checkShiftedSeason(self, showId: int, shiftedSeasonObj: dict, shiftedSeasonId: int = 0):
def _resolve_owner(self, showId=None, patternId=None):
hasShow = showId is not None
hasPattern = patternId is not None
if hasShow == hasPattern:
raise ShiftedSeasonOwnerException(
"ShiftedSeason rules require exactly one owner: either showId or patternId."
)
if hasShow:
if type(showId) is not int:
raise ValueError(
"ShiftedSeasonController: Argument showId is required to be of type int"
)
return {
'show_id': int(showId),
'pattern_id': None,
'label': f"show #{int(showId)}",
}
if type(patternId) is not int:
raise ValueError(
"ShiftedSeasonController: Argument patternId is required to be of type int"
)
return {
'show_id': None,
'pattern_id': int(patternId),
'label': f"pattern #{int(patternId)}",
}
def _apply_owner_filter(self, query, owner):
if owner['pattern_id'] is not None:
return query.filter(ShiftedSeason.pattern_id == owner['pattern_id'])
return query.filter(ShiftedSeason.show_id == owner['show_id'])
def _normalize_shifted_season_fields(self, shiftedSeasonObj: dict):
if type(shiftedSeasonObj) is not dict:
raise ValueError(
"ShiftedSeasonController: Argument shiftedSeasonObj is required to be of type dict"
)
fields = {
'original_season': int(shiftedSeasonObj['original_season']),
'first_episode': int(shiftedSeasonObj['first_episode']),
'last_episode': int(shiftedSeasonObj['last_episode']),
'season_offset': int(shiftedSeasonObj['season_offset']),
'episode_offset': int(shiftedSeasonObj['episode_offset']),
}
firstEpisode = fields['first_episode']
lastEpisode = fields['last_episode']
if firstEpisode != -1 and lastEpisode != -1 and lastEpisode < firstEpisode:
raise EpisodeOrderException(
"ShiftedSeason last_episode must be greater than or equal to first_episode."
)
return fields
def _ranges_overlap(self, firstEpisodeA, lastEpisodeA, firstEpisodeB, lastEpisodeB):
startA = float('-inf') if int(firstEpisodeA) == -1 else int(firstEpisodeA)
endA = float('inf') if int(lastEpisodeA) == -1 else int(lastEpisodeA)
startB = float('-inf') if int(firstEpisodeB) == -1 else int(firstEpisodeB)
endB = float('inf') if int(lastEpisodeB) == -1 else int(lastEpisodeB)
return startA <= endB and startB <= endA
def _ordered_query(self, session, owner):
q = self._apply_owner_filter(session.query(ShiftedSeason), owner)
return q.order_by(
ShiftedSeason.original_season.asc(),
ShiftedSeason.first_episode.asc(),
ShiftedSeason.last_episode.asc(),
ShiftedSeason.id.asc(),
)
def _find_matching_rule(self, session, owner, season: int, episode: int):
for shiftedSeasonEntry in self._ordered_query(session, owner).all():
if (
season == shiftedSeasonEntry.getOriginalSeason()
and (
shiftedSeasonEntry.getFirstEpisode() == -1
or episode >= shiftedSeasonEntry.getFirstEpisode()
)
and (
shiftedSeasonEntry.getLastEpisode() == -1
or episode <= shiftedSeasonEntry.getLastEpisode()
)
):
return shiftedSeasonEntry
return None
def checkShiftedSeason(
self,
showId: int | None = None,
shiftedSeasonObj: dict | None = None,
shiftedSeasonId: int = 0,
patternId: int | None = None,
):
"""
Check whether a shifted-season rule is valid within one owner scope.
"""
session = None
try:
s = self.Session()
owner = self._resolve_owner(showId=showId, patternId=patternId)
fields = self._normalize_shifted_season_fields(shiftedSeasonObj)
session = self.Session()
firstEpisode = int(shiftedSeasonObj['first_episode'])
lastEpisode = int(shiftedSeasonObj['last_episode'])
q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId))
q = self._ordered_query(session, owner)
if shiftedSeasonId:
q = q.filter(ShiftedSeason.id != int(shiftedSeasonId))
siblingShiftedSeason: ShiftedSeason
for siblingShiftedSeason in q.all():
siblingFirstEpisode = siblingShiftedSeason.getFirstEpisode()
siblingLastEpisode = siblingShiftedSeason.getLastEpisode()
if (lastEpisode >= siblingFirstEpisode
and siblingLastEpisode >= firstEpisode):
if fields['original_season'] != siblingShiftedSeason.getOriginalSeason():
continue
if self._ranges_overlap(
fields['first_episode'],
fields['last_episode'],
siblingShiftedSeason.getFirstEpisode(),
siblingShiftedSeason.getLastEpisode(),
):
return False
return True
except (EpisodeOrderException, ShiftedSeasonOwnerException) as ex:
raise click.ClickException(str(ex))
except Exception as ex:
raise click.ClickException(f"ShiftedSeasonController.addShiftedSeason(): {repr(ex)}")
raise click.ClickException(
f"ShiftedSeasonController.checkShiftedSeason(): {repr(ex)}"
)
finally:
s.close()
if session is not None:
session.close()
def addShiftedSeason(
self,
showId: int | None = None,
shiftedSeasonObj: dict | None = None,
patternId: int | None = None,
):
def addShiftedSeason(self, showId: int, shiftedSeasonObj: dict):
if type(showId) is not int:
raise ValueError(f"ShiftedSeasonController.addShiftedSeason(): Argument showId is required to be of type int")
if type(shiftedSeasonObj) is not dict:
raise ValueError(f"ShiftedSeasonController.addShiftedSeason(): Argument shiftedSeasonObj is required to be of type dict")
session = None
try:
s = self.Session()
owner = self._resolve_owner(showId=showId, patternId=patternId)
fields = self._normalize_shifted_season_fields(shiftedSeasonObj)
firstEpisode = int(shiftedSeasonObj['first_episode'])
lastEpisode = int(shiftedSeasonObj['last_episode'])
if not self.checkShiftedSeason(
showId=owner['show_id'],
patternId=owner['pattern_id'],
shiftedSeasonObj=fields,
):
raise RangeOverlapException(
f"ShiftedSeason rule overlaps with an existing rule for {owner['label']}."
)
if lastEpisode < firstEpisode:
raise EpisodeOrderException()
q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId))
shiftedSeason = ShiftedSeason(show_id = int(showId),
original_season = int(shiftedSeasonObj['original_season']),
first_episode = firstEpisode,
last_episode = lastEpisode,
season_offset = int(shiftedSeasonObj['season_offset']),
episode_offset = int(shiftedSeasonObj['episode_offset']))
s.add(shiftedSeason)
s.commit()
session = self.Session()
shiftedSeason = ShiftedSeason(
show_id=owner['show_id'],
pattern_id=owner['pattern_id'],
original_season=fields['original_season'],
first_episode=fields['first_episode'],
last_episode=fields['last_episode'],
season_offset=fields['season_offset'],
episode_offset=fields['episode_offset'],
)
session.add(shiftedSeason)
session.commit()
return shiftedSeason.getId()
except (EpisodeOrderException, RangeOverlapException, ShiftedSeasonOwnerException) as ex:
raise click.ClickException(str(ex))
except Exception as ex:
raise click.ClickException(f"ShiftedSeasonController.addShiftedSeason(): {repr(ex)}")
raise click.ClickException(
f"ShiftedSeasonController.addShiftedSeason(): {repr(ex)}"
)
finally:
s.close()
if session is not None:
session.close()
def updateShiftedSeason(self, shiftedSeasonId: int, shiftedSeasonObj: dict):
if type(shiftedSeasonId) is not int:
raise ValueError(f"ShiftedSeasonController.updateShiftedSeason(): Argument shiftedSeasonId is required to be of type int")
if type(shiftedSeasonObj) is not dict:
raise ValueError(f"ShiftedSeasonController.updateShiftedSeason(): Argument shiftedSeasonObj is required to be of type dict")
raise ValueError(
"ShiftedSeasonController.updateShiftedSeason(): Argument shiftedSeasonId is required to be of type int"
)
session = None
try:
s = self.Session()
fields = self._normalize_shifted_season_fields(shiftedSeasonObj)
session = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
shiftedSeason = (
session.query(ShiftedSeason)
.filter(ShiftedSeason.id == int(shiftedSeasonId))
.first()
)
if q.count():
shiftedSeason = q.first()
shiftedSeason.original_season = int(shiftedSeasonObj['original_season'])
shiftedSeason.first_episode = int(shiftedSeasonObj['first_episode'])
shiftedSeason.last_episode = int(shiftedSeasonObj['last_episode'])
shiftedSeason.season_offset = int(shiftedSeasonObj['season_offset'])
shiftedSeason.episode_offset = int(shiftedSeasonObj['episode_offset'])
s.commit()
return True
else:
if shiftedSeason is None:
return False
owner = self._resolve_owner(
showId=shiftedSeason.getShowId(),
patternId=shiftedSeason.getPatternId(),
)
if not self.checkShiftedSeason(
showId=owner['show_id'],
patternId=owner['pattern_id'],
shiftedSeasonObj=fields,
shiftedSeasonId=shiftedSeasonId,
):
raise RangeOverlapException(
f"ShiftedSeason rule overlaps with an existing rule for {owner['label']}."
)
shiftedSeason.original_season = fields['original_season']
shiftedSeason.first_episode = fields['first_episode']
shiftedSeason.last_episode = fields['last_episode']
shiftedSeason.season_offset = fields['season_offset']
shiftedSeason.episode_offset = fields['episode_offset']
session.commit()
return True
except (EpisodeOrderException, RangeOverlapException, ShiftedSeasonOwnerException) as ex:
raise click.ClickException(str(ex))
except Exception as ex:
raise click.ClickException(f"ShiftedSeasonController.updateShiftedSeason(): {repr(ex)}")
raise click.ClickException(
f"ShiftedSeasonController.updateShiftedSeason(): {repr(ex)}"
)
finally:
s.close()
if session is not None:
session.close()
def findShiftedSeason(self, showId: int, originalSeason: int, firstEpisode: int, lastEpisode: int):
if type(showId) is not int:
raise ValueError(f"ShiftedSeasonController.findShiftedSeason(): Argument shiftedSeasonId is required to be of type int")
def findShiftedSeason(
self,
showId: int | None = None,
originalSeason: int | None = None,
firstEpisode: int | None = None,
lastEpisode: int | None = None,
patternId: int | None = None,
):
if type(originalSeason) is not int:
raise ValueError(f"ShiftedSeasonController.findShiftedSeason(): Argument originalSeason is required to be of type int")
raise ValueError(
"ShiftedSeasonController.findShiftedSeason(): Argument originalSeason is required to be of type int"
)
if type(firstEpisode) is not int:
raise ValueError(f"ShiftedSeasonController.findShiftedSeason(): Argument firstEpisode is required to be of type int")
raise ValueError(
"ShiftedSeasonController.findShiftedSeason(): Argument firstEpisode is required to be of type int"
)
if type(lastEpisode) is not int:
raise ValueError(f"ShiftedSeasonController.findShiftedSeason(): Argument lastEpisode is required to be of type int")
raise ValueError(
"ShiftedSeasonController.findShiftedSeason(): Argument lastEpisode is required to be of type int"
)
session = None
try:
owner = self._resolve_owner(showId=showId, patternId=patternId)
session = self.Session()
shiftedSeason = (
self._apply_owner_filter(session.query(ShiftedSeason), owner)
.filter(
ShiftedSeason.original_season == int(originalSeason),
ShiftedSeason.first_episode == int(firstEpisode),
ShiftedSeason.last_episode == int(lastEpisode),
)
.first()
)
return shiftedSeason.getId() if shiftedSeason is not None else None
except ShiftedSeasonOwnerException as ex:
raise click.ClickException(str(ex))
except Exception as ex:
raise click.ClickException(
f"ShiftedSeasonController.findShiftedSeason(): {repr(ex)}"
)
finally:
if session is not None:
session.close()
def getShiftedSeasonSiblings(
self,
showId: int | None = None,
patternId: int | None = None,
):
session = None
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId),
ShiftedSeason.original_season == int(originalSeason),
ShiftedSeason.first_episode == int(firstEpisode),
ShiftedSeason.last_episode == int(lastEpisode))
return q.first().getId() if q.count() else None
owner = self._resolve_owner(showId=showId, patternId=patternId)
session = self.Session()
return self._ordered_query(session, owner).all()
except ShiftedSeasonOwnerException as ex:
raise click.ClickException(str(ex))
except Exception as ex:
raise click.ClickException(f"PatternController.findShiftedSeason(): {repr(ex)}")
raise click.ClickException(
f"ShiftedSeasonController.getShiftedSeasonSiblings(): {repr(ex)}"
)
finally:
s.close()
def getShiftedSeasonSiblings(self, showId: int):
if type(showId) is not int:
raise ValueError(f"ShiftedSeasonController.getShiftedSeasonSiblings(): Argument shiftedSeasonId is required to be of type int")
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId))
return q.all()
except Exception as ex:
raise click.ClickException(f"PatternController.getShiftedSeasonSiblings(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def getShiftedSeason(self, shiftedSeasonId: int):
if type(shiftedSeasonId) is not int:
raise ValueError(f"ShiftedSeasonController.getShiftedSeason(): Argument shiftedSeasonId is required to be of type int")
raise ValueError(
"ShiftedSeasonController.getShiftedSeason(): Argument shiftedSeasonId is required to be of type int"
)
session = None
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
return q.first() if q.count() else None
session = self.Session()
return (
session.query(ShiftedSeason)
.filter(ShiftedSeason.id == int(shiftedSeasonId))
.first()
)
except Exception as ex:
raise click.ClickException(f"ShiftedSeasonController.getShiftedSeason(): {repr(ex)}")
raise click.ClickException(
f"ShiftedSeasonController.getShiftedSeason(): {repr(ex)}"
)
finally:
s.close()
if session is not None:
session.close()
def deleteShiftedSeason(self, shiftedSeasonId):
if type(shiftedSeasonId) is not int:
raise ValueError(f"ShiftedSeasonController.deleteShiftedSeason(): Argument shiftedSeasonId is required to be of type int")
raise ValueError(
"ShiftedSeasonController.deleteShiftedSeason(): Argument shiftedSeasonId is required to be of type int"
)
session = None
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
session = self.Session()
shiftedSeason = (
session.query(ShiftedSeason)
.filter(ShiftedSeason.id == int(shiftedSeasonId))
.first()
)
if q.count():
#DAFUQ: https://stackoverflow.com/a/19245058
# q.delete()
shiftedSeason = q.first()
s.delete(shiftedSeason)
s.commit()
if shiftedSeason is not None:
session.delete(shiftedSeason)
session.commit()
return True
return False
except Exception as ex:
raise click.ClickException(f"ShiftedSeasonController.deleteShiftedSeason(): {repr(ex)}")
raise click.ClickException(
f"ShiftedSeasonController.deleteShiftedSeason(): {repr(ex)}"
)
finally:
s.close()
if session is not None:
session.close()
def shiftSeason(self, showId, season, episode, patternId=None):
if season == -1 or episode == -1:
return season, episode
def shiftSeason(self, showId, season, episode):
shiftedSeason, shiftedEpisode, sourceLabel = self.resolveShiftSeason(
showId,
season,
episode,
patternId=patternId,
)
shiftedSeasonEntry: ShiftedSeason
for shiftedSeasonEntry in self.getShiftedSeasonSiblings(showId):
if shiftedSeason != season or shiftedEpisode != episode:
self.context['logger'].info(
f"Setting season shift {season}/{episode} -> {shiftedSeason}/{shiftedEpisode} from {sourceLabel}"
)
if (season == shiftedSeasonEntry.getOriginalSeason()
and (shiftedSeasonEntry.getFirstEpisode() == -1 or episode >= shiftedSeasonEntry.getFirstEpisode())
and (shiftedSeasonEntry.getLastEpisode() == -1 or episode <= shiftedSeasonEntry.getLastEpisode())):
return shiftedSeason, shiftedEpisode
shiftedSeason = season + shiftedSeasonEntry.getSeasonOffset()
shiftedEpisode = episode + shiftedSeasonEntry.getEpisodeOffset()
def resolveShiftSeason(self, showId, season, episode, patternId=None):
if season == -1 or episode == -1:
return season, episode, "unrecognized"
self.context['logger'].info(f"Shifting season: {season} episode: {episode} "
+f"-> season: {shiftedSeason} episode: {shiftedEpisode}")
session = None
try:
session = self.Session()
activeShift = None
return shiftedSeason, shiftedEpisode
return season, episode
if patternId is not None:
activeShift = self._find_matching_rule(
session,
self._resolve_owner(patternId=patternId),
season=int(season),
episode=int(episode),
)
if activeShift is None and showId is not None and showId != -1:
activeShift = self._find_matching_rule(
session,
self._resolve_owner(showId=showId),
season=int(season),
episode=int(episode),
)
if activeShift is None:
shiftedSeason = season
shiftedEpisode = episode
sourceLabel = "default"
else:
shiftedSeason = season + activeShift.getSeasonOffset()
shiftedEpisode = episode + activeShift.getEpisodeOffset()
sourceLabel = (
"pattern"
if activeShift.getPatternId() is not None
else "show"
)
return shiftedSeason, shiftedEpisode, sourceLabel
except ShiftedSeasonOwnerException as ex:
raise click.ClickException(str(ex))
except Exception as ex:
raise click.ClickException(
f"ShiftedSeasonController.shiftSeason(): {repr(ex)}"
)
finally:
if session is not None:
session.close()

View File

@@ -4,7 +4,9 @@ from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button
from textual.containers import Grid
from .i18n import t
from .shifted_season_controller import ShiftedSeasonController
from .screen_support import build_screen_log_pane, go_back_or_exit
from ffx.model.shifted_season import ShiftedSeason
@@ -12,15 +14,22 @@ from ffx.model.shifted_season import ShiftedSeason
# Screen[dict[int, str, int]]
class ShiftedSeasonDeleteScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 2;
grid-rows: 2 auto;
grid-columns: 30 330;
grid-columns: 18 5fr;
height: 100%;
width: 100%;
min-width: 90;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -43,7 +52,7 @@ class ShiftedSeasonDeleteScreen(Screen):
}
"""
def __init__(self, showId = None, shiftedSeasonId = None):
def __init__(self, showId = None, patternId = None, shiftedSeasonId = None):
super().__init__()
self.context = self.app.getContext()
@@ -52,14 +61,23 @@ class ShiftedSeasonDeleteScreen(Screen):
self.__ssc = ShiftedSeasonController(context = self.context)
self._showId = showId
self._patternId = patternId
self.__shiftedSeasonId = shiftedSeasonId
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
shiftedSeason: ShiftedSeason = self.__ssc.getShiftedSeason(self.__shiftedSeasonId)
self.query_one("#static_show_id", Static).update(str(self._showId))
ownerLabel = (
t("pattern #{id}", id=self._patternId)
if self._patternId is not None
else t("show #{id}", id=self._showId)
)
self.query_one("#static_owner", Static).update(ownerLabel)
self.query_one("#static_original_season", Static).update(str(shiftedSeason.getOriginalSeason()))
self.query_one("#static_first_episode", Static).update(str(shiftedSeason.getFirstEpisode()))
self.query_one("#static_last_episode", Static).update(str(shiftedSeason.getLastEpisode()))
@@ -72,36 +90,47 @@ class ShiftedSeasonDeleteScreen(Screen):
yield Header()
with Grid():
# Row 1
yield Static(t("Are you sure to delete the following shifted season?"), id="toplabel", classes="two")
yield Static("Are you sure to delete the following shifted season?", id="toplabel", classes="two")
# Row 2
yield Static(" ", classes="two")
yield Static("from show")
yield Static(" ", id="static_show_id")
# Row 3
yield Static(t("from"))
yield Static(" ", id="static_owner")
# Row 4
yield Static(" ", classes="two")
yield Static("Original season")
# Row 5
yield Static(t("Source Season"))
yield Static(" ", id="static_original_season")
yield Static("First episode")
# Row 6
yield Static(t("First episode"))
yield Static(" ", id="static_first_episode")
yield Static("Last episode")
# Row 7
yield Static(t("Last episode"))
yield Static(" ", id="static_last_episode")
yield Static("Season offset")
# Row 8
yield Static(t("Season Offset"))
yield Static(" ", id="static_season_offset")
yield Static("Episode offset")
# Row 9
yield Static(t("Episode offset"))
yield Static(" ", id="static_episode_offset")
# Row 10
yield Static(" ", classes="two")
yield Button("Delete", id="delete_button")
yield Button("Cancel", id="cancel_button")
# Row 11
yield Button(t("Delete"), id="delete_button")
yield Button(t("Cancel"), id="cancel_button")
yield build_screen_log_pane()
yield Footer()
@@ -123,3 +152,5 @@ class ShiftedSeasonDeleteScreen(Screen):
if event.button.id == "cancel_button":
self.app.pop_screen()
def action_back(self):
go_back_or_exit(self)

View File

@@ -4,7 +4,9 @@ from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input
from textual.containers import Grid
from .i18n import t
from .shifted_season_controller import ShiftedSeasonController
from .screen_support import build_screen_log_pane, go_back_or_exit
from ffx.model.shifted_season import ShiftedSeason
@@ -12,15 +14,22 @@ from ffx.model.shifted_season import ShiftedSeason
# Screen[dict[int, str, int]]
class ShiftedSeasonDetailsScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 3 10;
grid-rows: 2 2 2 2 2 2 2 2 2 2;
grid-columns: 40 40 40;
grid-columns: 20 1fr 1fr;
height: 100%;
width: 100%;
min-width: 80;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -81,7 +90,7 @@ class ShiftedSeasonDetailsScreen(Screen):
}
"""
def __init__(self, showId = None, shiftedSeasonId = None):
def __init__(self, showId = None, patternId = None, shiftedSeasonId = None):
super().__init__()
self.context = self.app.getContext()
@@ -90,10 +99,19 @@ class ShiftedSeasonDetailsScreen(Screen):
self.__ssc = ShiftedSeasonController(context = self.context)
self.__showId = showId
self.__patternId = patternId
self.__shiftedSeasonId = shiftedSeasonId
def _owner_kwargs(self):
if self.__patternId is not None:
return {'patternId': self.__patternId}
return {'showId': self.__showId}
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
if self.__shiftedSeasonId is not None:
shiftedSeason: ShiftedSeason = self.__ssc.getShiftedSeason(self.__shiftedSeasonId)
@@ -119,43 +137,48 @@ class ShiftedSeasonDetailsScreen(Screen):
with Grid():
# 1
yield Static("Edit shifted season" if self.__shiftedSeasonId is not None else "New shifted season", id="toplabel", classes="three")
# Row 1
yield Static(
t("Edit shifted season") if self.__shiftedSeasonId is not None else t("New shifted season"),
id="toplabel",
classes="three",
)
# 2
# Row 2
yield Static(" ", classes="three")
# 3
yield Static("Original season")
# Row 3
yield Static(t("Source Season"))
yield Input(id="input_original_season", classes="two")
# 4
yield Static("First Episode")
# Row 4
yield Static(t("First Episode"))
yield Input(id="input_first_episode", classes="two")
# 5
yield Static("Last Episode")
# Row 5
yield Static(t("Last Episode"))
yield Input(id="input_last_episode", classes="two")
# 6
yield Static("Season offset")
# Row 6
yield Static(t("Season Offset"))
yield Input(id="input_season_offset", classes="two")
# 7
yield Static("Episode offset")
# Row 7
yield Static(t("Episode offset"))
yield Input(id="input_episode_offset", classes="two")
# 8
# Row 8
yield Static(" ", classes="three")
# 9
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
# Row 9
yield Button(t("Save"), id="save_button")
yield Button(t("Cancel"), id="cancel_button")
yield Static(" ")
# 10
# Row 10
yield Static(" ", classes="three")
yield build_screen_log_pane()
yield Footer()
@@ -190,6 +213,9 @@ class ShiftedSeasonDetailsScreen(Screen):
return shiftedSeasonObj
def action_back(self):
go_back_or_exit(self)
# Event handler for button press
def on_button_pressed(self, event: Button.Pressed) -> None:
@@ -203,8 +229,11 @@ class ShiftedSeasonDetailsScreen(Screen):
if self.__shiftedSeasonId is not None:
if self.__ssc.checkShiftedSeason(self.__showId, shiftedSeasonObj,
shiftedSeasonId = self.__shiftedSeasonId):
if self.__ssc.checkShiftedSeason(
shiftedSeasonObj=shiftedSeasonObj,
shiftedSeasonId=self.__shiftedSeasonId,
**self._owner_kwargs(),
):
if self.__ssc.updateShiftedSeason(self.__shiftedSeasonId, shiftedSeasonObj):
self.dismiss((self.__shiftedSeasonId, shiftedSeasonObj))
else:
@@ -212,8 +241,14 @@ class ShiftedSeasonDetailsScreen(Screen):
self.app.pop_screen()
else:
if self.__ssc.checkShiftedSeason(self.__showId, shiftedSeasonObj):
self.__shiftedSeasonId = self.__ssc.addShiftedSeason(self.__showId, shiftedSeasonObj)
if self.__ssc.checkShiftedSeason(
shiftedSeasonObj=shiftedSeasonObj,
**self._owner_kwargs(),
):
self.__shiftedSeasonId = self.__ssc.addShiftedSeason(
shiftedSeasonObj=shiftedSeasonObj,
**self._owner_kwargs(),
)
self.dismiss((self.__shiftedSeasonId, shiftedSeasonObj))

View File

@@ -16,10 +16,9 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show).filter(Show.id == showId)
show = s.query(Show).filter(Show.id == showId).first()
if q.count():
show: Show = q.first()
if show is not None:
return show.getDescriptor(self.context)
except Exception as ex:
@@ -31,9 +30,7 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show).filter(Show.id == showId)
return q.first() if q.count() else None
return s.query(Show).filter(Show.id == showId).first()
except Exception as ex:
raise click.ClickException(f"ShowController.getShow(): {repr(ex)}")
@@ -44,12 +41,7 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show)
if q.count():
return q.all()
else:
return []
return s.query(Show).all()
except Exception as ex:
raise click.ClickException(f"ShowController.getAllShows(): {repr(ex)}")
@@ -61,24 +53,23 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show).filter(Show.id == showDescriptor.getId())
currentShow = s.query(Show).filter(Show.id == showDescriptor.getId()).first()
if not q.count():
if currentShow is None:
show = Show(id = int(showDescriptor.getId()),
name = str(showDescriptor.getName()),
year = int(showDescriptor.getYear()),
index_season_digits = showDescriptor.getIndexSeasonDigits(),
index_episode_digits = showDescriptor.getIndexEpisodeDigits(),
indicator_season_digits = showDescriptor.getIndicatorSeasonDigits(),
indicator_episode_digits = showDescriptor.getIndicatorEpisodeDigits())
indicator_episode_digits = showDescriptor.getIndicatorEpisodeDigits(),
quality = showDescriptor.getQuality(),
notes = showDescriptor.getNotes())
s.add(show)
s.commit()
return True
else:
currentShow = q.first()
changed = False
if currentShow.name != str(showDescriptor.getName()):
currentShow.name = str(showDescriptor.getName())
@@ -99,6 +90,12 @@ class ShowController():
if currentShow.indicator_episode_digits != int(showDescriptor.getIndicatorEpisodeDigits()):
currentShow.indicator_episode_digits = int(showDescriptor.getIndicatorEpisodeDigits())
changed = True
if int(currentShow.quality or 0) != int(showDescriptor.getQuality()):
currentShow.quality = int(showDescriptor.getQuality())
changed = True
if str(currentShow.notes or '') != str(showDescriptor.getNotes()):
currentShow.notes = str(showDescriptor.getNotes())
changed = True
if changed:
s.commit()
@@ -113,14 +110,12 @@ class ShowController():
def deleteShow(self, show_id):
try:
s = self.Session()
q = s.query(Show).filter(Show.id == int(show_id))
show = s.query(Show).filter(Show.id == int(show_id)).first()
if q.count():
if show is not None:
#DAFUQ: https://stackoverflow.com/a/19245058
# q.delete()
show = q.first()
s.delete(show)
s.commit()

View File

@@ -2,20 +2,29 @@ from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button
from textual.containers import Grid
from .i18n import t
from .show_controller import ShowController
from .screen_support import build_screen_log_pane, go_back_or_exit
# Screen[dict[int, str, int]]
class ShowDeleteScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 2;
grid-rows: 2 auto;
grid-columns: 30 auto;
grid-columns: 18 4fr;
height: 100%;
width: 100%;
min-width: 80;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -59,22 +68,28 @@ class ShowDeleteScreen(Screen):
yield Header()
with Grid():
# Row 1
yield Static(t("Are you sure to delete the following show?"), id="toplabel", classes="two")
yield Static("Are you sure to delete the following show?", id="toplabel", classes="two")
# Row 2
yield Static("", classes="two")
# Row 3
yield Static("", id="showlabel")
yield Static("")
# Row 4
yield Static("", classes="two")
# Row 5
yield Static("", classes="two")
yield Button("Delete", id="delete_button")
yield Button("Cancel", id="cancel_button")
# Row 6
yield Button(t("Delete"), id="delete_button")
yield Button(t("Cancel"), id="cancel_button")
yield build_screen_log_pane()
yield Footer()
@@ -93,3 +108,13 @@ class ShowDeleteScreen(Screen):
if event.button.id == "cancel_button":
self.app.pop_screen()
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
def action_back(self):
go_back_or_exit(self)

View File

@@ -1,4 +1,11 @@
import logging
from .configuration_controller import ConfigurationController
from .constants import (
DEFAULT_SHOW_INDEX_EPISODE_DIGITS,
DEFAULT_SHOW_INDEX_SEASON_DIGITS,
DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS,
DEFAULT_SHOW_INDICATOR_SEASON_DIGITS,
)
from .logging_utils import get_ffx_logger
class ShowDescriptor():
@@ -14,11 +21,45 @@ class ShowDescriptor():
INDEX_EPISODE_DIGITS_KEY = 'index_episode_digits'
INDICATOR_SEASON_DIGITS_KEY = 'indicator_season_digits'
INDICATOR_EPISODE_DIGITS_KEY = 'indicator_episode_digits'
QUALITY_KEY = 'quality'
NOTES_KEY = 'notes'
DEFAULT_INDEX_SEASON_DIGITS = 2
DEFAULT_INDEX_EPISODE_DIGITS = 2
DEFAULT_INDICATOR_SEASON_DIGITS = 2
DEFAULT_INDICATOR_EPISODE_DIGITS = 2
DEFAULT_INDEX_SEASON_DIGITS = DEFAULT_SHOW_INDEX_SEASON_DIGITS
DEFAULT_INDEX_EPISODE_DIGITS = DEFAULT_SHOW_INDEX_EPISODE_DIGITS
DEFAULT_INDICATOR_SEASON_DIGITS = DEFAULT_SHOW_INDICATOR_SEASON_DIGITS
DEFAULT_INDICATOR_EPISODE_DIGITS = DEFAULT_SHOW_INDICATOR_EPISODE_DIGITS
@classmethod
def getDefaultDigitLengths(cls, context: dict | None = None) -> dict[str, int]:
configurationData = {}
if context is not None:
configController = context.get('config')
if configController is not None and hasattr(configController, 'getData'):
configurationData = configController.getData()
return {
cls.INDEX_SEASON_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
configurationData,
ConfigurationController.DEFAULT_INDEX_SEASON_DIGITS_CONFIG_KEY,
cls.DEFAULT_INDEX_SEASON_DIGITS,
),
cls.INDEX_EPISODE_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
configurationData,
ConfigurationController.DEFAULT_INDEX_EPISODE_DIGITS_CONFIG_KEY,
cls.DEFAULT_INDEX_EPISODE_DIGITS,
),
cls.INDICATOR_SEASON_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
configurationData,
ConfigurationController.DEFAULT_INDICATOR_SEASON_DIGITS_CONFIG_KEY,
cls.DEFAULT_INDICATOR_SEASON_DIGITS,
),
cls.INDICATOR_EPISODE_DIGITS_KEY: ConfigurationController.getConfiguredIntegerValue(
configurationData,
ConfigurationController.DEFAULT_INDICATOR_EPISODE_DIGITS_CONFIG_KEY,
cls.DEFAULT_INDICATOR_EPISODE_DIGITS,
),
}
def __init__(self, **kwargs):
@@ -32,8 +73,7 @@ class ShowDescriptor():
self.__logger = self.__context['logger']
else:
self.__context = {}
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
if ShowDescriptor.ID_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.ID_KEY]) is not int:
@@ -54,36 +94,51 @@ class ShowDescriptor():
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.YEAR_KEY} is required to be of type int")
self.__showYear = kwargs[ShowDescriptor.YEAR_KEY]
else:
self.__showYear = -1
self.__showYear = -1
defaultDigitLengths = self.getDefaultDigitLengths(self.__context)
if ShowDescriptor.INDEX_SEASON_DIGITS_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]) is not int:
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_SEASON_DIGITS_KEY} is required to be of type int")
self.__indexSeasonDigits = kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
else:
self.__indexSeasonDigits = ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
self.__indexSeasonDigits = defaultDigitLengths[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
if ShowDescriptor.INDEX_EPISODE_DIGITS_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]) is not int:
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_EPISODE_DIGITS_KEY} is required to be of type int")
self.__indexEpisodeDigits = kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
else:
self.__indexEpisodeDigits = ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
self.__indexEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
if ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]) is not int:
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY} is required to be of type int")
self.__indicatorSeasonDigits = kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
else:
self.__indicatorSeasonDigits = ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
self.__indicatorSeasonDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
if ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]) is not int:
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY} is required to be of type int")
self.__indicatorEpisodeDigits = kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
else:
self.__indicatorEpisodeDigits = ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS
self.__indicatorEpisodeDigits = defaultDigitLengths[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
if ShowDescriptor.QUALITY_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.QUALITY_KEY]) is not int:
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.QUALITY_KEY} is required to be of type int")
self.__quality = kwargs[ShowDescriptor.QUALITY_KEY]
else:
self.__quality = 0
if ShowDescriptor.NOTES_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.NOTES_KEY]) is not str:
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.NOTES_KEY} is required to be of type str")
self.__notes = kwargs[ShowDescriptor.NOTES_KEY]
else:
self.__notes = ''
def getId(self):
@@ -101,6 +156,10 @@ class ShowDescriptor():
return self.__indicatorSeasonDigits
def getIndicatorEpisodeDigits(self):
    # Digit width (int) of the episode component of the indicator;
    # taken from kwargs or the configured defaults in __init__.
    return self.__indicatorEpisodeDigits
def getQuality(self):
    # Quality value (int); defaults to 0 when not supplied to __init__.
    return self.__quality
def getNotes(self):
    # Free-form notes (str); defaults to '' when not supplied to __init__.
    return self.__notes
def getFilenamePrefix(self):
    """Return the canonical "<name> (<year>)" prefix used for generated filenames."""
    return "{} ({})".format(self.__showName, self.__showYear)

View File

@@ -1,20 +1,13 @@
import click
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, DataTable, Input
from textual.widgets import Header, Footer, Static, Button, DataTable, Input, TextArea
from textual.containers import Grid
from textual.widgets._data_table import CellDoesNotExist
from ffx.model.pattern import Pattern
from .pattern_details_screen import PatternDetailsScreen
from .pattern_delete_screen import PatternDeleteScreen
from .show_controller import ShowController
from .pattern_controller import PatternController
from .tmdb_controller import TmdbController
from .shifted_season_controller import ShiftedSeasonController
from .show_descriptor import ShowDescriptor
from .shifted_season_details_screen import ShiftedSeasonDetailsScreen
@@ -23,6 +16,14 @@ from .shifted_season_delete_screen import ShiftedSeasonDeleteScreen
from ffx.model.shifted_season import ShiftedSeason
from .helper import filterFilename
from .i18n import t
from .screen_support import (
add_auto_table_column,
build_screen_bootstrap,
build_screen_controllers,
build_screen_log_pane,
go_back_or_exit,
)
# Screen[dict[int, str, int]]
@@ -31,12 +32,15 @@ class ShowDetailsScreen(Screen):
CSS = """
Grid {
grid-size: 5 16;
grid-rows: 2 2 2 2 2 2 2 2 2 2 2 9 2 9 2 2;
grid-columns: 30 30 30 30 30;
grid-size: 5 19;
grid-rows: 2 2 2 2 2 2 6 2 2 2 2 2 2 2 9 2 9 2 2;
grid-columns: 25 20 20 20 1fr;
height: 100%;
width: 100%;
min-width: 110;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -49,6 +53,7 @@ class ShowDetailsScreen(Screen):
DataTable {
column-span: 2;
min-height: 8;
width: 100%;
}
DataTable .datatable--cursor {
@@ -83,46 +88,77 @@ class ShowDetailsScreen(Screen):
height: 100%;
border: solid green;
}
.note_box {
min-height: 6;
}
"""
BINDINGS = [
("a", "add_pattern", "Add Pattern"),
("e", "edit_pattern", "Edit Pattern"),
("r", "remove_pattern", "Remove Pattern"),
("escape", "back", t("Back")),
("a", "add_pattern", t("Add Pattern")),
("e", "edit_pattern", t("Edit Pattern")),
("r", "remove_pattern", t("Remove Pattern")),
]
def __init__(self, showId = None):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
self.__sc = ShowController(context = self.context)
self.__pc = PatternController(context = self.context)
self.__tc = TmdbController()
self.__ssc = ShiftedSeasonController(context = self.context)
bootstrap = build_screen_bootstrap(self.app.getContext())
self.context = bootstrap.context
controllers = build_screen_controllers(
self.context,
pattern=True,
show=True,
tmdb=True,
shifted_season=True,
)
self.__sc = controllers['show']
self.__pc = controllers['pattern']
self.__tc = controllers['tmdb']
self.__ssc = controllers['shifted_season']
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
self.__patternRowData: dict[object, dict[str, object]] = {}
self.__shiftedSeasonRowData: dict[object, dict[str, int | None]] = {}
def loadPatterns(self, show_id : int):
def _add_pattern_row(self, *, pattern_id: int | None, pattern_text: str):
    """Append a pattern row to the pattern table and record its backing data.

    Returns the DataTable row key of the newly added row.
    """
    text = str(pattern_text)
    key = self.patternTable.add_row(text)
    descriptor = self.__showDescriptor
    # Keep id/show_id/pattern keyed by row key so selection handlers
    # never have to parse cell contents back out of the table.
    self.__patternRowData[key] = {
        'id': pattern_id,
        'show_id': None if descriptor is None else descriptor.getId(),
        'pattern': text,
    }
    return key
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(show_id))
return [{'id': int(p.id), 'pattern': str(p.pattern)} for p in q.all()]
def _add_shifted_season_row(self, shifted_season_obj: dict[str, int | None]):
firstEpisode = shifted_season_obj['first_episode']
firstEpisodeStr = str(firstEpisode) if firstEpisode != -1 else ''
except Exception as ex:
raise click.ClickException(f"ShowDetailsScreen.loadPatterns(): {repr(ex)}")
finally:
s.close()
lastEpisode = shifted_season_obj['last_episode']
lastEpisodeStr = str(lastEpisode) if lastEpisode != -1 else ''
row = (
shifted_season_obj['original_season'],
firstEpisodeStr,
lastEpisodeStr,
shifted_season_obj['season_offset'],
shifted_season_obj['episode_offset'],
)
row_key = self.shiftedSeasonsTable.add_row(*map(str, row))
self.__shiftedSeasonRowData[row_key] = dict(shifted_season_obj)
return row_key
def updateShiftedSeasons(self):
self.shiftedSeasonsTable.clear()
self.__shiftedSeasonRowData = {}
if not self.__showDescriptor is None:
@@ -132,25 +168,16 @@ class ShowDetailsScreen(Screen):
for shiftedSeason in self.__ssc.getShiftedSeasonSiblings(showId=showId):
shiftedSeasonObj = shiftedSeason.getObj()
firstEpisode = shiftedSeasonObj['first_episode']
firstEpisodeStr = str(firstEpisode) if firstEpisode != -1 else ''
lastEpisode = shiftedSeasonObj['last_episode']
lastEpisodeStr = str(lastEpisode) if lastEpisode != -1 else ''
row = (shiftedSeasonObj['original_season'],
firstEpisodeStr,
lastEpisodeStr,
shiftedSeasonObj['season_offset'],
shiftedSeasonObj['episode_offset'])
self.shiftedSeasonsTable.add_row(*map(str, row))
shiftedSeasonObj['id'] = shiftedSeason.getId()
self._add_shifted_season_row(shiftedSeasonObj)
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
if self.__showDescriptor is not None:
showId = int(self.__showDescriptor.getId())
@@ -163,23 +190,36 @@ class ShowDetailsScreen(Screen):
self.query_one("#index_episode_digits_input", Input).value = str(self.__showDescriptor.getIndexEpisodeDigits())
self.query_one("#indicator_season_digits_input", Input).value = str(self.__showDescriptor.getIndicatorSeasonDigits())
self.query_one("#indicator_episode_digits_input", Input).value = str(self.__showDescriptor.getIndicatorEpisodeDigits())
if self.__showDescriptor.getQuality():
self.query_one("#quality_input", Input).value = str(self.__showDescriptor.getQuality())
if self.__showDescriptor.getNotes():
self.query_one("#notes_textarea", TextArea).text = str(self.__showDescriptor.getNotes())
#raise click.ClickException(f"show_id {showId}")
patternList = self.loadPatterns(showId)
# raise click.ClickException(f"patternList {patternList}")
for pattern in patternList:
row = (pattern['pattern'],)
self.patternTable.add_row(*map(str, row))
for pattern in self.__pc.getPatternsForShow(showId):
self._add_pattern_row(
pattern_id=pattern.getId(),
pattern_text=pattern.getPattern(),
)
self.updateShiftedSeasons()
else:
self.query_one("#index_season_digits_input", Input).value = "2"
self.query_one("#index_episode_digits_input", Input).value = "2"
self.query_one("#indicator_season_digits_input", Input).value = "2"
self.query_one("#indicator_episode_digits_input", Input).value = "2"
defaultDigitLengths = ShowDescriptor.getDefaultDigitLengths(self.context)
self.query_one("#index_season_digits_input", Input).value = str(
defaultDigitLengths[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
)
self.query_one("#index_episode_digits_input", Input).value = str(
defaultDigitLengths[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
)
self.query_one("#indicator_season_digits_input", Input).value = str(
defaultDigitLengths[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
)
self.query_one("#indicator_episode_digits_input", Input).value = str(
defaultDigitLengths[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
)
def getSelectedPatternDescriptor(self):
@@ -193,10 +233,7 @@ class ShowDetailsScreen(Screen):
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
if row_key is not None:
selected_row_data = self.patternTable.get_row(row_key)
selectedPattern['show_id'] = self.__showDescriptor.getId()
selectedPattern['pattern'] = str(selected_row_data[0])
selectedPattern = dict(self.__patternRowData.get(row_key, {}))
except CellDoesNotExist:
pass
@@ -215,25 +252,7 @@ class ShowDetailsScreen(Screen):
row_key, col_key = self.shiftedSeasonsTable.coordinate_to_cell_key(self.shiftedSeasonsTable.cursor_coordinate)
if row_key is not None:
selected_row_data = self.shiftedSeasonsTable.get_row(row_key)
shiftedSeasonObj['original_season'] = int(selected_row_data[0])
shiftedSeasonObj['first_episode'] = int(selected_row_data[1]) if selected_row_data[1].isnumeric() else -1
shiftedSeasonObj['last_episode'] = int(selected_row_data[2]) if selected_row_data[2].isnumeric() else -1
shiftedSeasonObj['season_offset'] = int(selected_row_data[3]) if selected_row_data[3].isnumeric() else 0
shiftedSeasonObj['episode_offset'] = int(selected_row_data[4]) if selected_row_data[4].isnumeric() else 0
if self.__showDescriptor is not None:
showId = int(self.__showDescriptor.getId())
shiftedSeasonId = self.__ssc.findShiftedSeason(showId,
originalSeason=shiftedSeasonObj['original_season'],
firstEpisode=shiftedSeasonObj['first_episode'],
lastEpisode=shiftedSeasonObj['last_episode'])
if shiftedSeasonId is not None:
shiftedSeasonObj['id'] = shiftedSeasonId
shiftedSeasonObj = dict(self.__shiftedSeasonRowData.get(row_key, {}))
except CellDoesNotExist:
pass
@@ -247,9 +266,14 @@ class ShowDetailsScreen(Screen):
def handle_add_pattern(self, screenResult):
if screenResult is None:
return
pattern = (screenResult['pattern'],)
self.patternTable.add_row(*map(str, pattern))
pattern_id = self.__pc.findPattern(screenResult)
self._add_pattern_row(
pattern_id=pattern_id,
pattern_text=screenResult['pattern'],
)
def action_edit_pattern(self):
@@ -257,8 +281,7 @@ class ShowDetailsScreen(Screen):
selectedPatternDescriptor = self.getSelectedPatternDescriptor()
if selectedPatternDescriptor:
selectedPatternId = self.__pc.findPattern(selectedPatternDescriptor)
selectedPatternId = selectedPatternDescriptor.get('id')
if selectedPatternId is None:
raise click.ClickException(f"ShowDetailsScreen.action_edit_pattern(): Pattern to edit has no id")
@@ -272,6 +295,8 @@ class ShowDetailsScreen(Screen):
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
self.patternTable.update_cell(row_key, self.column_key_pattern, screenResult['pattern'])
if row_key in self.__patternRowData:
self.__patternRowData[row_key]['pattern'] = str(screenResult['pattern'])
except CellDoesNotExist:
pass
@@ -283,7 +308,7 @@ class ShowDetailsScreen(Screen):
if selectedPatternDescriptor:
selectedPatternId = self.__pc.findPattern(selectedPatternDescriptor)
selectedPatternId = selectedPatternDescriptor.get('id')
if selectedPatternId is None:
raise click.ClickException(f"ShowDetailsScreen.action_remove_pattern(): Pattern to remove has no id")
@@ -296,6 +321,7 @@ class ShowDetailsScreen(Screen):
try:
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
self.patternTable.remove_row(row_key)
self.__patternRowData.pop(row_key, None)
except CellDoesNotExist:
pass
@@ -307,18 +333,18 @@ class ShowDetailsScreen(Screen):
self.patternTable = DataTable(classes="five")
# Define the columns with headers
self.column_key_pattern = self.patternTable.add_column("Pattern", width=150)
self.column_key_pattern = add_auto_table_column(self.patternTable, t("Pattern"))
self.patternTable.cursor_type = 'row'
self.shiftedSeasonsTable = DataTable(classes="five")
self.column_key_original_season = self.shiftedSeasonsTable.add_column("Original Season", width=30)
self.column_key_first_episode = self.shiftedSeasonsTable.add_column("First Episode", width=30)
self.column_key_last_episode = self.shiftedSeasonsTable.add_column("Last Episode", width=30)
self.column_key_season_offset = self.shiftedSeasonsTable.add_column("Season Offset", width=30)
self.column_key_episode_offset = self.shiftedSeasonsTable.add_column("Episode Offset", width=30)
self.column_key_original_season = add_auto_table_column(self.shiftedSeasonsTable, t("Source Season"))
self.column_key_first_episode = add_auto_table_column(self.shiftedSeasonsTable, t("First Episode"))
self.column_key_last_episode = add_auto_table_column(self.shiftedSeasonsTable, t("Last Episode"))
self.column_key_season_offset = add_auto_table_column(self.shiftedSeasonsTable, t("Season Offset"))
self.column_key_episode_offset = add_auto_table_column(self.shiftedSeasonsTable, t("Episode Offset"))
self.shiftedSeasonsTable.cursor_type = 'row'
@@ -327,82 +353,97 @@ class ShowDetailsScreen(Screen):
with Grid():
# 1
yield Static("Show" if not self.__showDescriptor is None else "New Show", id="toplabel")
yield Button("Identify", id="identify_button")
# Row 1
yield Static(t("Show") if not self.__showDescriptor is None else t("New Show"), id="toplabel")
yield Button(t("Identify"), id="identify_button")
yield Static(" ", classes="three")
# 2
yield Static("ID")
# Row 2
yield Static(t("ID"))
if not self.__showDescriptor is None:
yield Static("", id="id_static", classes="four")
else:
yield Input(type="integer", id="id_input", classes="four")
# 3
yield Static("Name")
# Row 3
yield Static(t("Name"))
yield Input(type="text", id="name_input", classes="four")
# 4
yield Static("Year")
# Row 4
yield Static(t("Year"))
yield Input(type="integer", id="year_input", classes="four")
#5
# Row 5
yield Static(t("Quality"))
yield Input(type="integer", id="quality_input", classes="four")
# Row 6
yield Static(t("Notes"))
yield Static(" ", classes="four")
# Row 7
yield TextArea(id="notes_textarea", classes="five note_box")
# Row 8
yield Static(" ", classes="five")
#6
yield Static("Index Season Digits")
# Row 9
yield Static(t("Index Season Digits"))
yield Input(type="integer", id="index_season_digits_input", classes="four")
#7
yield Static("Index Episode Digits")
# Row 10
yield Static(t("Index Episode Digits"))
yield Input(type="integer", id="index_episode_digits_input", classes="four")
#8
yield Static("Indicator Season Digits")
# Row 11
yield Static(t("Indicator Season Digits"))
yield Input(type="integer", id="indicator_season_digits_input", classes="four")
#9
yield Static("Indicator Episode Digits")
# Row 12
yield Static(t("Indicator Episode Digits"))
yield Input(type="integer", id="indicator_episode_digits_input", classes="four")
# 10
# Row 13
yield Static(" ", classes="five")
# 11
yield Static("Shifted seasons", classes="two")
# Row 14
yield Static(t("Numbering Mapping"))
if self.__showDescriptor is not None:
yield Button("Add", id="button_add_shifted_season")
yield Button("Edit", id="button_edit_shifted_season")
yield Button("Delete", id="button_delete_shifted_season")
yield Button(t("Add"), id="button_add_shifted_season")
yield Button(t("Edit"), id="button_edit_shifted_season")
yield Button(t("Delete"), id="button_delete_shifted_season")
else:
yield Static(" ")
yield Static(" ")
yield Static(" ")
# 12
yield Static(" ")
# Row 15
yield self.shiftedSeasonsTable
# 13
yield Static("File patterns", classes="five")
# 14
# Row 16
yield Static(t("File patterns"), classes="five")
# Row 17
yield self.patternTable
# 15
# Row 18
yield Static(" ", classes="five")
# 16
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
# Row 19
yield Button(t("Save"), id="save_button")
yield Button(t("Cancel"), id="cancel_button")
yield build_screen_log_pane()
yield Footer()
def getShowDescriptorFromInput(self) -> ShowDescriptor:
kwargs = {}
kwargs = {ShowDescriptor.CONTEXT_KEY: self.context}
try:
if self.__showDescriptor:
@@ -438,13 +479,18 @@ class ShowDetailsScreen(Screen):
kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY] = int(self.query_one("#indicator_episode_digits_input", Input).value)
except ValueError:
pass
try:
kwargs[ShowDescriptor.QUALITY_KEY] = int(self.query_one("#quality_input", Input).value)
except ValueError:
pass
kwargs[ShowDescriptor.NOTES_KEY] = str(self.query_one("#notes_textarea", TextArea).text)
return ShowDescriptor(**kwargs)
# Event handler for button press
def on_button_pressed(self, event: Button.Pressed) -> None:
# Check if the button pressed is the one we are interested in
if event.button.id == "save_button":
showDescriptor = self.getShowDescriptorFromInput()
@@ -489,4 +535,7 @@ class ShowDetailsScreen(Screen):
self.updateShiftedSeasons()
def handle_delete_shifted_season(self, screenResult):
self.updateShiftedSeasons()
self.updateShiftedSeasons()
def action_back(self):
go_back_or_exit(self)

View File

@@ -1,8 +1,16 @@
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, DataTable
from textual.containers import Grid
from rich.text import Text
from .i18n import t
from .show_controller import ShowController
from .screen_support import (
add_auto_table_column,
build_screen_log_pane,
go_back_or_exit,
update_table_column_label,
)
from .show_details_screen import ShowDetailsScreen
from .show_delete_screen import ShowDeleteScreen
@@ -21,7 +29,10 @@ class ShowsScreen(Screen):
grid-rows: 2 auto;
height: 100%;
width: 100%;
min-width: 80;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
DataTable .datatable--cursor {
@@ -49,12 +60,17 @@ class ShowsScreen(Screen):
height: 100%;
border: solid green;
}
DataTable {
width: 100%;
}
"""
BINDINGS = [
("e", "edit_show", "Edit Show"),
("n", "new_show", "New Show"),
("d", "delete_show", "Delete Show"),
("escape", "back", t("Back")),
("e", "edit_show", t("Edit Show")),
("n", "new_show", t("New Show")),
("d", "delete_show", t("Delete Show")),
]
@@ -66,6 +82,78 @@ class ShowsScreen(Screen):
self.Session = self.context['database']['session'] # convenience
self.__sc = ShowController(context = self.context)
self.__showRowData: dict[object, ShowDescriptor] = {}
self.__sortColumnKey = None
self.__sortReverse = False
self.__columnLabels: dict[object, str] = {}
def _add_show_row(self, show_descriptor: ShowDescriptor):
    """Insert *show_descriptor* as a new table row and remember it by row key.

    Returns the DataTable row key of the freshly added row.
    """
    cells = (
        show_descriptor.getId(),
        show_descriptor.getName(),
        show_descriptor.getYear(),
    )
    new_key = self.table.add_row(*map(str, cells))
    # Track the descriptor by row key so selections resolve without cell parsing.
    self.__showRowData[new_key] = show_descriptor
    return new_key
def _get_selected_row_key(self):
    """Return the row key under the table cursor, or None when nothing is selected."""
    try:
        cursor = self.table.cursor_coordinate
        key, _column = self.table.coordinate_to_cell_key(cursor)
    except CellDoesNotExist:
        # An empty table (or stale cursor position) has no addressable cell.
        return None
    return key
def _move_cursor_to_row_key(self, row_key):
    """Best-effort: move the table cursor onto the row identified by *row_key*.

    Does nothing when *row_key* is None or no longer maps to a row.
    """
    if row_key is not None:
        try:
            target_row = int(self.table.get_row_index(row_key))
        except Exception:
            # The row may have vanished (e.g. after a delete) — leave the cursor alone.
            pass
        else:
            self.table.move_cursor(row=target_row)
def _sort_key_for_column(self, column_key):
if column_key == self.column_key_id:
return lambda value: int(value)
if column_key == self.column_key_year:
return lambda value: int(value)
if column_key == self.column_key_name:
return lambda value: str(value).casefold()
return None
def _update_header_labels(self):
    """Refresh column header labels, marking the active sort column with a direction arrow."""
    # Guard: compose() may not have created the table yet.
    if not hasattr(self, "table"):
        return
    # NOTE(review): both arrow strings appear empty here — presumably unicode
    # arrows were lost in transit; confirm against the repository file.
    arrow_up = ""
    arrow_down = ""
    for column_key, base_label in self.__columnLabels.items():
        column = self.table.columns.get(column_key)
        if column is None:
            continue
        label_text = base_label
        # Only the currently sorted column carries the direction indicator.
        if column_key == self.__sortColumnKey:
            label_text = f"{base_label} {arrow_down if self.__sortReverse else arrow_up}"
        update_table_column_label(self.table, column_key, Text(label_text))
def _apply_sort(self, *, preserve_row_key=None):
    """Re-sort the table by the active sort column, then refresh the header labels.

    When *preserve_row_key* is given, the cursor is moved back onto that row
    after sorting. With no active sort column only the headers are refreshed.
    """
    sort_column = self.__sortColumnKey
    if sort_column is not None:
        self.table.sort(
            sort_column,
            key=self._sort_key_for_column(sort_column),
            reverse=self.__sortReverse,
        )
        self._move_cursor_to_row_key(preserve_row_key)
    self._update_header_labels()
def getSelectedShowId(self):
@@ -76,13 +164,29 @@ class ShowsScreen(Screen):
row_key, col_key = self.table.coordinate_to_cell_key(self.table.cursor_coordinate)
if row_key is not None:
selected_row_data = self.table.get_row(row_key)
return selected_row_data[0]
selected_show = self.__showRowData.get(row_key)
return selected_show.getId() if selected_show is not None else None
except CellDoesNotExist:
return None
def action_back(self):
go_back_or_exit(self)
def on_data_table_header_selected(self, event: DataTable.HeaderSelected) -> None:
if event.data_table is not self.table:
return
selected_row_key = self._get_selected_row_key()
if self.__sortColumnKey == event.column_key:
self.__sortReverse = not self.__sortReverse
else:
self.__sortColumnKey = event.column_key
self.__sortReverse = False
self._apply_sort(preserve_row_key=selected_row_key)
@@ -90,9 +194,9 @@ class ShowsScreen(Screen):
self.app.push_screen(ShowDetailsScreen(), self.handle_new_screen)
def handle_new_screen(self, screenResult):
show = (screenResult['id'], screenResult['name'], screenResult['year'])
self.table.add_row(*map(str, show))
if isinstance(screenResult, ShowDescriptor):
row_key = self._add_show_row(screenResult)
self._apply_sort(preserve_row_key=row_key)
def action_edit_show(self):
@@ -110,7 +214,9 @@ class ShowsScreen(Screen):
row_key, col_key = self.table.coordinate_to_cell_key(self.table.cursor_coordinate)
self.table.update_cell(row_key, self.column_key_name, showDescriptor.getName())
self.table.update_cell(row_key, self.column_key_year, showDescriptor.getYear())
self.table.update_cell(row_key, self.column_key_year, showDescriptor.getYear())
self.__showRowData[row_key] = showDescriptor
self._apply_sort(preserve_row_key=row_key)
except CellDoesNotExist:
pass
@@ -131,15 +237,22 @@ class ShowsScreen(Screen):
try:
row_key, col_key = self.table.coordinate_to_cell_key(self.table.cursor_coordinate)
self.table.remove_row(row_key)
self.__showRowData.pop(row_key, None)
except CellDoesNotExist:
pass
def on_mount(self) -> None:
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
for show in self.__sc.getAllShows():
row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
self.table.add_row(*map(str, row))
self._add_show_row(show.getDescriptor(self.context))
self.__sortColumnKey = self.column_key_name
self._apply_sort()
def compose(self):
@@ -148,18 +261,31 @@ class ShowsScreen(Screen):
self.table = DataTable()
# Define the columns with headers
self.column_key_id = self.table.add_column("ID", width=10)
self.column_key_name = self.table.add_column("Name", width=50)
self.column_key_year = self.table.add_column("Year", width=10)
idLabel = t("ID")
nameLabel = t("Name")
yearLabel = t("Year")
self.column_key_id = add_auto_table_column(self.table, idLabel)
self.column_key_name = add_auto_table_column(self.table, nameLabel)
self.column_key_year = add_auto_table_column(self.table, yearLabel)
self.__columnLabels = {
self.column_key_id: idLabel,
self.column_key_name: nameLabel,
self.column_key_year: yearLabel,
}
self.table.cursor_type = 'row'
yield Header()
with Grid():
# Row 1
yield Static(t("Shows"), markup=False)
yield Static("Shows")
# Row 2
yield self.table
yield Footer()
f = Footer()
f.description = "yolo"
yield build_screen_log_pane()
yield f

View File

@@ -67,10 +67,11 @@ class TagController():
try:
s = self.Session()
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId),
MediaTag.key == str(tagKey))
if q.count():
tag = q.first()
tag = s.query(MediaTag).filter(
MediaTag.pattern_id == int(patternId),
MediaTag.key == str(tagKey),
).first()
if tag is not None:
s.delete(tag)
s.commit()
return True
@@ -107,12 +108,8 @@ class TagController():
try:
s = self.Session()
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId))
if q.count():
return {t.key:t.value for t in q.all()}
else:
return {}
tags = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId)).all()
return {t.key:t.value for t in tags}
except Exception as ex:
raise click.ClickException(f"TagController.findAllMediaTags(): {repr(ex)}")
@@ -125,12 +122,8 @@ class TagController():
try:
s = self.Session()
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId))
if q.count():
return {t.key:t.value for t in q.all()}
else:
return {}
tags = s.query(TrackTag).filter(TrackTag.track_id == int(trackId)).all()
return {t.key:t.value for t in tags}
except Exception as ex:
raise click.ClickException(f"TagController.findAllTracks(): {repr(ex)}")
@@ -142,12 +135,7 @@ class TagController():
try:
s = self.Session()
q = s.query(Track).filter(MediaTag.track_id == int(trackId), MediaTag.key == str(trackKey))
if q.count():
return q.first()
else:
return None
return s.query(Track).filter(MediaTag.track_id == int(trackId), MediaTag.key == str(trackKey)).first()
except Exception as ex:
raise click.ClickException(f"TagController.findMediaTag(): {repr(ex)}")
@@ -158,12 +146,10 @@ class TagController():
try:
s = self.Session()
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId), TrackTag.key == str(tagKey))
if q.count():
return q.first()
else:
return None
return s.query(TrackTag).filter(
TrackTag.track_id == int(trackId),
TrackTag.key == str(tagKey),
).first()
except Exception as ex:
raise click.ClickException(f"TagController.findTrackTag(): {repr(ex)}")
@@ -175,11 +161,9 @@ class TagController():
def deleteMediaTag(self, tagId) -> bool:
try:
s = self.Session()
q = s.query(MediaTag).filter(MediaTag.id == int(tagId))
tag = s.query(MediaTag).filter(MediaTag.id == int(tagId)).first()
if q.count():
tag = q.first()
if tag is not None:
s.delete(tag)
@@ -201,11 +185,9 @@ class TagController():
try:
s = self.Session()
q = s.query(TrackTag).filter(TrackTag.id == int(tagId))
tag = s.query(TrackTag).filter(TrackTag.id == int(tagId)).first()
if q.count():
tag = q.first()
if tag is not None:
s.delete(tag)

View File

@@ -2,19 +2,29 @@ from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button
from textual.containers import Grid
from .i18n import t
from .screen_support import build_screen_log_pane, go_back_or_exit
# Screen[dict[int, str, int]]
class TagDeleteScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 4 9;
grid-rows: 2 2 2 2 2 2 2 2 2;
grid-columns: 30 30 30 30;
grid-columns: 18 1fr 1fr 1fr;
height: 100%;
width: 100%;
min-width: 90;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -54,6 +64,9 @@ class TagDeleteScreen(Screen):
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
self.query_one("#keylabel", Static).update(str(self.__key))
self.query_one("#valuelabel", Static).update(str(self.__value))
@@ -64,24 +77,25 @@ class TagDeleteScreen(Screen):
with Grid():
#1
yield Static(f"Are you sure to delete this tag ?", id="toplabel", classes="five")
# Row 1
yield Static(t("Are you sure to delete this tag?"), id="toplabel", classes="five")
#2
yield Static("Key")
# Row 2
yield Static(t("Key"))
yield Static(" ", id="keylabel", classes="four")
#3
yield Static("Value")
# Row 3
yield Static(t("Value"))
yield Static(" ", id="valuelabel", classes="four")
#4
# Row 4
yield Static(" ", classes="five")
#9
yield Button("Delete", id="delete_button")
yield Button("Cancel", id="cancel_button")
# Row 5
yield Button(t("Delete"), id="delete_button")
yield Button(t("Cancel"), id="cancel_button")
yield build_screen_log_pane()
yield Footer()
@@ -90,9 +104,11 @@ class TagDeleteScreen(Screen):
if event.button.id == "delete_button":
tag = (self.__key, self.__value)
tag = (self.__key, self.__value)
self.dismiss(tag)
if event.button.id == "cancel_button":
self.app.pop_screen()
def action_back(self):
go_back_or_exit(self)

View File

@@ -2,19 +2,29 @@ from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input
from textual.containers import Grid
from .i18n import t
from .screen_support import build_screen_log_pane, go_back_or_exit
# Screen[dict[int, str, int]]
class TagDetailsScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 5 20;
grid-rows: 2 2 2 2 2 3 2 2 2 2 2 6 2 2 6 2 2 2 2 6;
grid-columns: 25 25 25 25 225;
grid-columns: 18 1fr 1fr 1fr 5fr;
height: 100%;
width: 100%;
min-width: 100;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -77,6 +87,9 @@ class TagDetailsScreen(Screen):
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
if self.__key is not None:
self.query_one("#key_input", Input).value = str(self.__key)
@@ -90,26 +103,28 @@ class TagDetailsScreen(Screen):
with Grid():
# 8
yield Static("Key")
# Row 1
yield Static(t("Key"))
yield Input(id="key_input", classes="four")
yield Static("Value")
# Row 2
yield Static(t("Value"))
yield Input(id="value_input", classes="four")
# 17
# Row 3
yield Static(" ", classes="five")
# 18
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
# Row 4
yield Button(t("Save"), id="save_button")
yield Button(t("Cancel"), id="cancel_button")
# 19
# Row 5
yield Static(" ", classes="five")
# 20
# Row 6
yield Static(" ", classes="five", id="messagestatic")
yield build_screen_log_pane()
yield Footer(id="footer")
@@ -120,6 +135,9 @@ class TagDetailsScreen(Screen):
return (tagKey, tagValue)
def action_back(self):
go_back_or_exit(self)
# Event handler for button press
def on_button_pressed(self, event: Button.Pressed) -> None:

View File

@@ -1,6 +1,8 @@
import os, requests, time, logging
import os, requests, time
from datetime import datetime
from .logging_utils import get_ffx_logger
class TMDB_REQUEST_EXCEPTION(Exception):
def __init__(self, statusCode, statusMessage):
@@ -27,8 +29,7 @@ class TmdbController():
self.__context = context
if context is None:
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
else:
self.__logger = context['logger']

View File

@@ -3,15 +3,28 @@ from enum import Enum
class TrackCodec(Enum):
H265 = {'identifier': 'hevc', 'format': 'h265', 'extension': 'h265' ,'label': 'H.265'}
H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
VP9 = {'identifier': 'vp9', 'format': 'ivf', 'extension': 'ivf' , 'label': 'VP9'}
H265 = {'identifier': 'hevc', 'format': None, 'extension': 'h265' ,'label': 'H.265'}
H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
MPEG4 = {'identifier': 'mpeg4', 'format': 'm4v', 'extension': 'm4v' ,'label': 'MPEG-4'}
MPEG2 = {'identifier': 'mpeg2video', 'format': 'mpeg2video', 'extension': 'mpg' ,'label': 'MPEG-2'}
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
OPUS = {'identifier': 'opus', 'format': 'opus', 'extension': 'opus' , 'label': 'Opus'}
AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
EAC3 = {'identifier': 'eac3', 'format': 'eac3', 'extension': 'eac3' , 'label': 'EAC3'}
DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
MP3 = {'identifier': 'mp3', 'format': 'mp3', 'extension': 'mp3' , 'label': 'MP3'}
WEBVTT = {'identifier': 'webvtt', 'format': 'webvtt', 'extension': 'vtt' , 'label': 'WebVTT'}
SRT = {'identifier': 'subrip', 'format': 'srt', 'extension': 'srt' , 'label': 'SRT'}
ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
VOBSUB = {'identifier': 'dvd_subtitle', 'format': None, 'extension': 'mkv' , 'label': 'VobSub'}
PNG = {'identifier': 'png', 'format': None, 'extension': 'png' , 'label': 'PNG'}
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
def identifier(self):
@@ -23,8 +36,8 @@ class TrackCodec(Enum):
return str(self.value['label'])
def format(self):
"""Returns the codec as single letter"""
return str(self.value['format'])
"""Returns the codec """
return self.value['format']
def extension(self):
"""Returns the corresponding extension"""

View File

@@ -19,6 +19,20 @@ class TrackController():
self.context = context
self.Session = self.context['database']['session'] # convenience
self.__configurationData = self.context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
def addTrack(self, trackDescriptor : TrackDescriptor, patternId = None):
@@ -29,7 +43,7 @@ class TrackController():
s = self.Session()
track = Track(pattern_id = patId,
track_type = int(trackDescriptor.getType().index()),
codec_name = str(trackDescriptor.getCodec().identifier()),
codec_name = str(trackDescriptor.getFormatDescriptor().identifier()),
index = int(trackDescriptor.getIndex()),
source_index = int(trackDescriptor.getSourceIndex()),
disposition_flags = int(TrackDisposition.toFlags(trackDescriptor.getDispositionSet())),
@@ -40,10 +54,12 @@ class TrackController():
for k,v in trackDescriptor.getTags().items():
tag = TrackTag(track_id = track.id,
key = k,
value = v)
s.add(tag)
# Filter tags that make no sense to preserve
if k not in self.__ignoreTrackKeys and k not in self.__removeTrackKeys:
tag = TrackTag(track_id = track.id,
key = k,
value = v)
s.add(tag)
s.commit()
except Exception as ex:
@@ -59,16 +75,14 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.id == int(trackId))
track = s.query(Track).filter(Track.id == int(trackId)).first()
if q.count():
track : Track = q.first()
if track is not None:
track.index = int(trackDescriptor.getIndex())
track.track_type = int(trackDescriptor.getType().index())
track.codec_name = str(trackDescriptor.getCodec().identifier())
track.codec_name = str(trackDescriptor.getFormatDescriptor().identifier())
track.audio_layout = int(trackDescriptor.getAudioLayout().index())
track.disposition_flags = int(TrackDisposition.toFlags(trackDescriptor.getDispositionSet()))
@@ -177,12 +191,10 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.pattern_id == int(patternId), Track.index == int(index))
if q.count():
return q.first()
else:
return None
return s.query(Track).filter(
Track.pattern_id == int(patternId),
Track.index == int(index),
).first()
except Exception as ex:
raise click.ClickException(f"TrackController.getTrack(): {repr(ex)}")
@@ -202,11 +214,9 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.pattern_id == patternId, Track.index == index)
track = s.query(Track).filter(Track.pattern_id == patternId, Track.index == index).first()
if q.count():
track : Track = q.first()
if track is not None:
if state:
track.setDisposition(disposition)
@@ -228,15 +238,21 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.id == int(trackId))
track = s.query(Track).filter(Track.id == int(trackId)).first()
if q.count():
patternId = int(q.first().pattern_id)
if track is not None:
patternId = int(track.pattern_id)
q_siblings = s.query(Track).filter(Track.pattern_id == patternId).order_by(Track.index)
siblingTracks = q_siblings.all()
if len(siblingTracks) <= 1:
raise click.ClickException(
f"Cannot delete the last track from pattern #{patternId}. Patterns must define at least one track."
)
index = 0
for track in q_siblings.all():
for track in siblingTracks:
if track.id == int(trackId):
s.delete(track)

View File

@@ -5,22 +5,29 @@ from textual.widgets import Header, Footer, Static, Button
from textual.containers import Grid
from ffx.track_descriptor import TrackDescriptor
from .track_controller import TrackController
from .i18n import t
from .screen_support import build_screen_log_pane, go_back_or_exit
# Screen[dict[int, str, int]]
class TrackDeleteScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 4 9;
grid-rows: 2 2 2 2 2 2 2 2 2;
grid-columns: 30 30 30 30;
grid-columns: 18 1fr 1fr 1fr;
height: 100%;
width: 100%;
min-width: 90;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -52,19 +59,17 @@ class TrackDeleteScreen(Screen):
def __init__(self, trackDescriptor : TrackDescriptor):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
if type(trackDescriptor) is not TrackDescriptor:
raise click.ClickException('TrackDeleteScreen.init(): trackDescriptor is required to be of type TrackDescriptor')
self.__tc = TrackController(context = self.context)
self.__trackDescriptor = trackDescriptor
def on_mount(self):
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
self.query_one("#subindexlabel", Static).update(str(self.__trackDescriptor.getSubIndex()))
self.query_one("#patternlabel", Static).update(str(self.__trackDescriptor.getPatternId()))
self.query_one("#languagelabel", Static).update(str(self.__trackDescriptor.getLanguage().label()))
@@ -77,38 +82,46 @@ class TrackDeleteScreen(Screen):
with Grid():
#1
yield Static(f"Are you sure to delete the following {self.__trackDescriptor.getType().label()} track?", id="toplabel", classes="four")
# Row 1
yield Static(
t(
"Are you sure to delete the following {track_type} track?",
track_type=t(self.__trackDescriptor.getType().label()),
),
id="toplabel",
classes="four",
)
#2
yield Static("sub index")
# Row 2
yield Static(t("sub index"))
yield Static(" ", id="subindexlabel", classes="three")
#3
yield Static("from pattern")
# Row 3
yield Static(t("from pattern"))
yield Static(" ", id="patternlabel", classes="three")
#4
# Row 4
yield Static(" ", classes="four")
#5
yield Static("Language")
# Row 5
yield Static(t("Language"))
yield Static(" ", id="languagelabel", classes="three")
#6
yield Static("Title")
# Row 6
yield Static(t("Title"))
yield Static(" ", id="titlelabel", classes="three")
#7
# Row 7
yield Static(" ", classes="four")
#8
# Row 8
yield Static(" ", classes="four")
#9
yield Button("Delete", id="delete_button")
yield Button("Cancel", id="cancel_button")
# Row 9
yield Button(t("Delete"), id="delete_button")
yield Button(t("Cancel"), id="cancel_button")
yield build_screen_log_pane()
yield Footer()
@@ -116,21 +129,10 @@ class TrackDeleteScreen(Screen):
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button.id == "delete_button":
track = self.__tc.getTrack(self.__trackDescriptor.getPatternId(), self.__trackDescriptor.getIndex())
if track is None:
raise click.ClickException(f"Track is none: patternId={self.__trackDescriptor.getPatternId()} type={self.__trackDescriptor.getType()} subIndex={self.__trackDescriptor.getSubIndex()}")
if track is not None:
if self.__tc.deleteTrack(track.getId()):
self.dismiss(self.__trackDescriptor)
else:
#TODO: Meldung
self.app.pop_screen()
self.dismiss(self.__trackDescriptor)
if event.button.id == "cancel_button":
self.app.pop_screen()
def action_back(self):
go_back_or_exit(self)

View File

@@ -1,13 +1,14 @@
import logging
from typing import Self
from .attachment_format import AttachmentFormat
from .iso_language import IsoLanguage
from .track_type import TrackType
from .audio_layout import AudioLayout
from .track_disposition import TrackDisposition
from .track_codec import TrackCodec
from .logging_utils import get_ffx_logger
from .helper import dictDiff, setDiff
# from .helper import dictDiff, setDiff
class TrackDescriptor:
@@ -26,6 +27,7 @@ class TrackDescriptor:
TRACK_TYPE_KEY = "track_type"
CODEC_KEY = "codec_name"
ATTACHMENT_FORMAT_KEY = "attachment_format"
AUDIO_LAYOUT_KEY = "audio_layout"
FFPROBE_INDEX_KEY = "index"
@@ -33,8 +35,7 @@ class TrackDescriptor:
FFPROBE_TAGS_KEY = "tags"
FFPROBE_CODEC_TYPE_KEY = "codec_type"
FFPROBE_CODEC_KEY = "codec_name"
CODEC_PGS = 'hdmv_pgs_subtitle'
def __init__(self, **kwargs):
@@ -47,8 +48,7 @@ class TrackDescriptor:
self.__logger = self.__context['logger']
else:
self.__context = {}
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
if TrackDescriptor.ID_KEY in kwargs.keys():
if type(kwargs[TrackDescriptor.ID_KEY]) is not int:
@@ -112,15 +112,6 @@ class TrackDescriptor:
else:
self.__trackType = TrackType.UNKNOWN
if TrackDescriptor.CODEC_KEY in kwargs.keys():
if type(kwargs[TrackDescriptor.CODEC_KEY]) is not TrackCodec:
raise TypeError(
f"TrackDesciptor.__init__(): Argument {TrackDescriptor.CODEC_KEY} is required to be of type TrackCodec"
)
self.__trackCodec = kwargs[TrackDescriptor.CODEC_KEY]
else:
self.__trackCodec = TrackCodec.UNKNOWN
if TrackDescriptor.TAGS_KEY in kwargs.keys():
if type(kwargs[TrackDescriptor.TAGS_KEY]) is not dict:
raise TypeError(
@@ -153,6 +144,34 @@ class TrackDescriptor:
else:
self.__audioLayout = AudioLayout.LAYOUT_UNDEFINED
self.__trackCodec = TrackCodec.UNKNOWN
self.__attachmentFormat = AttachmentFormat.UNKNOWN
if self.__trackType == TrackType.ATTACHMENT:
if TrackDescriptor.ATTACHMENT_FORMAT_KEY in kwargs.keys():
if type(kwargs[TrackDescriptor.ATTACHMENT_FORMAT_KEY]) is not AttachmentFormat:
raise TypeError(
f"TrackDesciptor.__init__(): Argument {TrackDescriptor.ATTACHMENT_FORMAT_KEY} is required to be of type AttachmentFormat"
)
self.__attachmentFormat = kwargs[TrackDescriptor.ATTACHMENT_FORMAT_KEY]
elif TrackDescriptor.CODEC_KEY in kwargs.keys():
legacyCodec = kwargs[TrackDescriptor.CODEC_KEY]
if type(legacyCodec) is AttachmentFormat:
self.__attachmentFormat = legacyCodec
elif type(legacyCodec) is TrackCodec:
self.__attachmentFormat = AttachmentFormat.fromTrackCodec(legacyCodec)
else:
raise TypeError(
f"TrackDesciptor.__init__(): Argument {TrackDescriptor.CODEC_KEY} is required to be of type TrackCodec for legacy attachment compatibility"
)
else:
if TrackDescriptor.CODEC_KEY in kwargs.keys():
if type(kwargs[TrackDescriptor.CODEC_KEY]) is not TrackCodec:
raise TypeError(
f"TrackDesciptor.__init__(): Argument {TrackDescriptor.CODEC_KEY} is required to be of type TrackCodec"
)
self.__trackCodec = kwargs[TrackDescriptor.CODEC_KEY]
@classmethod
def fromFfprobe(cls, streamObj, subIndex: int = -1):
"""Processes ffprobe stream data as array with elements according to the following example
@@ -217,7 +236,12 @@ class TrackDescriptor:
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = trackType
kwargs[TrackDescriptor.CODEC_KEY] = TrackCodec.identify(streamObj[TrackDescriptor.FFPROBE_CODEC_KEY])
if trackType == TrackType.ATTACHMENT:
kwargs[TrackDescriptor.ATTACHMENT_FORMAT_KEY] = AttachmentFormat.identifyFfprobeStream(streamObj)
else:
kwargs[TrackDescriptor.CODEC_KEY] = TrackCodec.identify(
streamObj.get(TrackDescriptor.FFPROBE_CODEC_KEY)
)
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = (
{
@@ -279,6 +303,14 @@ class TrackDescriptor:
def getCodec(self) -> TrackCodec:
return self.__trackCodec
def getAttachmentFormat(self) -> AttachmentFormat:
return self.__attachmentFormat
def getFormatDescriptor(self):
if self.__trackType == TrackType.ATTACHMENT:
return self.__attachmentFormat
return self.__trackCodec
def getLanguage(self):
if "language" in self.__trackTags.keys():
return IsoLanguage.findThreeLetter(self.__trackTags["language"])
@@ -321,27 +353,53 @@ class TrackDescriptor:
else:
self.__dispositionSet.discard(disposition)
def compare(self, vsTrackDescriptor: Self):
compareResult = {}
tagsDiffResult = dictDiff(vsTrackDescriptor.getTags(), self.getTags())
if tagsDiffResult:
compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
vsDispositions = vsTrackDescriptor.getDispositionSet()
dispositions = self.getDispositionSet()
dispositionDiffResult = setDiff(vsDispositions, dispositions)
if dispositionDiffResult:
compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
return compareResult
# def compare(self, vsTrackDescriptor: Self):
#
# compareResult = {}
#
# tagsDiffResult = dictKeysDiff(vsTrackDescriptor.getTags(), self.getTags())
#
# if tagsDiffResult:
# compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
#
# vsDispositions = vsTrackDescriptor.getDispositionSet()
# dispositions = self.getDispositionSet()
#
# dispositionDiffResult = setDiff(vsDispositions, dispositions)
#
# if dispositionDiffResult:
# compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
#
# return compareResult
def setExternalSourceFilePath(self, filePath: str):
self.__externalSourceFilePath = str(filePath)
def getExternalSourceFilePath(self):
return self.__externalSourceFilePath
def clone(self, context: dict | None = None):
kwargs = {
TrackDescriptor.ID_KEY: int(self.__trackId),
TrackDescriptor.PATTERN_ID_KEY: int(self.__patternId),
TrackDescriptor.EXTERNAL_SOURCE_FILE_PATH_KEY: str(self.__externalSourceFilePath),
TrackDescriptor.INDEX_KEY: int(self.__index),
TrackDescriptor.SOURCE_INDEX_KEY: int(self.__sourceIndex),
TrackDescriptor.SUB_INDEX_KEY: int(self.__subIndex),
TrackDescriptor.TRACK_TYPE_KEY: self.__trackType,
TrackDescriptor.TAGS_KEY: dict(self.__trackTags),
TrackDescriptor.DISPOSITION_SET_KEY: set(self.__dispositionSet),
TrackDescriptor.AUDIO_LAYOUT_KEY: self.__audioLayout,
}
if self.__trackType == TrackType.ATTACHMENT:
kwargs[TrackDescriptor.ATTACHMENT_FORMAT_KEY] = self.__attachmentFormat
else:
kwargs[TrackDescriptor.CODEC_KEY] = self.__trackCodec
if context is not None:
kwargs[TrackDescriptor.CONTEXT_KEY] = context
elif self.__context:
kwargs[TrackDescriptor.CONTEXT_KEY] = self.__context
return TrackDescriptor(**kwargs)

View File

@@ -3,40 +3,45 @@ import click
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, SelectionList, Select, DataTable, Input
from textual.containers import Grid
from ffx.model.pattern import Pattern
from .track_controller import TrackController
from .pattern_controller import PatternController
from .tag_controller import TagController
from .track_type import TrackType
from .track_codec import TrackCodec
from .iso_language import IsoLanguage
from .track_disposition import TrackDisposition
from .audio_layout import AudioLayout
from .track_descriptor import TrackDescriptor
from .tag_details_screen import TagDetailsScreen
from .tag_delete_screen import TagDeleteScreen
from textual.widgets._data_table import CellDoesNotExist
from .attachment_format import AttachmentFormat
from .audio_layout import AudioLayout
from .iso_language import IsoLanguage
from .tag_delete_screen import TagDeleteScreen
from .tag_details_screen import TagDetailsScreen
from .track_codec import TrackCodec
from .track_descriptor import TrackDescriptor
from .track_disposition import TrackDisposition
from .track_type import TrackType
from .i18n import t
from .screen_support import (
add_auto_table_column,
build_screen_bootstrap,
build_screen_log_pane,
go_back_or_exit,
populate_tag_table,
)
# Screen[dict[int, str, int]]
class TrackDetailsScreen(Screen):
BINDINGS = [
("escape", "back", t("Back")),
]
CSS = """
Grid {
grid-size: 5 24;
grid-rows: 2 2 2 2 2 3 3 2 2 3 2 2 2 2 2 6 2 2 6 2 2 2;
grid-columns: 25 25 25 25 125;
grid-columns: 18 1fr 1fr 1fr 4fr;
height: 100%;
width: 100%;
min-width: 115;
padding: 1;
overflow-x: auto;
overflow-y: auto;
}
Input {
@@ -55,6 +60,7 @@ class TrackDetailsScreen(Screen):
DataTable {
min-height: 6;
width: 100%;
}
DataTable .datatable--cursor {
@@ -77,7 +83,7 @@ class TrackDetailsScreen(Screen):
.three {
column-span: 3;
}
.four {
column-span: 4;
}
@@ -95,339 +101,478 @@ class TrackDetailsScreen(Screen):
}
"""
def __init__(self, trackDescriptor : TrackDescriptor = None, patternId = None, trackType : TrackType = None, index = None, subIndex = None):
def __init__(
self,
trackDescriptor: TrackDescriptor = None,
patternId=None,
patternLabel: str = "",
siblingTrackDescriptors=None,
trackType: TrackType = None,
index=None,
subIndex=None,
metadata_only: bool = False,
):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
bootstrap = build_screen_bootstrap(self.app.getContext())
self.context = bootstrap.context
self.__tc = TrackController(context = self.context)
self.__pc = PatternController(context = self.context)
self.__tac = TagController(context = self.context)
self.__removeTrackKeys = bootstrap.remove_track_keys
self.__ignoreTrackKeys = bootstrap.ignore_track_keys
self.__tagRowData: dict[object, tuple[str, str]] = {}
self.__isNew = trackDescriptor is None
self.__trackDescriptor = trackDescriptor
self.__patternId = (
int(patternId)
if patternId is not None
else (
int(trackDescriptor.getPatternId())
if trackDescriptor is not None and trackDescriptor.getPatternId() != -1
else -1
)
)
self.__patternLabel = str(patternLabel)
self.__siblingTrackDescriptors = list(siblingTrackDescriptors or [])
self.__metadataOnly = bool(metadata_only)
self.__applyNormalization = bool(
self.context.get("apply_metadata_normalization", True)
)
if self.__isNew:
self.__trackType = trackType
self.__trackCodec = TrackCodec.UNKNOWN
self.__attachmentFormat = AttachmentFormat.UNKNOWN
self.__audioLayout = AudioLayout.LAYOUT_UNDEFINED
self.__index = index
self.__subIndex = subIndex
self.__trackDescriptor : TrackDescriptor = None
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else {}
self.__draftTrackTags = {}
initial_language = IsoLanguage.UNDEFINED
initial_title = ""
else:
self.__trackType = trackDescriptor.getType()
self.__trackCodec = trackDescriptor.getCodec()
self.__attachmentFormat = trackDescriptor.getAttachmentFormat()
self.__audioLayout = trackDescriptor.getAudioLayout()
self.__index = trackDescriptor.getIndex()
self.__subIndex = trackDescriptor.getSubIndex()
self.__trackDescriptor : TrackDescriptor = trackDescriptor
self.__pattern : Pattern = self.__pc.getPattern(self.__trackDescriptor.getPatternId())
self.__draftTrackTags = {
key: value
for key, value in trackDescriptor.getTags().items()
if key not in ("language", "title")
}
initial_language = trackDescriptor.getLanguage()
initial_title = trackDescriptor.getTitle()
initialTitleEmpty = not str(initial_title).strip()
self.__titleAutoManaged = bool(
initialTitleEmpty
and (
initial_language == IsoLanguage.UNDEFINED
or (self.__metadataOnly and self.__applyNormalization)
)
)
self.__suppressTitleChanged = False
self.__lastAutoTitle = ""
def _descriptor_refs_same_track(self, descriptor: TrackDescriptor) -> bool:
if self.__trackDescriptor is None:
return False
if descriptor.getId() != -1 and self.__trackDescriptor.getId() != -1:
return descriptor.getId() == self.__trackDescriptor.getId()
return (
descriptor.getPatternId() == self.__trackDescriptor.getPatternId()
and descriptor.getIndex() == self.__trackDescriptor.getIndex()
and descriptor.getSubIndex() == self.__trackDescriptor.getSubIndex()
)
def updateTags(self):
self.__tagRowData = populate_tag_table(
self.trackTagsTable,
self.__draftTrackTags,
ignore_keys=self.__ignoreTrackKeys,
remove_keys=self.__removeTrackKeys,
)
self.trackTagsTable.clear()
@staticmethod
def build_language_options():
return [
(language.label(), language)
for language in sorted(
[language for language in IsoLanguage if language != IsoLanguage.UNDEFINED],
key=lambda language: language.label().casefold(),
)
]
trackId = self.__trackDescriptor.getId()
@staticmethod
def language_select_value(language):
return Select.NULL if language == IsoLanguage.UNDEFINED else language
if trackId != -1:
def _apply_auto_title_for_language(self, language: IsoLanguage):
titleInput = self.query_one("#title_input", Input)
autoTitle = "" if language == IsoLanguage.UNDEFINED else language.label()
self.__suppressTitleChanged = True
titleInput.value = autoTitle
self.__suppressTitleChanged = False
self.__lastAutoTitle = autoTitle
trackTags = self.__tac.findAllTrackTags(trackId)
def _handle_language_selection_changed(self, language):
if not self.__titleAutoManaged:
return
for k,v in trackTags.items():
if not isinstance(language, IsoLanguage):
language = IsoLanguage.UNDEFINED
if k != 'language' and k != 'title':
row = (k,v)
self.trackTagsTable.add_row(*map(str, row))
self._apply_auto_title_for_language(language)
def _handle_title_input_changed(self, titleValue: str):
if self.__suppressTitleChanged or not self.__titleAutoManaged:
return
language = self.query_one("#language_select", Select).value
if not isinstance(language, IsoLanguage):
language = IsoLanguage.UNDEFINED
expectedAutoTitle = "" if language == IsoLanguage.UNDEFINED else language.label()
if str(titleValue) != expectedAutoTitle:
self.__titleAutoManaged = False
def on_mount(self):
self.query_one("#index_label", Static).update(str(self.__index) if self.__index is not None else '-')
self.query_one("#subindex_label", Static).update(str(self.__subIndex)if self.__subIndex is not None else '-')
if getattr(self, 'context', {}).get('debug', False):
self.title = f"{self.app.title} - {self.__class__.__name__}"
if self.__pattern is not None:
self.query_one("#pattern_label", Static).update(self.__pattern.getPattern())
self.query_one("#index_label", Static).update(
str(self.__index) if self.__index is not None else "-"
)
self.query_one("#subindex_label", Static).update(
str(self.__subIndex) if self.__subIndex is not None else "-"
)
self.query_one("#pattern_label", Static).update(self.__patternLabel)
if self.__trackType is not None:
self.query_one("#type_select", Select).value = self.__trackType.label()
if self.__trackType == TrackType.AUDIO:
self.query_one("#audio_layout_select", Select).value = self.__audioLayout.label()
self.query_one("#type_select", Select).value = self.__trackType
for d in TrackDisposition:
self.query_one("#audio_layout_select", Select).value = self.__audioLayout
dispositionIsSet = (self.__trackDescriptor is not None
and d in self.__trackDescriptor.getDispositionSet())
for disposition in TrackDisposition:
dispositionOption = (d.label(), d.index(), dispositionIsSet)
self.query_one("#dispositions_selection_list", SelectionList).add_option(dispositionOption)
dispositionIsSet = (
self.__trackDescriptor is not None
and disposition in self.__trackDescriptor.getDispositionSet()
)
dispositionOption = (
t(disposition.label()),
disposition.index(),
dispositionIsSet,
)
self.query_one("#dispositions_selection_list", SelectionList).add_option(
dispositionOption
)
if self.__trackDescriptor is not None:
self.query_one("#language_select", Select).value = self.__trackDescriptor.getLanguage().label()
self.query_one("#language_select", Select).value = self.language_select_value(
self.__trackDescriptor.getLanguage()
)
self.query_one("#title_input", Input).value = self.__trackDescriptor.getTitle()
if self.__titleAutoManaged and not self.__trackDescriptor.getTitle().strip():
self._apply_auto_title_for_language(self.__trackDescriptor.getLanguage())
self.updateTags()
if self.__metadataOnly:
self.query_one("#type_select", Select).disabled = True
self.query_one("#audio_layout_select", Select).disabled = True
def on_select_changed(self, event: Select.Changed) -> None:
if event.select.id == "language_select":
self._handle_language_selection_changed(event.value)
def on_input_changed(self, event: Input.Changed) -> None:
if event.input.id == "title_input":
self._handle_title_input_changed(event.value)
def compose(self):
self.trackTagsTable = DataTable(classes="five")
# Define the columns with headers
self.column_key_track_tag_key = self.trackTagsTable.add_column("Key", width=50)
self.column_key_track_tag_value = self.trackTagsTable.add_column("Value", width=100)
self.column_key_track_tag_key = add_auto_table_column(self.trackTagsTable, t("Key"))
self.column_key_track_tag_value = add_auto_table_column(self.trackTagsTable, t("Value"))
self.trackTagsTable.cursor_type = 'row'
languages = [l.label() for l in IsoLanguage]
self.trackTagsTable.cursor_type = "row"
yield Header()
with Grid():
# 1
yield Static(f"New stream" if self.__isNew else f"Edit stream", id="toplabel", classes="five")
# Row 1
yield Static(
t("New stream") if self.__isNew else t("Edit stream"),
id="toplabel",
classes="five",
)
# 2
yield Static("for pattern")
yield Static("", id="pattern_label", classes="four")
# Row 2
yield Static(t("for pattern"))
yield Static("", id="pattern_label", classes="four", markup=False)
# 3
# Row 3
yield Static(" ", classes="five")
# 4
yield Static("Index / Subindex")
# Row 4
yield Static(t("Index / Subindex"))
yield Static("", id="index_label", classes="two")
yield Static("", id="subindex_label", classes="two")
# 5
# Row 5
yield Static(" ", classes="five")
# 6
yield Static("Type")
yield Select.from_values([t.label() for t in TrackType], classes="four", id="type_select")
# 7
if self.__trackType == TrackType.AUDIO:
yield Static("Audio Layout")
yield Select.from_values([t.label() for t in AudioLayout], classes="four", id="audio_layout_select")
else:
yield Static(" ", classes="five")
# 8
yield Static(" ", classes="five")
# 9
yield Static(" ", classes="five")
# 10
yield Static("Language")
yield Select.from_values(languages, classes="four", id="language_select")
# 11
yield Static(" ", classes="five")
# 12
yield Static("Title")
yield Input(id="title_input", classes="four")
# 13
yield Static(" ", classes="five")
# 14
yield Static(" ", classes="five")
# 15
yield Static("Stream tags")
yield Static(" ")
yield Button("Add", id="button_add_stream_tag")
yield Button("Edit", id="button_edit_stream_tag")
yield Button("Delete", id="button_delete_stream_tag")
# 16
yield self.trackTagsTable
# 17
yield Static(" ", classes="five")
# 18
yield Static("Stream dispositions", classes="five")
# 19
yield SelectionList[int](
classes="five",
id = "dispositions_selection_list"
# Row 6
yield Static(t("Type"))
yield Select(
[(t(trackType.label()), trackType) for trackType in TrackType],
classes="four",
id="type_select",
)
# 20
yield Static(" ", classes="five")
# 21
# Row 7
yield Static(t("Audio Layout"))
yield Select(
[(t(layout.label()), layout) for layout in AudioLayout],
classes="four",
id="audio_layout_select",
)
# Row 8
yield Static(" ", classes="five")
# 22
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
# 23
# Row 9
yield Static(" ", classes="five")
# 24
# Row 10
yield Static(t("Language"))
yield Select(
self.build_language_options(),
prompt=t("Select"),
classes="four",
id="language_select",
)
# Row 11
yield Static(" ", classes="five")
# Row 12
yield Static(t("Title"))
yield Input(id="title_input", classes="four")
# Row 13
yield Static(" ", classes="five")
# Row 14
yield Static(" ", classes="five")
# Row 15
yield Static(t("Stream tags"))
yield Static(" ")
yield Button(t("Add"), id="button_add_stream_tag")
yield Button(t("Edit"), id="button_edit_stream_tag")
yield Button(t("Delete"), id="button_delete_stream_tag")
# Row 16
yield self.trackTagsTable
# Row 17
yield Static(" ", classes="five")
# Row 18
yield Static(t("Stream dispositions"), classes="five")
# Row 19
yield SelectionList[int](
classes="five",
id="dispositions_selection_list",
)
# Row 20
yield Static(" ", classes="five")
# Row 21
yield Static(" ", classes="five")
# Row 22
yield Button(t("Save"), id="save_button")
yield Button(t("Cancel"), id="cancel_button")
# Row 23
yield Static(" ", classes="five")
# Row 24
yield Static(" ", classes="five", id="messagestatic")
yield build_screen_log_pane()
yield Footer(id="footer")
def getTrackDescriptorFromInput(self):
kwargs = {}
kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__pattern.getId())
if self.__trackDescriptor is not None and self.__trackDescriptor.getId() != -1:
kwargs[TrackDescriptor.ID_KEY] = self.__trackDescriptor.getId()
kwargs[TrackDescriptor.INDEX_KEY] = self.__index
kwargs[TrackDescriptor.SUB_INDEX_KEY] = self.__subIndex #!
if self.__patternId != -1:
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__patternId)
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(self.query_one("#type_select", Select).value)
kwargs[TrackDescriptor.INDEX_KEY] = int(self.__index)
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = (
int(self.__trackDescriptor.getSourceIndex())
if self.__trackDescriptor is not None
else int(self.__index)
)
if self.__subIndex is not None and int(self.__subIndex) >= 0:
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(self.__subIndex)
kwargs[TrackDescriptor.CODEC_KEY] = self.__trackCodec
if self.__trackType == TrackType.AUDIO:
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(self.query_one("#audio_layout_select", Select).value)
selectedTrackType = self.query_one("#type_select", Select).value
if not isinstance(selectedTrackType, TrackType):
selectedTrackType = TrackType.UNKNOWN
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = selectedTrackType
if selectedTrackType == TrackType.ATTACHMENT:
kwargs[TrackDescriptor.ATTACHMENT_FORMAT_KEY] = self.__attachmentFormat
else:
kwargs[TrackDescriptor.CODEC_KEY] = self.__trackCodec
if selectedTrackType == TrackType.AUDIO:
selectedAudioLayout = self.query_one("#audio_layout_select", Select).value
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = (
selectedAudioLayout
if isinstance(selectedAudioLayout, AudioLayout)
else AudioLayout.LAYOUT_UNDEFINED
)
else:
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.LAYOUT_UNDEFINED
trackTags = {}
trackTags = dict(self.__draftTrackTags)
language = self.query_one("#language_select", Select).value
if language:
trackTags['language'] = IsoLanguage.find(language).threeLetter()
if isinstance(language, IsoLanguage):
trackTags["language"] = language.threeLetter()
title = self.query_one("#title_input", Input).value
if title:
trackTags['title'] = title
trackTags["title"] = title
tableTags = {row[0]:row[1] for r in self.trackTagsTable.rows if (row := self.trackTagsTable.get_row(r)) and row[0] != 'language' and row[0] != 'title'}
kwargs[TrackDescriptor.TAGS_KEY] = trackTags
kwargs[TrackDescriptor.TAGS_KEY] = trackTags | tableTags
dispositionFlags = sum([2**f for f in self.query_one("#dispositions_selection_list", SelectionList).selected])
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = TrackDisposition.toSet(dispositionFlags)
dispositionFlags = sum(
[2 ** flag for flag in self.query_one("#dispositions_selection_list", SelectionList).selected]
)
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = TrackDisposition.toSet(
dispositionFlags
)
return TrackDescriptor(**kwargs)
def action_back(self):
go_back_or_exit(self)
def getSelectedTag(self):
try:
# Fetch the currently selected row when 'Enter' is pressed
#selected_row_index = self.table.cursor_row
row_key, col_key = self.trackTagsTable.coordinate_to_cell_key(self.trackTagsTable.cursor_coordinate)
row_key, _ = self.trackTagsTable.coordinate_to_cell_key(
self.trackTagsTable.cursor_coordinate
)
if row_key is not None:
selected_tag_data = self.trackTagsTable.get_row(row_key)
return self.__tagRowData.get(row_key)
tagKey = str(selected_tag_data[0])
tagValue = str(selected_tag_data[1])
return tagKey, tagValue
else:
return None
return None
except CellDoesNotExist:
return None
# Event handler for button press
def on_button_pressed(self, event: Button.Pressed) -> None:
    """Dispatch presses of this screen's buttons.

    Handles save/cancel plus the add/edit/delete buttons of the stream-tag
    table. Saving validates that at most one sibling stream of the same
    type carries the 'default' or 'forced' disposition flag.
    """
    if event.button.id == "save_button":
        trackDescriptor = self.getTrackDescriptorFromInput()
        # Siblings = all other descriptors of the same track type; the track
        # currently being edited must not count against itself.
        siblingTrackList = [
            descriptor
            for descriptor in self.__siblingTrackDescriptors
            if not self._descriptor_refs_same_track(descriptor)
        ]
        siblingTrackList = [
            descriptor
            for descriptor in siblingTrackList
            if descriptor.getType() == trackDescriptor.getType()
        ]
        numDefaultTracks = len(
            [
                descriptor
                for descriptor in siblingTrackList
                if TrackDisposition.DEFAULT in descriptor.getDispositionSet()
            ]
        )
        numForcedTracks = len(
            [
                descriptor
                for descriptor in siblingTrackList
                if TrackDisposition.FORCED in descriptor.getDispositionSet()
            ]
        )
        if self.__isNew:
            # A new track is appended behind its siblings.
            trackDescriptor.setSubIndex(len(siblingTrackList))
        elif self.__subIndex is not None and int(self.__subIndex) >= 0:
            trackDescriptor.setSubIndex(int(self.__subIndex))
        # Reject a second 'default' or 'forced' stream of the same type.
        if (
            TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet()
            and numDefaultTracks
        ) or (
            TrackDisposition.FORCED in trackDescriptor.getDispositionSet()
            and numForcedTracks
        ):
            self.query_one("#messagestatic", Static).update(
                t(
                    "Cannot add another stream with disposition flag 'default' or 'forced' set"
                )
            )
        else:
            # Clear any previous validation message.
            self.query_one("#messagestatic", Static).update(" ")
            if self.__isNew:
                # Add the track via this screen.
                self.__tc.addTrack(trackDescriptor)
                self.dismiss(trackDescriptor)
            else:
                # Update the track via the details screen.
                track = self.__tc.getTrack(self.__pattern.getId(), self.__index)
                if self.__tc.updateTrack(track.getId(), trackDescriptor):
                    self.dismiss(trackDescriptor)
                else:
                    # NOTE(review): the merged diff showed both pop_screen()
                    # and dismiss() here; dismissing mirrors the newer flow —
                    # confirm the intended failure behavior.
                    self.dismiss(trackDescriptor)
    if event.button.id == "cancel_button":
        self.app.pop_screen()
    if event.button.id == "button_add_stream_tag":
        self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
    if event.button.id == "button_edit_stream_tag":
        selectedTag = self.getSelectedTag()
        if selectedTag is not None:
            self.app.push_screen(
                TagDetailsScreen(key=selectedTag[0], value=selectedTag[1]),
                self.handle_update_tag,
            )
    if event.button.id == "button_delete_stream_tag":
        selectedTag = self.getSelectedTag()
        if selectedTag is not None:
            self.app.push_screen(
                TagDeleteScreen(key=selectedTag[0], value=selectedTag[1]),
                self.handle_delete_tag,
            )
def handle_update_tag(self, tag):
    """Callback from TagDetailsScreen: store an added/edited tag in the draft set.

    ``tag`` is a (key, value) pair, or None when the dialog was cancelled.
    The draft tags are persisted later when the track descriptor is saved.
    """
    if tag is None:
        # Dialog was cancelled; nothing to record.
        return
    self.__draftTrackTags[str(tag[0])] = str(tag[1])
    self.updateTags()
def handle_delete_tag(self, trackTag):
    """Callback from TagDeleteScreen: remove a tag from the draft set.

    ``trackTag`` is a (key, value) pair, or None when the dialog was
    cancelled. Deleting a key that is not present is a no-op.
    """
    if trackTag is None:
        # Dialog was cancelled; nothing to delete.
        return
    # pop() with a default so an already-absent key does not raise.
    self.__draftTrackTags.pop(str(trackTag[0]), None)
    self.updateTags()

View File

@@ -5,6 +5,7 @@ class TrackType(Enum):
VIDEO = {'label': 'video', 'index': 1}
AUDIO = {'label': 'audio', 'index': 2}
SUBTITLE = {'label': 'subtitle', 'index': 3}
ATTACHMENT = {'label': 'attachment', 'index': 4}
UNKNOWN = {'label': 'unknown', 'index': 0}

View File

@@ -4,7 +4,9 @@ class VideoEncoder(Enum):
AV1 = {'label': 'av1', 'index': 1}
VP9 = {'label': 'vp9', 'index': 2}
H264 = {'label': 'h264', 'index': 3}
COPY = {'label': 'copy', 'index': 4}
UNDEFINED = {'label': 'undefined', 'index': 0}
def label(self):

1
tests/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Repo-root tests package for legacy and future test code.

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,138 @@
from __future__ import annotations
from pathlib import Path
import tempfile
import unittest
from tests.support.ffx_bundle import (
PatternTrackSpec,
SourceTrackSpec,
add_show,
build_controller_context,
create_source_fixture,
dispose_controller_context,
expected_output_path,
run_ffx_convert,
)
from ffx.pattern_controller import PatternController
from ffx.track_type import TrackType
# Optional pytest markers: the suite still runs under plain unittest when
# pytest is not installed.
try:
    import pytest
except ImportError:  # pragma: no cover - unittest-only environments
    pytest = None

if pytest is not None:
    pytestmark = [pytest.mark.integration, pytest.mark.pattern_management]


class PatternManagementCliTests(unittest.TestCase):
    """Integration tests for pattern matching behavior of the ffx CLI."""

    def setUp(self):
        # Each test gets an isolated working directory, fake HOME and database.
        self.tempdir = tempfile.TemporaryDirectory()
        self.workdir = Path(self.tempdir.name)
        self.home_dir = self.workdir / "home"
        self.home_dir.mkdir()
        self.database_path = self.workdir / "test.db"

    def tearDown(self):
        self.tempdir.cleanup()

    def prepare_duplicate_matching_patterns(self):
        """Seed two shows whose patterns both match 'dup_*' filenames."""
        context = build_controller_context(self.database_path)
        try:
            add_show(context, show_id=1)
            add_show(context, show_id=2)
            controller = PatternController(context)
            track_descriptors = [
                PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO)
            ]

            def to_track_descriptor(spec: PatternTrackSpec):
                # Local import keeps the module importable without ffx installed.
                from ffx.track_descriptor import TrackDescriptor

                kwargs = {
                    TrackDescriptor.INDEX_KEY: spec.index,
                    TrackDescriptor.SOURCE_INDEX_KEY: spec.source_index,
                    TrackDescriptor.TRACK_TYPE_KEY: spec.track_type,
                    TrackDescriptor.TAGS_KEY: dict(spec.tags),
                    TrackDescriptor.DISPOSITION_SET_KEY: set(spec.dispositions),
                }
                return TrackDescriptor(**kwargs)

            controller.savePatternSchema(
                {"show_id": 1, "pattern": r"^dup_(s[0-9]+e[0-9]+)\.mkv$"},
                [to_track_descriptor(track_descriptors[0])],
            )
            controller.savePatternSchema(
                {"show_id": 2, "pattern": r"^dup_.*$"},
                [to_track_descriptor(track_descriptors[0])],
            )
        finally:
            dispose_controller_context(context)

    def test_convert_fails_when_filename_matches_more_than_one_pattern(self):
        self.prepare_duplicate_matching_patterns()
        source_filename = "dup_s01e01.mkv"
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng"),
            ],
        )
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            str(source_path),
        )
        self.assertNotEqual(completed.returncode, 0)
        error_output = f"{completed.stdout}\n{completed.stderr}"
        self.assertIn("matched more than one pattern", error_output)
        self.assertFalse(expected_output_path(self.workdir, source_filename).exists())

    def test_convert_can_ignore_duplicate_matches_when_no_pattern_is_requested(self):
        self.prepare_duplicate_matching_patterns()
        source_filename = "dup_s01e01.mkv"
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng"),
            ],
        )
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-pattern",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            str(source_path),
        )
        self.assertEqual(
            0,
            completed.returncode,
            f"STDOUT:\n{completed.stdout}\nSTDERR:\n{completed.stderr}",
        )
        self.assertTrue(expected_output_path(self.workdir, source_filename).exists())


if __name__ == "__main__":
    unittest.main()

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,436 @@
from __future__ import annotations
import json
from pathlib import Path
import tempfile
import unittest
from tests.support.ffx_bundle import (
PatternTrackSpec,
SourceTrackSpec,
create_source_fixture,
expected_output_path,
extract_first_subtitle_text,
ffprobe_json,
get_tag,
prepare_pattern_database,
run_ffx_convert,
write_vtt,
)
from ffx.track_type import TrackType
# Optional pytest markers: the suite still runs under plain unittest when
# pytest is not installed.
try:
    import pytest
except ImportError:  # pragma: no cover - unittest-only environments
    pytest = None

if pytest is not None:
    pytestmark = [pytest.mark.integration, pytest.mark.subtrack_mapping]


class SubtrackMappingBundleTests(unittest.TestCase):
    """Integration tests for stream mapping/reordering during ffx convert."""

    def setUp(self):
        # Each test gets an isolated working directory, fake HOME and database.
        self.tempdir = tempfile.TemporaryDirectory()
        self.workdir = Path(self.tempdir.name)
        self.home_dir = self.workdir / "home"
        self.home_dir.mkdir()
        self.database_path = self.workdir / "test.db"

    def tearDown(self):
        self.tempdir.cleanup()

    def write_config(self, data: dict) -> None:
        """Write an ffx config file into the fake HOME (~/.local/etc/ffx.json)."""
        config_dir = self.home_dir / ".local" / "etc"
        config_dir.mkdir(parents=True, exist_ok=True)
        (config_dir / "ffx.json").write_text(json.dumps(data), encoding="utf-8")

    def assertCompleted(self, completed):
        """Fail with the subprocess output when the convert run was not successful."""
        if completed.returncode != 0:
            self.fail(
                "FFX convert failed\n"
                f"STDOUT:\n{completed.stdout}\n"
                f"STDERR:\n{completed.stderr}"
            )

    def test_pattern_reorders_and_omits_tracks_preserving_metadata_and_group_order(self):
        source_filename = "reorder_s01e01.mkv"
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0", title="Video Zero"),
                SourceTrackSpec(
                    TrackType.SUBTITLE,
                    identity="subtitle-1",
                    language="eng",
                    title="First Subtitle",
                    subtitle_lines=("first embedded subtitle",),
                ),
                SourceTrackSpec(
                    TrackType.AUDIO,
                    identity="audio-2",
                    language="deu",
                    title="German Audio",
                ),
                SourceTrackSpec(
                    TrackType.SUBTITLE,
                    identity="subtitle-3",
                    language="fra",
                    title="Second Subtitle",
                    subtitle_lines=("second embedded subtitle",),
                ),
                SourceTrackSpec(TrackType.ATTACHMENT, attachment_name="ordered.ttf"),
            ],
        )
        # The pattern keeps tracks 0, 2 and 1 (in that order) and omits track 3.
        prepare_pattern_database(
            self.database_path,
            r"^reorder_(s[0-9]+e[0-9]+)\.mkv$",
            [
                PatternTrackSpec(
                    index=0,
                    source_index=0,
                    track_type=TrackType.VIDEO,
                    tags={"THIS_IS": "video-0", "title": "Video Zero"},
                ),
                PatternTrackSpec(
                    index=1,
                    source_index=2,
                    track_type=TrackType.AUDIO,
                    tags={"THIS_IS": "audio-2", "language": "deu", "title": "German Audio"},
                ),
                PatternTrackSpec(
                    index=2,
                    source_index=1,
                    track_type=TrackType.SUBTITLE,
                    tags={"THIS_IS": "subtitle-1", "language": "eng", "title": "First Subtitle"},
                ),
            ],
        )
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            str(source_path),
        )
        self.assertCompleted(completed)
        output_path = expected_output_path(self.workdir, source_filename)
        self.assertTrue(output_path.is_file(), output_path)
        streams = ffprobe_json(output_path)["streams"]
        self.assertEqual(
            [stream["codec_type"] for stream in streams],
            ["video", "audio", "subtitle", "attachment"],
        )
        self.assertEqual(
            [get_tag(streams[index], "THIS_IS") for index in range(3)],
            ["video-0", "audio-2", "subtitle-1"],
        )
        self.assertNotIn(
            "subtitle-3",
            [get_tag(stream, "THIS_IS") for stream in streams if stream["codec_type"] != "attachment"],
        )
        self.assertEqual(streams[-1]["codec_name"], "ttf")
        extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
        self.assertIn("first embedded subtitle", extracted_subtitle)
        self.assertNotIn("second embedded subtitle", extracted_subtitle)

    def test_cli_rearrange_streams_reorders_tracks_without_database_pattern(self):
        source_filename = "cli_s01e01.mkv"
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="First Audio"),
                SourceTrackSpec(TrackType.AUDIO, identity="audio-2", language="deu", title="Second Audio"),
                SourceTrackSpec(TrackType.SUBTITLE, identity="subtitle-3", language="eng", title="Subtitle"),
            ],
        )
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-pattern",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            "--rearrange-streams",
            "0,2,1,3",
            str(source_path),
        )
        self.assertCompleted(completed)
        output_path = expected_output_path(self.workdir, source_filename)
        streams = ffprobe_json(output_path)["streams"]
        self.assertEqual(
            [stream["codec_type"] for stream in streams],
            ["video", "audio", "audio", "subtitle"],
        )
        # The two audio streams must have swapped positions.
        self.assertEqual(
            [get_tag(stream, "THIS_IS") for stream in streams],
            ["video-0", "audio-2", "audio-1", "subtitle-3"],
        )

    def test_no_pattern_stream_remove_list_clears_copied_stream_metadata(self):
        source_filename = "remove_tags_s01e01.mkv"
        self.write_config(
            {
                "metadata": {
                    "streams": {
                        "remove": ["BPS"],
                    }
                }
            }
        )
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(
                    TrackType.VIDEO,
                    identity="video-0",
                    extra_tags={"BPS": "remove-me", "KEEP_ME": "video-keep"},
                ),
                SourceTrackSpec(
                    TrackType.AUDIO,
                    identity="audio-1",
                    language="eng",
                    title="Main Audio",
                    extra_tags={"BPS": "remove-me", "KEEP_ME": "audio-keep"},
                ),
            ],
        )
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-pattern",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            str(source_path),
        )
        self.assertCompleted(completed)
        output_path = expected_output_path(self.workdir, source_filename)
        streams = ffprobe_json(output_path)["streams"]
        self.assertEqual(
            [stream["codec_type"] for stream in streams],
            ["video", "audio"],
        )
        # Configured "remove" list strips BPS but leaves other tags intact.
        self.assertEqual(get_tag(streams[0], "THIS_IS"), "video-0")
        self.assertEqual(get_tag(streams[0], "KEEP_ME"), "video-keep")
        self.assertIsNone(get_tag(streams[0], "BPS"))
        self.assertEqual(get_tag(streams[1], "THIS_IS"), "audio-1")
        self.assertEqual(get_tag(streams[1], "KEEP_ME"), "audio-keep")
        self.assertIsNone(get_tag(streams[1], "BPS"))

    def test_pattern_validation_fails_for_nonexistent_source_track_reference(self):
        source_filename = "invalid_s01e01.mkv"
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(TrackType.AUDIO, identity="audio-1"),
                SourceTrackSpec(TrackType.SUBTITLE, identity="subtitle-2"),
            ],
        )
        # source_index=99 does not exist in the fixture; conversion must abort.
        prepare_pattern_database(
            self.database_path,
            r"^invalid_(s[0-9]+e[0-9]+)\.mkv$",
            [
                PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
                PatternTrackSpec(index=1, source_index=99, track_type=TrackType.SUBTITLE),
            ],
        )
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            str(source_path),
        )
        self.assertNotEqual(completed.returncode, 0)
        error_output = f"{completed.stdout}\n{completed.stderr}"
        self.assertIn("non-existent source track #99", error_output)
        self.assertFalse(expected_output_path(self.workdir, source_filename).exists())

    def test_external_subtitle_file_replaces_payload_and_overrides_metadata(self):
        source_filename = "substitute_s01e01.mkv"
        self.write_config(
            {
                "metadata": {
                    "streams": {
                        "remove": ["BPS"],
                    }
                }
            }
        )
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="Main Audio"),
                SourceTrackSpec(
                    TrackType.SUBTITLE,
                    identity="embedded-subtitle",
                    language="eng",
                    title="Embedded Title",
                    extra_tags={"BPS": "remove-me", "EXTERNAL_KEEP": "keep-me"},
                    subtitle_lines=("embedded subtitle payload",),
                ),
            ],
        )
        # External VTT file matching the <prefix>_<indicator>_<index>_<lang> scheme.
        write_vtt(
            self.workdir / "substitute_s01e01_2_deu.vtt",
            ("external subtitle payload",),
        )
        prepare_pattern_database(
            self.database_path,
            r"^substitute_(s[0-9]+e[0-9]+)\.mkv$",
            [
                PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
                PatternTrackSpec(index=1, source_index=1, track_type=TrackType.AUDIO),
                PatternTrackSpec(index=2, source_index=2, track_type=TrackType.SUBTITLE),
            ],
        )
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            "--subtitle-directory",
            str(self.workdir),
            "--subtitle-prefix",
            "substitute",
            str(source_path),
        )
        self.assertCompleted(completed)
        output_path = expected_output_path(self.workdir, source_filename)
        streams = ffprobe_json(output_path)["streams"]
        subtitle_stream = [stream for stream in streams if stream["codec_type"] == "subtitle"][0]
        # Language comes from the external file; other metadata is preserved.
        self.assertEqual(get_tag(subtitle_stream, "language"), "deu")
        self.assertEqual(get_tag(subtitle_stream, "title"), "Embedded Title")
        self.assertEqual(get_tag(subtitle_stream, "THIS_IS"), "embedded-subtitle")
        self.assertEqual(get_tag(subtitle_stream, "EXTERNAL_KEEP"), "keep-me")
        self.assertIsNone(get_tag(subtitle_stream, "BPS"))
        extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
        self.assertIn("external subtitle payload", extracted_subtitle)
        self.assertNotIn("embedded subtitle payload", extracted_subtitle)

    def test_subtitle_prefix_uses_configured_base_directory_when_directory_is_omitted(self):
        source_filename = "substitute_default_s01e01.mkv"
        subtitle_prefix = "substitute_default"
        subtitles_base_dir = self.home_dir / ".local" / "var" / "sync" / "subtitles"
        resolved_subtitle_dir = subtitles_base_dir / subtitle_prefix
        resolved_subtitle_dir.mkdir(parents=True, exist_ok=True)
        self.write_config(
            {
                "subtitlesDirectory": "~/.local/var/sync/subtitles",
                "metadata": {
                    "streams": {
                        "remove": ["BPS"],
                    }
                }
            }
        )
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="Main Audio"),
                SourceTrackSpec(
                    TrackType.SUBTITLE,
                    identity="embedded-subtitle",
                    language="eng",
                    title="Embedded Title",
                    extra_tags={"BPS": "remove-me", "EXTERNAL_KEEP": "keep-me"},
                    subtitle_lines=("embedded subtitle payload",),
                ),
            ],
        )
        write_vtt(
            resolved_subtitle_dir / f"{subtitle_prefix}_s01e01_2_deu.vtt",
            ("external subtitle payload",),
        )
        prepare_pattern_database(
            self.database_path,
            r"^substitute_default_(s[0-9]+e[0-9]+)\.mkv$",
            [
                PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
                PatternTrackSpec(index=1, source_index=1, track_type=TrackType.AUDIO),
                PatternTrackSpec(index=2, source_index=2, track_type=TrackType.SUBTITLE),
            ],
        )
        # No --subtitle-directory here: the configured base directory applies.
        completed = run_ffx_convert(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--video-encoder",
            "copy",
            "--no-tmdb",
            "--no-prompt",
            "--no-signature",
            "--subtitle-prefix",
            subtitle_prefix,
            str(source_path),
        )
        self.assertCompleted(completed)
        output_path = expected_output_path(self.workdir, source_filename)
        streams = ffprobe_json(output_path)["streams"]
        subtitle_stream = [stream for stream in streams if stream["codec_type"] == "subtitle"][0]
        self.assertEqual(get_tag(subtitle_stream, "language"), "deu")
        self.assertEqual(get_tag(subtitle_stream, "title"), "Embedded Title")
        self.assertEqual(get_tag(subtitle_stream, "THIS_IS"), "embedded-subtitle")
        self.assertEqual(get_tag(subtitle_stream, "EXTERNAL_KEEP"), "keep-me")
        self.assertIsNone(get_tag(subtitle_stream, "BPS"))
        extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
        self.assertIn("external subtitle payload", extracted_subtitle)
        self.assertNotIn("embedded subtitle payload", extracted_subtitle)


if __name__ == "__main__":
    unittest.main()

View File

@@ -0,0 +1,303 @@
from __future__ import annotations
import json
import os
from pathlib import Path
import subprocess
import sys
import tempfile
import unittest
from tests.support.ffx_bundle import (
SourceTrackSpec,
build_controller_context,
create_source_fixture,
dispose_controller_context,
)
from ffx.pattern_controller import PatternController
from ffx.show_controller import ShowController
from ffx.show_descriptor import ShowDescriptor
from ffx.shifted_season_controller import ShiftedSeasonController
from ffx.track_codec import TrackCodec
from ffx.track_descriptor import TrackDescriptor
from ffx.track_type import TrackType
# Optional pytest markers: the suite still runs under plain unittest when
# pytest is not installed.
try:
    import pytest
except ImportError:  # pragma: no cover - unittest-only environments
    pytest = None

if pytest is not None:
    pytestmark = [pytest.mark.integration]

# Repository "src" directory; prepended to PYTHONPATH so the subprocess can import ffx.
SRC_ROOT = Path(__file__).resolve().parents[2] / "src"


def run_ffx_unmux(workdir: Path, home_dir: Path, database_path: Path, *args: str) -> subprocess.CompletedProcess[str]:
    """Run ``python -m ffx ... unmux`` in *workdir* with an isolated HOME.

    Returns the CompletedProcess with captured text stdout/stderr.
    """
    env = os.environ.copy()
    # Isolate HOME so config/state files never leak from the real user.
    env["HOME"] = str(home_dir)
    existing_pythonpath = env.get("PYTHONPATH", "")
    env["PYTHONPATH"] = str(SRC_ROOT) if not existing_pythonpath else f"{SRC_ROOT}{os.pathsep}{existing_pythonpath}"
    command = [
        sys.executable,
        "-m",
        "ffx",
        "--database-file",
        str(database_path),
        "unmux",
        *args,
    ]
    return subprocess.run(command, cwd=workdir, env=env, capture_output=True, text=True)
class UnmuxCliTests(unittest.TestCase):
    """Integration tests for the ffx 'unmux' subcommand."""

    def setUp(self):
        # Each test gets an isolated working directory, fake HOME and database.
        self.tempdir = tempfile.TemporaryDirectory()
        self.workdir = Path(self.tempdir.name)
        self.home_dir = self.workdir / "home"
        self.home_dir.mkdir()
        self.database_path = self.workdir / "test.db"

    def tearDown(self):
        self.tempdir.cleanup()

    def write_config(self, data: dict) -> None:
        """Write an ffx config file into the fake HOME (~/.local/etc/ffx.json)."""
        config_dir = self.home_dir / ".local" / "etc"
        config_dir.mkdir(parents=True, exist_ok=True)
        (config_dir / "ffx.json").write_text(json.dumps(data), encoding="utf-8")

    def assertCompleted(self, completed):
        """Fail with the subprocess output when the unmux run was not successful."""
        if completed.returncode != 0:
            self.fail(
                "FFX unmux failed\n"
                f"STDOUT:\n{completed.stdout}\n"
                f"STDERR:\n{completed.stderr}"
            )

    def seed_matching_show(self, pattern_expression: str, *, indicator_season_digits: int, indicator_episode_digits: int) -> None:
        """Create a show with the given indicator digits plus a matching pattern."""
        context = build_controller_context(self.database_path)
        try:
            ShowController(context).updateShow(
                ShowDescriptor(
                    id=1,
                    name="Unmux Test Show",
                    year=2000,
                    indicator_season_digits=indicator_season_digits,
                    indicator_episode_digits=indicator_episode_digits,
                )
            )
            PatternController(context).savePatternSchema(
                {
                    "show_id": 1,
                    "pattern": pattern_expression,
                    "quality": 0,
                    "notes": "",
                },
                trackDescriptors=[
                    TrackDescriptor(
                        index=0,
                        source_index=0,
                        track_type=TrackType.VIDEO,
                        codec_name=TrackCodec.H264,
                        tags={},
                        disposition_set=set(),
                    )
                ],
            )
        finally:
            dispose_controller_context(context)

    def add_show_shift(
        self,
        *,
        show_id: int,
        original_season: int,
        first_episode: int,
        last_episode: int,
        season_offset: int,
        episode_offset: int,
    ) -> None:
        """Register a shifted-season mapping for *show_id* in the database."""
        context = build_controller_context(self.database_path)
        try:
            ShiftedSeasonController(context).addShiftedSeason(
                showId=show_id,
                shiftedSeasonObj={
                    "original_season": original_season,
                    "first_episode": first_episode,
                    "last_episode": last_episode,
                    "season_offset": season_offset,
                    "episode_offset": episode_offset,
                },
            )
        finally:
            dispose_controller_context(context)

    def test_subtitles_only_without_output_directory_uses_configured_base_plus_label(self):
        self.write_config(
            {
                "subtitlesDirectory": "~/.local/var/sync/subtitles",
            }
        )
        source_filename = "unmux_s01e01.mkv"
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(
                    TrackType.SUBTITLE,
                    identity="subtitle-1",
                    language="eng",
                    subtitle_lines=("subtitle payload",),
                ),
            ],
        )
        completed = run_ffx_unmux(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--subtitles-only",
            "--label",
            "dball",
            str(source_path),
        )
        self.assertCompleted(completed)
        # Output lands in <configured base>/<label>.
        expected_directory = self.home_dir / ".local" / "var" / "sync" / "subtitles" / "dball"
        self.assertTrue(expected_directory.is_dir(), expected_directory)

    def test_unmux_uses_configured_indicator_digits_in_output_filenames(self):
        self.write_config(
            {
                "defaultIndicatorSeasonDigits": 3,
                "defaultIndicatorEpisodeDigits": 4,
            }
        )
        source_filename = "unmux_s01e01.mkv"
        output_directory = self.workdir / "unmux-output"
        output_directory.mkdir()
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
            ],
        )
        completed = run_ffx_unmux(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--label",
            "dball",
            "--output-directory",
            str(output_directory),
            str(source_path),
        )
        self.assertCompleted(completed)
        output_filenames = sorted(path.name for path in output_directory.iterdir())
        self.assertEqual(1, len(output_filenames), output_filenames)
        # Config defaults (3 season / 4 episode digits) shape the indicator.
        self.assertTrue(
            output_filenames[0].startswith("dball_S001E0001_"),
            output_filenames,
        )

    def test_unmux_prefers_matched_show_indicator_digits_over_config_defaults(self):
        self.write_config(
            {
                "defaultIndicatorSeasonDigits": 4,
                "defaultIndicatorEpisodeDigits": 4,
            }
        )
        self.seed_matching_show(
            r"^unmux_([sS][0-9]+[eE][0-9]+)\.mkv$",
            indicator_season_digits=1,
            indicator_episode_digits=3,
        )
        source_filename = "unmux_s01e01.mkv"
        output_directory = self.workdir / "unmux-output"
        output_directory.mkdir()
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
            ],
        )
        completed = run_ffx_unmux(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--label",
            "dball",
            "--output-directory",
            str(output_directory),
            str(source_path),
        )
        self.assertCompleted(completed)
        output_filenames = sorted(path.name for path in output_directory.iterdir())
        self.assertEqual(1, len(output_filenames), output_filenames)
        # Show-specific digits (1/3) win over the config defaults (4/4).
        self.assertTrue(
            output_filenames[0].startswith("dball_S1E001_"),
            output_filenames,
        )

    def test_unmux_applies_shifted_season_mapping_to_output_filenames(self):
        self.seed_matching_show(
            r"^unmux_([sS][0-9]+[eE][0-9]+)\.mkv$",
            indicator_season_digits=2,
            indicator_episode_digits=2,
        )
        # s01e89 maps to s02e01 via the +1 season / -88 episode offsets.
        self.add_show_shift(
            show_id=1,
            original_season=1,
            first_episode=1,
            last_episode=99,
            season_offset=1,
            episode_offset=-88,
        )
        source_filename = "unmux_s01e89.mkv"
        output_directory = self.workdir / "unmux-output"
        output_directory.mkdir()
        source_path = create_source_fixture(
            self.workdir,
            source_filename,
            [
                SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
                SourceTrackSpec(
                    TrackType.SUBTITLE,
                    identity="subtitle-1",
                    language="eng",
                    subtitle_lines=("subtitle payload",),
                ),
            ],
        )
        completed = run_ffx_unmux(
            self.workdir,
            self.home_dir,
            self.database_path,
            "--label",
            "dball",
            "--output-directory",
            str(output_directory),
            "--subtitles-only",
            str(source_path),
        )
        self.assertCompleted(completed)
        self.assertIn(
            "Unmuxing stream 1 into file dball_S02E01_1_eng",
            completed.stderr,
        )


if __name__ == "__main__":
    unittest.main()

1
tests/legacy/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Legacy custom FFX test harness modules.

View File

@@ -24,8 +24,9 @@ class BasenameCombinator():
@staticmethod
def getClassReference(identifier):
    """Import tests.legacy.basename_combinator_<identifier> and return its combinator class.

    Returns the first class whose name starts with 'BasenameCombinator'
    (excluding the base class itself), or None when no such class exists.
    """
    module_name = f"tests.legacy.basename_combinator_{ identifier }"
    importlib.import_module(module_name)
    for name, obj in inspect.getmembers(sys.modules[module_name]):
        #HINT: Excluding MediaCombinator as it seems to be included by import (?)
        if inspect.isclass(obj) and name != 'BasenameCombinator' and name.startswith('BasenameCombinator'):
            return obj

Some files were not shown because too many files have changed in this diff Show More