134 Commits

Author SHA1 Message Date
Javanaut
9dc08d48e9 ff 2026-04-12 10:06:19 +02:00
Javanaut
20bdfc0dd7 Fix pri lang for rename mode 2026-04-12 10:06:01 +02:00
Javanaut
4365e083dc Adapt unmux command to changes in convert command 2026-04-11 22:31:04 +02:00
Javanaut
528915a235 Adds subtitle default dir 2026-04-11 21:17:21 +02:00
Javanaut
9a980b5766 Fix streamtags remove list 2026-04-11 20:50:09 +02:00
Javanaut
5eee7e1161 Extd cut parameter 2026-04-11 20:27:58 +02:00
Javanaut
0a41998e29 Adds Q/P values to output file metadata 2026-04-11 17:46:16 +02:00
Javanaut
ebdc23c3ce Fixes remove stream tags per list 2026-04-11 17:31:10 +02:00
Javanaut
9611930949 Misc Opts 2026-04-11 16:52:58 +02:00
Javanaut
609f93b783 Fix cpu percentage interpretations 2026-04-11 16:30:41 +02:00
Javanaut
52c6462fa8 Optimizes niceness and cpulimit usage 2026-04-11 16:21:17 +02:00
Javanaut
358ef18f77 Fix regex issues 2026-04-11 16:10:41 +02:00
Javanaut
fc729a2414 Opt database bootstrapping 2026-04-11 16:04:54 +02:00
Javanaut
0939a0c6c2 Optimizes ffprobe usage 2026-04-11 16:00:01 +02:00
Javanaut
c384d54c12 Impr upgrade 2026-04-11 15:08:08 +02:00
Javanaut
71553aad32 Streamlines imports and app start 2026-04-11 14:57:01 +02:00
Javanaut
d19e69990a Opt pattern matching 2026-04-09 16:11:51 +02:00
Javanaut
be0f4b4c4e Optimize database queries 2026-04-09 13:49:14 +02:00
Javanaut
01b5fdb289 Refine tests, CLI 2026-04-09 13:34:38 +02:00
Javanaut
60ae58500a Tidy up logging and rework tests from scratch 2026-04-09 12:46:24 +02:00
Javanaut
f9c8b8ac5e ffn2 2026-04-09 01:13:06 +02:00
Javanaut
5871ae30ad ffn 2026-04-09 01:06:09 +02:00
Javanaut
52724ecc5b ff 2026-04-09 01:03:41 +02:00
Javanaut
f288d445e4 Adds requirements, streamlines CLI helper procedures 2026-04-09 00:59:37 +02:00
Javanaut
d9db6da191 tf 2026-01-31 17:30:35 +01:00
Javanaut
5443881ea1 tf 2026-01-31 17:12:27 +01:00
Javanaut
8946b57456 fix attachment descriptor handling 2026-01-31 12:06:24 +01:00
Javanaut
686239491b Adapt for .ssa subtitles with attached fonts 2026-01-31 10:00:14 +01:00
Javanaut
126ba4487c fixes TextArea 2025-11-07 15:54:14 +01:00
Javanaut
447cda19ef ff 2025-11-07 15:49:52 +01:00
Javanaut
f1ba913a98 ff 2025-11-07 15:45:47 +01:00
Javanaut
59336aafb7 dd 2025-11-07 15:43:31 +01:00
Javanaut
fd5ad3ed56 Removes build artifacts from branch 2025-11-07 15:38:07 +01:00
Javanaut
2d03a3bb10 ff 2025-11-07 15:17:13 +01:00
Javanaut
4dc02d52a2 Adds notes field for patterns 2025-11-07 15:14:55 +01:00
Javanaut
ed0cea9c26 Adapts Q Message 2025-11-07 14:11:50 +01:00
Javanaut
15bfbdbe88 Adds setting quality according to pattern default 2025-11-06 14:08:00 +01:00
Javanaut
c354ba09ba Adds pattern quality UI field 2025-11-05 21:24:53 +01:00
2eeea08be0 Merge branch 'dev' of gitea.maveno.de:Javanaut/ffx into dev 2025-10-08 11:01:15 +02:00
fbfc8ea965 rfc niceness/cpulimit 2025-10-08 10:59:04 +02:00
Javanaut
6ec5db2ea2 ff 2025-10-07 10:10:27 +02:00
Javanaut
8feced6f1c Lang/Codec changes 2025-10-07 10:10:09 +02:00
Javanaut
285649c30a Fix chinese iso code 2025-09-09 08:26:27 +02:00
Javanaut
558da817f1 Adds country codes Bokmal and Filipino 2025-09-02 23:46:56 +02:00
Javanaut
2a84327f69 Adds country codes Filipino and Bokmal 2025-09-02 23:38:07 +02:00
535b11dca5 fix pattern markup 2025-03-27 08:04:41 +01:00
8edc715795 typo 2025-03-04 22:58:14 +01:00
cd203703e8 adding languages 2025-03-04 22:56:39 +01:00
8f2367b71e ff 2025-02-17 00:11:38 +01:00
101c7605d2 deint 2025-02-16 23:54:32 +01:00
a5b58e34e4 ff 2025-02-11 20:19:55 +01:00
a32e86550c ff 2025-02-11 19:55:12 +01:00
5de3778ae5 ff 2025-02-11 19:52:05 +01:00
81aab0657e ff 2025-02-11 19:48:38 +01:00
8514a0c152 ff 2025-02-02 17:01:22 +01:00
c846147c64 ff 2025-02-02 17:00:33 +01:00
e52297b2ba ff 2025-02-02 16:58:24 +01:00
655833f13e ff 2025-02-02 16:36:36 +01:00
03dd02ed87 ff 2025-02-02 16:25:19 +01:00
b6ee197536 ff 2025-02-02 16:14:16 +01:00
d8374ae9f2 ff 2025-02-02 16:12:11 +01:00
f262eaa120 ff 2025-02-02 16:10:59 +01:00
d940a6e92a ff 2025-02-02 16:03:09 +01:00
e1395aeca0 tf auto_crop 2025-02-02 16:00:57 +01:00
48841c5750 ff 2025-02-02 13:35:11 +01:00
d558bbf6bd ff 2025-02-02 13:34:06 +01:00
b05d989581 ff 2025-02-02 13:29:50 +01:00
bc8af53525 ff 2025-02-02 13:29:08 +01:00
6bd1587947 ff 2025-02-02 13:27:19 +01:00
7d6531b40e ff 2025-02-02 13:26:28 +01:00
ab435a4c76 ff 2025-02-02 13:24:06 +01:00
0a88e366b1 ff 2025-02-02 13:22:30 +01:00
1c80cd7d7d ff 2025-02-02 13:19:21 +01:00
a45c180aaa ff 2025-02-02 13:17:28 +01:00
0b204ff19c ff 2025-02-02 13:09:59 +01:00
d7ec5f7620 ff 2025-02-02 13:07:35 +01:00
3f64304374 ff 2025-02-02 13:05:28 +01:00
b459272149 ff 2025-02-02 13:02:08 +01:00
4b05fc194b ff 2025-02-02 12:53:55 +01:00
9d088819ab ff 2025-02-02 12:50:56 +01:00
e20f7a1f67 ff 2025-02-02 12:48:07 +01:00
9d683dfa84 tf h264/mkv 2025-02-02 12:46:45 +01:00
867756c661 ff 2025-02-02 12:22:34 +01:00
f81a6edb07 ff 2025-02-02 12:21:38 +01:00
ec4bce473c ff 2025-02-02 12:20:25 +01:00
bf882b741f ff 2025-02-02 12:19:36 +01:00
a4e25b5ec8 ff 2025-02-02 12:16:38 +01:00
ff6bacb0d5 ff 2025-02-02 12:11:26 +01:00
f32b7a06c0 ff 2025-02-02 12:10:56 +01:00
7ceed58e7b add cropdetect stub 2025-02-02 12:10:04 +01:00
153f401dd3 ff 2025-01-17 18:27:29 +01:00
7f1f34fb9f multiplicity iso languages 2025-01-17 18:25:25 +01:00
21fe7cb1eb ff 2025-01-14 22:10:20 +01:00
9e63184524 tf import subtitles for movies 2025-01-14 22:02:19 +01:00
3742221189 5.0 channel layout 2025-01-14 21:30:04 +01:00
478ac15ab8 ff 2025-01-14 08:06:14 +01:00
ef0a01bc9b ff 2025-01-14 08:04:55 +01:00
802c11be44 ff 2025-01-14 08:00:41 +01:00
4cbb135772 ff 2025-01-14 08:00:14 +01:00
3d52442471 ff 2025-01-14 07:56:35 +01:00
81640192ab ff 2025-01-14 00:45:33 +01:00
81d760aabe ff 2025-01-14 00:44:10 +01:00
c0eff679f7 ff 2025-01-14 00:41:28 +01:00
07097058d7 add mpeg format 2025-01-14 00:30:57 +01:00
cd7a338541 ff 2024-12-31 11:12:11 +01:00
be652f8efb copy only mode 2024-12-31 10:56:17 +01:00
dd51b14d49 ff 2024-12-27 09:10:08 +01:00
a471808392 add mp3 codec 2024-12-27 09:08:59 +01:00
b3da8ce738 add mpeg-4 format 2024-12-18 22:00:57 +01:00
fe0c078c3f ff 2024-12-15 17:16:57 +01:00
962522b974 ff 2024-12-15 17:13:39 +01:00
24367ea08a ff 2024-12-15 17:13:13 +01:00
f0eebd0bea ff 2024-12-15 17:12:45 +01:00
c8e21b9260 modify ansible role for pypi packaging 2024-12-15 17:02:03 +01:00
cdc1664779 adapt for manjaro 2024-12-15 16:53:36 +01:00
Maveno
2849eda05a perm filter out png thumbnails 2024-11-29 07:08:59 +01:00
Maveno
cfb2df8d66 pf png tracks 2024-11-27 23:40:22 +01:00
Maveno
12c8ad3782 add codec eac3 png 2024-11-25 08:13:02 +01:00
Maveno
74a39a8f9a #433 Descriptor Pattern Checks 2024-11-24 14:01:55 +01:00
Maveno
5eacb0d0cb #411 Input/Output paths 2024-11-24 13:21:46 +01:00
Maveno
e8c0c3d646 fix unchanged tracks for external files 2024-11-24 12:56:31 +01:00
Maveno
6b2671a1f5 ff 2024-11-23 18:08:24 +01:00
Maveno
2d8622506e Rework Descriptor Diff 2024-11-23 13:26:44 +01:00
Maveno
86cc7dfc6f nighl 2024-11-20 22:12:40 +01:00
Maveno
d84bee74c4 ff 2024-11-20 19:01:23 +01:00
Maveno
488caa7a08 ff 2024-11-19 07:56:37 +01:00
Maveno
62877dfed6 ff 2024-11-19 07:54:39 +01:00
Maveno
87ff94e204 add codec srt 2024-11-19 07:54:06 +01:00
Maveno
0c78ed7cf7 ff 2024-11-18 21:15:29 +01:00
Maveno
4db9bfd103 ff 2024-11-18 20:57:04 +01:00
Maveno
db7700a6b9 fix #409: duplicate show entry in ffx inspect 2024-11-18 20:53:15 +01:00
Maveno
222234f978 hf fix tmdb filename filters 2024-11-18 18:37:31 +01:00
Maveno
3672474ff5 ff 2024-11-18 07:53:07 +01:00
Maveno
5ff0fc3fad hf episode-part substitutions 2024-11-18 07:51:23 +01:00
141 changed files with 8782 additions and 1468 deletions

21
.gitignore vendored

@@ -1,10 +1,23 @@
__pycache__
__pycache__/
*.py[cod]
junk/
.vscode
.ipynb_checkpoints/
ansible/inventory/hawaii.yml
ansible/inventory/peppermint.yml
tools/ansible/inventory/hawaii.yml
tools/ansible/inventory/peppermint.yml
tools/ansible/inventory/cappuccino.yml
tools/ansible/inventory/group_vars/all.yml
ffx_test_report.log
bin/conversiontest.py
*.egg-info/
build/
dist/
*.egg-info/
.venv/
venv/
.codex
*.mkv
*.webm
ffmpeg2pass-0.log

376
AGENTS.md Normal file

@@ -0,0 +1,376 @@
# AGENTS.md
This file is the entry point for agent guidance in this repository.
It is intentionally generic and reusable across projects. Keep this file focused on non-project-specific constraints, working style, and the structure used to link more detailed guidance.
# Purpose
- Provide a small default rule set for agents working in this repository.
- Keep the base guidance modular and easy to extend.
- Separate reusable agent behavior from project-specific requirements.
# Comment Syntax
- A segment wrapped in `<!--` and `-->` is a comment and must be ignored by agents.
- Use HTML comments for optional guidance that should stay inactive until enabled.
- To enable an optional segment, remove the surrounding `<!--` and `-->` markers.
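For illustration, one optional segment in its inactive and enabled forms:
```text
<!--
# Optional Segment (inactive while wrapped)
- This guidance is ignored by agents.
-->

# Optional Segment (enabled once the markers are removed)
- This guidance is active.
```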
# Core Principles
- Prefer the simplest solution that satisfies the current goal.
- Keep guidance lightweight: only add detail when it meaningfully improves outcomes.
- Reuse modular guideline files instead of expanding this file indefinitely.
- Treat project-specific documents as the source of truth for project behavior.
- When guidance conflicts, use the most specific applicable document.
# Rule Terms
- A `rule` is the general term for any constraint, requirement, definition, or similar guidance item.
- A `rule set` addresses all rules inside one file that share the same rule set ID.
- Any rule inside a rule set shall use an ID following the schema `RULESET-0001`, `RULESET-0002`, and so on (see the example below).
- Rules without a rule set ID are also valid, but they are not addressable by rule ID.
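For example, a hypothetical rule set with ID `DOCS` would number its rules like this:
```text
DOCS-0001: Keep user-facing documentation synchronized with released behavior.
DOCS-0002: Update release history when user-visible changes ship.
```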
# Scope Of This File
This file should contain:
- Generic agent behavior and constraints.
- Rules that are reusable across multiple projects.
- Links to optional guideline modules.
- Links to project-specific requirements.
- Commented optional templates for released-product documentation and agent-output locations.
This file should not contain:
- Project business requirements.
- Project architecture decisions.
- Stack-specific implementation details unless they are universally applicable.
- Task-specific runbooks that belong in dedicated modules.
# Default Agent Behavior
- Read the relevant context before making changes.
- Prefer small, understandable edits over broad refactors.
- Preserve existing patterns unless there is a clear reason to change them.
- Document assumptions when context is missing.
- Ignore HTML comment segments.
- If a more specific enabled guideline exists for the current task, follow it.
# Guideline Structure
Use the following structure for reusable guidance files and project-specific documentation as needed:
```text
/
|-- AGENTS.md
|-- guidance/
| |-- stacks/
| |-- conventions/
| `-- workflows/
|-- prompts/
`-- requirements/
Optional files and directories
|-- SCRATCHPAD.md
|-- docs/
| |-- readme.md
| |-- installation.md
| `-- history.md
|-- process/
| |-- log.md
| `-- coding-handbook.md
```
# Optional Reusable Modules
Add files under `guidance/` only when they are needed.
# Optional Scratchpad
- `SCRATCHPAD.md` is an optional repo-root scratchpad for temporary
information aimed at the next iteration.
- Developers may create or delete `SCRATCHPAD.md` at any time.
- Developers may refer to `SCRATCHPAD.md` as `scratchpad` when giving agents a
source or target for information.
- Agents may read, update, create, or remove the scratchpad when the task
explicitly calls for it.
- Treat the scratchpad as low-formality working context rather than canonical
project truth.
- Use the scratchpad for short-lived notes, open questions, sketches, and
temporary decisions that should be resolved away.
- Move durable outcomes into `requirements/`, `guidance/`, code, tests, or
another long-lived location.
- If `SCRATCHPAD.md` is absent, agents should continue normally.
# Optional Rule Sets
- Optional rule sets may be stored in `guidance/optional/` or in `guidance/{section}/optional/`.
- Optional rule sets are inactive by default and shall only be applied when a prompt explicitly requests them, for example by phrases such as `Apply rules for lean interface iteration in the following steps.` or `Apply LII rules.`
- An optional rule set may be requested by its descriptive name, by its rule set ID, or by another equally clear explicit reference.
- Agents shall never infer or auto-enable optional rule sets from general intent alone.
- If an optional rule or rule set cannot be identified and addressed clearly, agents shall stop and ask before proceeding.
# Prepared Orders
- An `order` is a prepared prompt for one isolated operation rather than a general workflow or standing rule set.
- Orders shall be stored under `prompts/`.
- Order files shall use the naming schema `ORDER-0001-<slug>.md`, `ORDER-0002-<slug>.md`, and so on.
- The canonical order identifier is the `ORDER-0001` style prefix. The trailing slug is descriptive only.
- Recommended internal order file structure is: prompt ID, prompt name, purpose, trigger examples, scope, operation, and expected output (sketched below).
- Orders shall only be executed when they are explicitly requested by a prompt such as `Execute ORDER-0007.` or `Execute ORDER 7.`
- Agents may accept an unambiguous short numeric reference such as `ORDER 7` as an alias for `ORDER-0007`.
- If an order cannot be identified uniquely and clearly, agents shall stop and ask before proceeding.
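A minimal sketch of one order file; the slug, purpose, and operation below are hypothetical:
```text
# ORDER-0007-prune-build-artifacts
Prompt ID: ORDER-0007
Prompt name: prune-build-artifacts
Purpose: remove stale build artifacts from the working tree.
Trigger examples: `Execute ORDER-0007.`, `Execute ORDER 7.`
Scope: generated build output only; sources stay untouched.
Operation: delete the listed artifact directories.
Expected output: a short report of removed paths.
```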
# Toolstack Guides
Location:
```text
guidance/stacks/
```
Examples:
- `guidance/stacks/python.md`
- `guidance/stacks/typescript.md`
- `guidance/stacks/docker.md`
- `guidance/stacks/terraform.md`
Use for:
- Language or framework expectations.
- Tooling and environment conventions.
- Build, test, and runtime guidance tied to a specific stack.
# Coding Conventions
Location:
```text
guidance/conventions/
```
Examples:
- `guidance/conventions/naming.md`
- `guidance/conventions/testing.md`
- `guidance/conventions/review.md`
Use for:
- Naming and structure conventions.
- Testing expectations.
- Code review and quality rules.
# Recurring Workflows
Location:
```text
guidance/workflows/
```
Examples:
- `guidance/workflows/feature-delivery.md`
- `guidance/workflows/bugfix.md`
- `guidance/workflows/release.md`
- `guidance/workflows/incident-response.md`
Use for:
- Repeatable task flows.
- Checklists for common delivery work.
- Operational or maintenance procedures.
<!-- Enable this optional section by removing the outer HTML comment markers from this segment
when you want agents to create, update, and consult released-product
documentation in `docs/`.
# Released Product Documentation
Released-product documentation should live outside the generic sections above.
Recommended location:
```text
docs/
```
Examples:
- `docs/readme.md`
- `docs/installation.md`
- `docs/history.md`
Agent rules for docs output:
- Keep content compact but comprehensive.
- Write for end users, operators, or other consumers of the released product.
- Prefer shipped behavior, supported workflows, and stable terminology over
internal implementation detail.
- Keep documentation synchronized with released behavior.
- Update release history when user-visible changes are shipped.
Recommended topics:
- Product overview and intended use.
- Installation, configuration, and upgrade guidance.
- Usage patterns, operational instructions, and support boundaries.
- Compatibility notes, migration notes, and release history.
- Troubleshooting and common pitfalls when relevant. -->
<!-- Enable this optional section by removing the outer HTML comment markers from this
segment when you want agents to produce and consult workflow output in `process/`.
# Agent Output In `process/`
The `process/` directory is primarily for agent output created during
delivery, maintenance, and review work.
Recommended location:
```text
process/
```
Agent rules for process output:
- Use `process/` for agent-produced artifacts rather than released-product
documentation.
- Keep entries concise, traceable, and tied to resulting changes.
- Treat `process/` as workflow output, not as the primary source of product
truth.
- Prefer summaries and rationale over raw transcript dumps unless a workflow
explicitly requires full prompt history.
# Agent Change Log
Location:
```text
process/log.md
```
Use for:
- Capturing prompts given to agents.
- Recording concise explanations of the resulting changes made by agents.
- Preserving task-by-task rationale, decisions, and implementation notes.
# Coding Handbook
Location:
```text
process/coding-handbook.md
```
Use for:
- A tutorial-style handbook that explains the programming components used in
the project.
- Compact but comprehensive technical onboarding material for future
contributors.
- Written explanations that connect code structure, concepts, and
implementation patterns. -->
# Project-Specific Requirements
Project-specific material should live outside the generic sections above.
Recommended location:
```text
requirements/
```
Examples:
- `requirements/project.md`
- `requirements/architecture.md`
- `requirements/decisions.md`
- `requirements/domain.md`
Use for:
- Product and business requirements.
- Project goals and constraints.
- Architecture and design decisions.
- Domain knowledge that is specific to this repository.
# Agent-Level Variables
When present, `requirements/identifiers.yml` is an optional project-specific
input that defines agent-level variables for use inside `requirements/` and
`guidance/`. An illustrative example appears at the end of this section.
Variable schema:
- Use `@{VARIABLE_NAME}` for agent-level variables.
- Prefer uppercase snake case names such as `@{PROJECT_ID}` or `@{VENDOR_ID}`.
- Do not treat `${...}` as an agent-level variable form; that syntax may appear
in Bash or other code and should not be interpreted as agent metadata.
Scope:
- The effective scope of `requirements/identifiers.yml` is limited to
`requirements/` and `guidance/`.
- Definitions from `requirements/identifiers.yml` must not leak into product code.
Defaults:
- Default `@{VENDOR_ID}` is `osgw`.
- Default `@{PROJECT_ID}` is the current repository directory name.
Resolution rules:
- Treat `requirements/identifiers.yml` as optional; when it is absent, agents
may still resolve the defaults defined above.
- If a variable is used in `requirements/` or `guidance/` and it is not
defined in `requirements/identifiers.yml` and does not have a default in this
file, agents may stop and report the undefined variable.
- Prefer updating duplicated identifier values in `requirements/` and
`guidance/` to use the variable schema when that improves consistency.
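A minimal example of such a file, assuming a flat key-value schema (the schema itself is not prescribed above, so treat it as an assumption):
```yaml
# requirements/identifiers.yml (illustrative)
VENDOR_ID: osgw
PROJECT_ID: ffx
```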
# Precedence
Some precedence levels may be absent because optional levels can remain inside
HTML comments. The smaller numeric index wins.
Apply guidance in this order:
1. Direct user or task instructions.
2. Project-specific documents in `requirements/`.
<!-- 3. Released-product documentation in `docs/` when shipped behavior or
user-facing expectations are relevant. -->
4. Relevant modular guides in `guidance/stacks/`, `guidance/conventions/`, or `guidance/workflows/`.
<!-- 5. Agent output in `process/` when prior prompts, rationale, or
implementation notes are relevant. -->
6. This `AGENTS.md`.
# Maintenance
- Keep this file short and stable.
- Move detail into dedicated modules when a section becomes too specific or too long.
- Add new guideline files only when they solve a recurring need.
- Remove outdated references when the repository structure changes.
# Current Status
This repository defines the base `AGENTS.md` structure plus project-specific
requirements and modular guidance.
Future project work can add:
- Reusable modules under `guidance/`
- Project-specific documentation under `requirements/`
- Optional temporary iteration context in `SCRATCHPAD.md`
- Optional released-product documentation under `docs/` by uncommenting its segment
- Optional agent output under `process/` by uncommenting its segment
- Cross-references from this file once those documents exist

129
README.md

@@ -1,48 +1,135 @@
 # FFX
+FFX is a local CLI and Textual TUI for inspecting TV episode files, storing normalization rules in SQLite, and converting outputs into a predictable stream, metadata, and filename layout.
 ## Requirements
 - Linux-like environment
 - `python3`
 - `ffmpeg`
 - `ffprobe`
 - `cpulimit`
 ## Installation
-per https:
+FFX uses a two-step local setup flow.
+### 1. Install The Bundle
+This step creates or reuses the persistent bundle virtualenv in `~/.local/share/ffx.venv`, installs FFX into it, and ensures `ffx` is exposed through a shell alias.
 ```sh
-pip install https://<URL>/<Releaser>/ffx.git@<Branch>
+bash tools/setup.sh
 ```
-per git:
+If you also want the Python packages needed for the modern test suite:
 ```sh
-pip install git+ssh://<Username>@<URL>/<Releaser>/ffx.git@<Branch>
+bash tools/setup.sh --with-tests
 ```
-## Version history
+You can verify the bundle state without changing anything:
-### 0.1.1
+```sh
+bash tools/setup.sh --check
+```
-Bugfixes, TMBD identify shows
+### 2. Prepare System Dependencies And Local User Files
-### 0.1.2
+This step installs or verifies workstation dependencies and seeds local config and data directories. It is the step wrapped by the CLI command `ffx configure_workstation`.
-Bugfixes
+Run it directly:
-### 0.1.3
+```sh
+bash tools/configure_workstation.sh
+```
-Subtitle file imports
+Or through the installed CLI:
-### 0.2.0
+```sh
+ffx configure_workstation
+```
-Tests, Config-File
+Check-only mode is available in both forms:
-### 0.2.1
+```sh
+bash tools/configure_workstation.sh --check
+ffx configure_workstation --check
+```
-Signature, Tags cleaning, Bugfixes, Refactoring
+`tools/configure_workstation.sh` does not manage the bundle virtualenv. Python-side test packages belong to `tools/setup.sh --with-tests`.
-### 0.2.2
+## Basic Usage
-CLI-Overrides
+Examples:
+```sh
+ffx version
+ffx inspect /path/to/episode.mkv
+ffx convert /path/to/episode.mkv
+ffx shows
+```
+## Modern Tests
+Install Python test packages first:
+```sh
+bash tools/setup.sh --with-tests
+```
+Then run the modern automatically discovered test suite:
+```sh
+./tools/test.sh
+```
+This runner uses `pytest` and intentionally excludes the legacy harness under `tests/legacy/`.
+## Default Local Paths
+- Config: `~/.local/etc/ffx.json`
+- Database: `~/.local/var/ffx/ffx.db`
+- Log file: `~/.local/var/log/ffx.log`
+- Bundle venv: `~/.local/share/ffx.venv`
+## TMDB
+TMDB-backed metadata enrichment requires `TMDB_API_KEY` to be set in the environment.
+## Version History
 ### 0.2.3
-PyPi packaging
-Templating output filename
-Season shiftung
-DB-Versionierung
+- PyPI packaging
+- output filename templating
+- season shifting
+- DB versioning
+### 0.2.2
+- CLI overrides
+### 0.2.1
+- signature handling
+- tag cleanup
+- bugfixes and refactoring
+### 0.2.0
+- tests
+- config file
+### 0.1.3
+- subtitle file imports
+### 0.1.2
+- bugfixes
+### 0.1.1
+- bugfixes
+- TMDB show identification

89
SCRATCHPAD.md Normal file

@@ -0,0 +1,89 @@
# Scratchpad
## Goal
- Capture a compact, project-wide list of optimization candidates after a broad scan of the current FFX codebase, tooling, and requirements.
## Settled
- The biggest near-term wins are in startup cost, repeated subprocess work, repeated database query patterns, and general repo hygiene.
- This list is intentionally optimization-oriented rather than bug-oriented. Some items below also improve correctness or maintainability, but they were selected because they can reduce runtime cost, operator friction, or iteration overhead.
- A first modern integration slice now exists under [`tests/integration/subtrack_mapping`](/home/osgw/.local/src/codex/ffx/tests/integration/subtrack_mapping). Remaining test-suite cleanup is now mostly about migrating and shrinking the legacy harness surface under [`tests/legacy`](/home/osgw/.local/src/codex/ffx/tests/legacy).
- The CLI root now lazy-loads heavy runtime dependencies so lightweight commands such as `version`, `help`, `setup`, `configure_workstation`, and `upgrade` stay import-light.
- Shared CLI defaults for container/output tokens now live outside [`src/ffx/ffx_controller.py`](/home/osgw/.local/src/codex/ffx/src/ffx/ffx_controller.py), and a focused unit test locks in the lazy-import contract.
- `FileProperties` now uses one cached `ffprobe -show_format -show_streams -of json` call per source file, and the combined payload was confirmed against the Dragonball asset to satisfy both previous probe call sites fully.
- Crop detection now uses configurable sampling windows plus per-process caching keyed by source file and sampling range, and the `cropdetect` CLI command now calls the real `FileProperties.findCropArguments()` path.
- Database startup now bootstraps schema only when required tables are actually missing, while version enforcement still runs on ordinary DB-backed context creation.
- Helper filename and rich-text utilities now use compiled raw regexes plus translate-based filename filtering, with unit coverage for TMDB suffix rewriting and Rich color stripping.
- Process resource limiting now has explicit disabled/default states in the CLI and requirements, and combined CPU-plus-niceness wrapping now executes as `cpulimit -- nice -n ... <command>` instead of a less explicit prefix chain.
- FFX logger setup now reuses named handlers, and fallback logger access no longer mutates handlers in ordinary constructors and helpers.
- The process wrapper now uses `subprocess.run(...)` with centralized command formatting plus stable timeout and missing-command error mapping.
- Active ORM controllers now use single-query accessors instead of paired `count()` plus `first()` lookups.
- Pattern matching now uses cached compiled regexes plus explicit duplicate-match errors, and pattern creation flows no longer persist zero-track patterns.
- The two-step local setup flow now has aligned CLI wrappers for both phases: `ffx setup` for bundle prep and `ffx configure_workstation` for workstation prep, while the shell scripts remain the bootstrap entrypoints before the bundle exists.
- The large detail screens now share one screen-bootstrap helper for context, metadata-filter extraction, and controller wiring, and show-pattern loading now goes through `PatternController` instead of a screen-local session query.
## Focused Snapshot
- Highest-leverage application optimizations:
- Decide whether placeholder help/settings screens should ship or disappear.
- Trim dead helpers and other dormant surface that still looks active.
- Highest-leverage repo and workflow optimizations:
- Continue migrating the oversized legacy test/combinator surface into focused modern tests so it is easier to run, debug, and extend.
## Optimization Candidates
1. Placeholder UI surfaces should either ship or disappear
- [`src/ffx/help_screen.py`](/home/osgw/.local/src/codex/ffx/src/ffx/help_screen.py) and [`src/ffx/settings_screen.py`](/home/osgw/.local/src/codex/ffx/src/ffx/settings_screen.py) are placeholders.
- Optimization:
- Either remove them from the active UI surface or complete them.
- Avoid paying ongoing maintenance cost for unfinished navigation targets.
- Expected value:
- Leaner interface.
- Lower UX ambiguity.
2. Several helper functions are unfinished or dead-weight
- [`src/ffx/helper.py`](/home/osgw/.local/src/codex/ffx/src/ffx/helper.py) contains `permutateList(...): pass`.
- There are many combinator and conversion placeholders across tests and migrations.
- Optimization:
- Remove dead code, finish it, or isolate it behind a clearly dormant area.
- Avoid carrying stubbed utility surface that looks reusable but is not.
- Expected value:
- Smaller mental model.
- Less time spent re-evaluating inactive paths.
3. Test suite shape is expensive to understand and likely expensive to run
- The project still carries a large legacy matrix of combinator files under [`tests/legacy`](/home/osgw/.local/src/codex/ffx/tests/legacy), several placeholder `pass` implementations, and at least one suspicious filename with an embedded space: [`tests/legacy/disposition_combinator_2_3 .py`](/home/osgw/.local/src/codex/ffx/tests/legacy/disposition_combinator_2_3 .py).
- A first focused replacement slice now exists in [`tests/integration/subtrack_mapping/test_cli_bundle.py`](/home/osgw/.local/src/codex/ffx/tests/integration/subtrack_mapping/test_cli_bundle.py), so the remaining work is migration and consolidation rather than creating the modern test shape from scratch.
- Optimization:
- Continue replacing broad combinator matrices with focused parametrized integration and unit tests.
- Retire the bespoke legacy discovery and runner path once equivalent coverage exists.
- Normalize file naming and test discovery conventions.
- Expected value:
- Faster contributor onboarding.
- Easier CI adoption later.
## Open
- Should optimization work focus first on operator-perceived latency, internal maintainability, or correctness-risk cleanup that also has performance upside?
- Is the long-term supported model still “local Linux workstation plus Textual UI,” or should optimization decisions bias toward a more scriptable/headless CLI?
## Gaps Right Now
- No explicit prioritization owner or milestone for the optimization backlog.
- No benchmark or timing harness exists for startup, probe, DB, or conversion orchestration overhead.
- Repo hygiene is still mixed with generated artifacts and some clearly unfinished files.
- The legacy TMDB-backed `Scenario 4` path is currently blocked by a pattern/track regression: `Patterns must define at least one track before they can be stored.` This surfaced while rerunning TMDB-dependent checks after the zero-track pattern hardening.
## Next
1. Triage the list into quick wins, medium refactors, and long-horizon cleanup.
2. Tackle the cheapest remaining product-surface cleanup first:
- placeholder UI surfaces and dead helper cleanup.
3. Continue replacing oversized legacy test matrices with focused modern integration and unit coverage.
4. Triage the legacy `Scenario 4` pattern/track failure and decide whether to fix the harness, adapt it to the zero-track guard, or retire that path during the ongoing test-suite migration.
## Delete When
- Delete this scratchpad once the optimization backlog is either converted into issues/work items or distilled into durable project guidance.

guidance/optional/lean-interface-iteration.md Normal file

@@ -0,0 +1,28 @@
# Lean Interface Iteration
Rule set name: `lean-interface-iteration`
Rule set ID: `LII`
Status: optional, prompt-activated only
Trigger examples:
- `Apply the lean-interface-iteration rules.`
- `Apply LII rules.`
LII-0001: Apply this rule set only when it is explicitly requested in the prompt.
LII-0002: The target of work under this rule set is the iterated product state for the addressed iteration only.
LII-0003: Optimize the addressed interface toward the leanest and least complex model that still satisfies the iteration order.
LII-0004: Backward compatibility, legacy aliases, and compatibility shims are not required unless the prompt explicitly asks to preserve them.
LII-0005: Prefer one authoritative interface over multiple overlapping parameters, flags, or naming variants.
LII-0006: Remove or avoid transitional interface layers when they are not required by the addressed iteration order.
LII-0007: Update affected tests, guidance, requirements, and documentation so they describe the simplified interface model rather than a mixed legacy-and-new model.
LII-0008: Never change behavior, interfaces, or surrounding areas that are not addressed by the current iteration order.

guidance/optional/preparation-script-design.md Normal file

@@ -0,0 +1,56 @@
# Preparation Script Design
Rule set name: `preparation-script-design`
Rule set ID: `PSD`
Status: optional, prompt-activated only
Trigger examples:
- `Apply the preparation-script-design rules.`
- `Apply PSD rules.`
PSD-0001: Apply this rule set only when it is explicitly requested in the prompt.
PSD-0002: Use this rule set for scripts whose purpose is to prepare, verify, or expose a local development or automation environment rather than to perform product runtime behavior.
PSD-0003: Keep a preparation script focused on environment readiness, dependency installation, local helper exposure, and clear verification output; do not mix unrelated product logic into the script.
PSD-0004: Design the script to be idempotent so repeated runs converge on the same prepared state without unnecessary reinstallation or destructive side effects.
PSD-0005: Provide a verification-only mode such as `--check` that reports readiness without installing, modifying, or creating dependencies.
PSD-0006: Separate component checks from installation steps so the script can report what is missing before or after attempted remediation.
PSD-0007: Group required capabilities into clear purpose-oriented sections such as support toolchains, local package bundles, generated environment helpers, or other relevant readiness areas instead of presenting one undifferentiated dependency list.
PSD-0008: Prefer explicit per-component check helpers over opaque one-shot checks so failures remain traceable and easy to extend.
PSD-0009: Generate or update environment helper files only when they provide a stable, reusable way to expose repo-local or workspace-local tools, paths, or environment variables.
PSD-0010: Generated environment helper files shall be safe to source multiple times and should avoid duplicating path entries or clobbering unrelated user environment state.
PSD-0011: When a preparation flow seeds optional user-owned files such as config templates, do so non-destructively by creating them only when absent unless the prompt explicitly requests overwrite behavior.
PSD-0012: Report status in a concise scan-friendly line format of the shape `[status] Label: detail`, where the label names the checked component and the detail string stays short and specific (example lines below).
PSD-0013: Prefer a small canonical status vocabulary in those report lines, with `ok` for satisfied checks, `warn` for non-blocking gaps, and a failure status such as `failed` for blocking or unsuccessful states.
PSD-0014: When a preparation script uses terminal colors in its status output, apply a consistent severity mapping so `ok` is green, `warn` is yellow, and all other status levels are red.
PSD-0015: In bracketed status markers such as `[ok]` or `[warn]`, keep the square brackets uncolored and apply the severity color only to the inner status text.
PSD-0016: Colorized status output shall degrade safely in non-terminal or non-color contexts so the script remains readable and automation-friendly without ANSI support.
PSD-0017: End with an explicit readiness conclusion that distinguishes between successful preparation, incomplete prerequisites, and failed installation attempts.
PSD-0018: Installation logic should use the narrowest supported platform-specific package-manager actions necessary for the declared scope and should fail clearly when no supported installation path is available.
PSD-0019: Treat repo-local helper tooling and local package installation boundaries explicitly rather than assuming global installs, especially when the prepared environment is intended to be reproducible.
PSD-0020: Keep the script suitable for both interactive local developer use and non-interactive automation checks by avoiding prompts during normal execution unless the prompt explicitly requires interactivity.
PSD-0021: When a script depends on generated helper files or adjacent validation helpers, update those supporting files only as needed to keep the preparation flow coherent and usable.
PSD-0022: Verify shell syntax after changes and, when feasible, run a dry readiness check so the resulting preparation flow is validated rather than only written.
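Illustrative report lines consistent with PSD-0012 through PSD-0017; the component names and details are hypothetical:
```text
[ok] ffmpeg: found on PATH
[warn] config: ~/.local/etc/ffx.json absent, will be seeded on install
[failed] cpulimit: not installed and no supported package manager detected
Preparation incomplete: 1 blocking gap, 1 non-blocking gap.
```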

pyproject.toml

@@ -27,6 +27,11 @@ Homepage = "https://gitea.maveno.de/Javanaut/ffx"
Repository = "https://gitea.maveno.de/Javanaut/ffx.git"
Issues = "https://gitea.maveno.de/Javanaut/ffx/issues"
[project.optional-dependencies]
test = [
"pytest",
]
[build-system]
requires = [
"setuptools",
@@ -35,4 +40,15 @@ requires = [
build-backend = "setuptools.build_meta"
[project.scripts]
ffx = "ffx.ffx:ffx"
ffx = "ffx.cli:ffx"
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
norecursedirs = ["tests/legacy", "tests/support"]
addopts = "-ra"
markers = [
"integration: exercises the FFX bundle with real ffmpeg/ffprobe processes",
"pattern_management: covers requirements/pattern_management.md",
"subtrack_mapping: covers requirements/subtrack_mapping.md",
]

requirements/architecture.md Normal file

@@ -0,0 +1,97 @@
# Architecture
## Architecture Goals
- Keep the tool small, local, and easy to reason about.
- Separate media inspection, stored normalization rules, and conversion execution clearly enough that users can inspect and adjust behavior.
- Favor explicit local state and deterministic rule application over opaque automation.
- Make external runtime dependencies and platform assumptions visible.
## System Context
- Primary actors:
- Local operator running the CLI.
- Local operator using the Textual TUI to inspect files and maintain rules.
- External systems:
- `ffprobe` for media introspection.
- `ffmpeg` for conversion and extraction.
- TMDB API for optional show and episode metadata.
- Local filesystem for source media, generated outputs, subtitles, logs, config, and database files.
- Data entering the system:
- Media container and stream metadata from source files.
- Regex patterns and per-show normalization rules entered in the TUI.
- Optional config values from `~/.local/etc/ffx.json`.
- Optional TMDB identifiers and CLI overrides.
- Optional external subtitle files.
- Data leaving the system:
- Normalized output media files.
- Extracted stream files from unmux operations.
- SQLite rows representing shows, patterns, tracks, tags, shifted seasons, and properties.
- Local log output and console messages.
## High-Level Building Blocks
- Frontend, CLI, API, or worker:
- A Click-based CLI in [`src/ffx/cli.py`](/home/osgw/.local/src/codex/ffx/src/ffx/cli.py), exposed as the `ffx` command and via `python -m ffx`, including lightweight maintenance wrappers for bundle setup, workstation preparation, and upgrade tasks.
- A Textual terminal UI rooted in [`src/ffx/ffx_app.py`](/home/osgw/.local/src/codex/ffx/src/ffx/ffx_app.py) with screens for shows, patterns, file inspection, tracks, tags, and shifted seasons.
- Core business logic:
- Descriptor objects model media files, shows, and tracks.
- Controllers encapsulate CRUD operations and workflow orchestration for shows, patterns, tags, tracks, season shifts, configuration, and conversion.
- `MediaDescriptorChangeSet` computes differences between a file and its stored target schema to drive metadata and disposition updates.
- File inspection caches combined `ffprobe` data and crop-detection results per source and sampling window within one process to avoid repeated subprocess work.
- Storage:
- SQLite via SQLAlchemy ORM, with schema rooted in shows, patterns, tracks, media tags, track tags, shifted seasons, and generic properties.
- A configuration JSON file supplies optional path, metadata-filtering, and filename-template settings.
- Integration adapters:
- Process execution wrapper for `ffmpeg`, `ffprobe`, `nice`, and `cpulimit`, with explicit disabled states for niceness and CPU limiting, support for both absolute `cpulimit` values and machine-wide percent input, and a combined `cpulimit -- nice -n ... <command>` execution shape when both limits are configured (a minimal sketch follows this list).
- HTTP adapter for TMDB via `requests`.
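A minimal sketch of that execution shape, not the actual wrapper in `src/ffx/`; the helper names and the `cpulimit -l` flag are assumptions:
```python
import subprocess

def build_limited_command(command, niceness=None, cpu_limit=None):
    # Wrap the media command as: cpulimit -l <abs> -- nice -n <n> <command>.
    # None disables niceness; None or 0 disables CPU limiting.
    wrapped = list(command)
    if niceness is not None:
        wrapped = ["nice", "-n", str(niceness)] + wrapped
    if cpu_limit:
        wrapped = ["cpulimit", "-l", str(cpu_limit), "--"] + wrapped
    return wrapped

def run_limited(command, niceness=None, cpu_limit=None):
    # Centralized launch point so timeout and error mapping stay in one place.
    return subprocess.run(build_limited_command(command, niceness, cpu_limit))
```
For example, `build_limited_command(["ffmpeg", "-i", "in.mkv", "out.webm"], 10, 200)` yields `['cpulimit', '-l', '200', '--', 'nice', '-n', '10', 'ffmpeg', '-i', 'in.mkv', 'out.webm']`.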
## Data And Interface Notes
- Key entities or records:
- `Show`: canonical TV show metadata plus digit-formatting rules for generated filenames.
- `Pattern`: regex rule tying filenames to one show and one target media schema.
- `Track` and `TrackTag`: persisted target stream records, codec, dispositions, audio layout, and stream-level tags. Detailed source-to-target mapping rules live in `requirements/subtrack_mapping.md`.
- `MediaTag`: persisted container-level metadata for a pattern.
- `ShiftedSeason`: mapping from source numbering ranges to adjusted season and episode numbers.
- `Property`: internal key-value storage currently used for database versioning.
- External interfaces:
- CLI commands for conversion, inspection, extraction, and crop detection.
- TUI workflows for rule authoring and rule maintenance.
- Environment variable `TMDB_API_KEY` for TMDB access.
- Config keys `databasePath`, `logDirectory`, and `outputFilenameTemplate`, plus optional metadata-filter rules.
- Validation rules:
- Only supported media-file extensions are accepted for conversion.
- Stored database version must match the runtime-required version.
- A normalized descriptor may have at most one default and one forced stream per relevant track type.
- Shifted-season ranges are intended not to overlap for the same show and season.
- TMDB lookups require a show ID and season and episode numbers.
- Error-handling approach:
- User-facing operational failures are raised as `click.ClickException` or warnings.
- Ambiguous default and forced stream states trigger prompts unless `--no-prompt` is set, in which case the command fails fast.
- External-process failures and invalid media are surfaced through logs and command errors rather than retries, except for TMDB rate-limit retries.
## Deployment And Operations
- Runtime environment:
- Local Python environment with the package installed and `ffmpeg`, `ffprobe`, `nice`, and `cpulimit` available on `PATH`.
- Deployment shape:
- Single-process command execution on demand; no daemon, queue, or network service of its own.
- Secrets and configuration handling:
- TMDB secret is read from `TMDB_API_KEY`.
- User config is read from `~/.local/etc/ffx.json`.
- Database path may also be overridden per command via `--database-file`.
- Logging and monitoring approach:
- File and console logging configured per invocation.
- Default log file path is `~/.local/var/log/ffx.log`.
- No dedicated monitoring integration is present.
## Open Technical Questions
- Question: Should Linux-specific assumptions such as `/dev/null`, `nice`, `cpulimit`, and `~/.local` remain part of the supported-platform contract?
- Risk: Portability and operational behavior are underspecified for non-Linux environments.
- Next decision needed: Either document Linux-like systems as the official support boundary or refactor the process and path handling for broader portability.
- Question: Should placeholder TUI surfaces such as settings and help become part of the required product surface or stay explicitly out of scope?
- Risk: The UI appears broader than the actually finished feature set.
- Next decision needed: Either remove or complete placeholder screens and update requirements accordingly.

requirements/pattern_management.md Normal file

@@ -0,0 +1,68 @@
# Pattern Management
This file defines the behavioral contract for managing shows, patterns, and
pattern-backed filename matching.
Primary source: actual tool code in `src/ffx/`.
Secondary source: operator intent captured in task discussion.
## Scope
- The show, pattern, and track hierarchy stored in SQLite.
- The role of a pattern as a reusable normalization definition for related media files.
- Filename-driven assignment of a scanned media file to one show through one matching pattern.
- Duplicate-match handling when more than one pattern matches the same filename.
## Terms
- `show`: logical series identity such as one TV show entry in the database.
- `pattern`: regex-backed normalization definition attached to one show.
- `track`: one persisted target-track definition attached to one pattern.
- `scanned media file`: one source file currently being inspected or converted.
- `duplicate pattern match`: a filename state where more than one stored pattern matches the same scanned media file.
- `pattern-backed target schema`: the combination of one pattern's stored media tags and stored track definitions.
## Rules
- `PATTERN_MANAGEMENT-0001`: The domain model shall treat a show as the parent entity for patterns that describe distinct release families or normalization schemas for that show. A show may temporarily exist without patterns during editing or initial TUI creation.
- `PATTERN_MANAGEMENT-0002`: Each persisted pattern shall belong to exactly one show.
- `PATTERN_MANAGEMENT-0003`: The domain model shall treat a pattern as the reusable normalization definition for a series of media files expected to share the same internal track layout and materially similar stream and container metadata.
- `PATTERN_MANAGEMENT-0004`: Each persisted track definition shall belong to exactly one pattern.
- `PATTERN_MANAGEMENT-0005`: A pattern may also carry pattern-level media tags. The pattern's media tags plus its track definitions together form the pattern-backed target schema.
- `PATTERN_MANAGEMENT-0006`: A scanned media file shall resolve to at most one pattern and therefore at most one show.
- `PATTERN_MANAGEMENT-0007`: If no pattern matches a filename, the file shall remain unmatched rather than being assigned implicitly.
- `PATTERN_MANAGEMENT-0008`: If more than one pattern matches the same filename, the system shall raise a duplicate pattern match error instead of silently selecting one.
- `PATTERN_MANAGEMENT-0009`: Duplicate-match detection shall apply regardless of whether the competing patterns belong to the same show or to different shows.
- `PATTERN_MANAGEMENT-0010`: Exact duplicate pattern definitions for the same show should not create multiple persisted pattern rows.
- `PATTERN_MANAGEMENT-0011`: A persisted pattern shall define one or more tracks. Creating or retaining a zero-track pattern in the database is invalid managed state and shall be prohibited.
- `PATTERN_MANAGEMENT-0012`: A show may exist without patterns as an intermediate editing state, for example when a user creates the show first in the TUI and adds patterns later.
- `PATTERN_MANAGEMENT-0013`: Operator-facing pattern management should expose the owning show, regex pattern, stored track set, and stored media-tag set so a user can reason about matching and normalization behavior.
- `PATTERN_MANAGEMENT-0014`: Matching semantics shall be deterministic and documented. Implicit "last matching pattern wins" behavior is not acceptable released behavior.
## Acceptance
- A filename that matches exactly one pattern yields one matched pattern and one show identity.
- A filename that matches no pattern yields no matched pattern and an unmatched state.
- A filename that matches more than one pattern yields an explicit duplicate-match error.
- A pattern-backed target schema can be reconstructed from one pattern's stored media tags and stored track definitions.
- A show may be stored before any patterns are attached to it.
- A pattern cannot be stored or retained as a valid managed pattern unless at least one track is defined for it.
- Pattern-backed conversion never proceeds with two competing matching patterns for the same input filename.
## Current Code Fit
- `src/ffx/model/show.py` implements a one-to-many `Show -> Pattern` relationship.
- `src/ffx/model/pattern.py` implements `Pattern.show_id`, a one-to-many `Pattern -> Track` relationship, a one-to-many `Pattern -> MediaTag` relationship, and a unique `(show_id, pattern)` constraint for freshly created databases.
- `src/ffx/model/track.py` implements `Track.pattern_id`, so each persisted track belongs to one pattern.
- `src/ffx/model/pattern.py` reconstructs a pattern-backed target schema through `Pattern.getMediaDescriptor(...)`, combining stored media tags and stored tracks.
- `src/ffx/file_properties.py` assumes a scanned file resolves to at most one pattern, because it stores only one `self.__pattern` and derives one `show_id` from it.
- `src/ffx/pattern_controller.py` prevents exact duplicate `(show_id, pattern)` definitions during create and update flows, and it refreshes cached compiled regexes when stored pattern expressions change.
- `src/ffx/pattern_controller.py` now complies with duplicate-match safety. `matchFilename(...)` scans deterministically, returns exactly one match, returns `{}` for no match, and raises an explicit duplicate-pattern-match error when more than one pattern matches the same filename (a sketch follows this list).
- The current persistence layer already aligns with the intended empty-show workflow because a show can exist without patterns.
- New pattern creation and schema replacement flows now require at least one track, and `TrackController.deleteTrack(...)` prevents deleting the last persisted track from a pattern.
- Trackless legacy rows can still exist in preexisting databases, but matching now rejects them explicitly instead of letting them participate silently.
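A minimal sketch of that duplicate-safe contract, assuming hypothetical `(pattern_id, regex_text)` rows rather than the actual ORM shape:
```python
import re

class DuplicatePatternMatchError(Exception):
    """Raised when more than one stored pattern matches the same filename."""

def match_filename(filename, stored_patterns):
    # Deterministic scan over all stored patterns; no "last match wins".
    matches = [
        (pattern_id, regex_text)
        for pattern_id, regex_text in stored_patterns
        if re.search(regex_text, filename)
    ]
    if not matches:
        return {}  # unmatched files are never assigned implicitly
    if len(matches) > 1:
        raise DuplicatePatternMatchError(
            f"{len(matches)} patterns match {filename!r}"
        )
    pattern_id, regex_text = matches[0]
    return {"pattern_id": pattern_id, "pattern": regex_text}
```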
## Risks
- The intended "release family" meaning of a pattern is a domain assumption, not something the code verifies automatically across all files matching that pattern.
- Preexisting databases created before the newer validation rules may still contain invalid rows, so upgrade and cleanup paths should continue to treat explicit validation failures as recoverable operator signals.

116
requirements/project.md Normal file

@@ -0,0 +1,116 @@
## Purpose And Scope
- Project name: FFX
- User problem: TV episode files from mixed sources arrive with inconsistent codecs, stream metadata, subtitle layouts, season and episode numbering, and output filenames, which makes them awkward to archive and use in media-player applications.
- Target users: Individual operators curating a local TV media library on a workstation, especially users willing to define normalization rules per show.
- Success outcome: A user can inspect source files, define reusable show and pattern rules, and produce output files whose streams, metadata, and filenames follow a predictable schema for web playback and library import.
- Out of scope:
- Multi-user or hosted service workflows.
- General movie-library management.
- Distributed transcoding or remote job orchestration.
- Broad media-server administration beyond file preparation.
## Required Product
- Deliverable type: Installable Python command-line application with a Textual terminal UI for inspection and rule editing.
- Core capabilities:
- Maintain an SQLite-backed database of shows, filename-matching patterns, per-pattern stream layouts and metadata tags, and optional season-shift rules.
- Inspect existing media files through `ffprobe` and compare discovered stream metadata with stored normalization rules.
- Convert media files through `ffmpeg` into a normalized output layout, including video recoding, audio transcoding to Opus, metadata cleanup and rewrite, and controlled disposition flags.
- Build output filenames from detected or configured show, season, and episode information, optionally enriched from TMDB and a configurable Jinja-style filename template.
- Support auxiliary file operations such as subtitle import, unmuxing, crop detection, and rename-only runs.
- Supported environments:
- Local execution on a Python-capable workstation.
- Best-supported on Linux-like systems because the implementation assumes `~/.local`, `/dev/null`, `nice`, and `cpulimit`.
- Requires `ffmpeg`, `ffprobe`, and `cpulimit` on `PATH`.
- Operational owner: The local user running the tool and maintaining its config, database, and external tooling.
## Suggested User Stories
- As a library maintainer, I want to define show-specific matching rules once so that future source files can be normalized automatically.
- As an operator, I want to inspect a file before conversion so that I can compare its actual streams and tags against the stored target schema.
- As a user preparing web-playback files, I want to recode video and audio with a small set of predictable options so that results are compatible and consistently named.
- As a user dealing with nonstandard releases, I want CLI overrides for language, title, stream order, default and forced tracks, and season and episode data so that one-off fixes do not require database edits first.
- As a user importing anime or other shifted numbering schemes, I want season and episode offsets per show so that generated filenames align with TMDB and media-library expectations.
## Functional Requirements
- The system shall provide a CLI entrypoint named `ffx` with commands for `convert`, `inspect`, `shows`, `unmux`, `cropdetect`, `setup`, `configure_workstation`, `upgrade`, `version`, and `help`.
- The system shall support a two-step local installation and preparation flow:
- `tools/setup.sh` is the bootstrap entrypoint for the first step and shall own bundle virtualenv creation, package installation, shell alias exposure, and optional Python test-package installation.
- `tools/configure_workstation.sh` is the bootstrap entrypoint for the second step and shall own workstation dependency checks and installation plus local config and directory seeding.
- After the bundle is installed, `ffx setup` and `ffx configure_workstation` shall remain aligned wrapper entrypoints for those same two steps.
- The CLI command `ffx setup` shall act as a wrapper for the first-step bundle-preparation flow in `tools/setup.sh`.
- The CLI command `ffx configure_workstation` shall act as a wrapper for the second-step preparation flow in `tools/configure_workstation.sh`.
- The system shall persist reusable normalization rules in SQLite for:
- shows and show formatting digits,
- regex-based filename patterns,
- per-pattern media tags,
- per-pattern stream definitions,
- shifted-season mappings,
- internal database version properties.
- Detailed show, pattern, and duplicate-match management rules live in `requirements/pattern_management.md`.
- The system shall inspect source media using `ffprobe` and derive a structured description of container metadata and streams.
- The system shall optionally open a Textual UI to browse shows, inspect files, and create, edit, or delete shows, patterns, stream definitions, tags, and shifted-season rules.
- The system shall match filenames against stored regex patterns to decide whether an input file should inherit a target stream and metadata schema.
- The system shall convert supported input files (`mkv`, `mp4`, `avi`, `flv`, `webm`) with `ffmpeg`, supporting at least:
- VP9, AV1, and H.264 video encoding,
- Opus audio encoding with bitrate selection based on channel layout,
- metadata and disposition rewriting,
- optional crop detection and crop application,
- optional deinterlacing and denoising,
- optional subtitle import from external files,
- rename-only move mode.
- The system shall support optional TMDB lookups to resolve show names, years, and episode titles when a show ID, season, and episode are available.
- The system shall generate output filenames from show metadata, season and episode indices, and episode names using the configured filename template.
- The system shall allow CLI overrides for stream languages, stream titles, default and forced tracks, stream order, TMDB show and episode data, output directory, label prefix, and processing resource limits.
- Processing resource limit rules:
- `--nice` shall accept niceness values from `-20` through `19`; omitting the option shall disable niceness adjustment.
- `--cpu` shall accept either a positive absolute `cpulimit` value such as `200`, or a percentage suffixed with `%` such as `25%` to represent a share of present CPUs; omitting the option or using `0` shall disable CPU limiting (a parsing sketch follows this list).
- When both limits are configured, the process wrapper shall execute the target command through `cpulimit` around a `nice -n ...` invocation so both limits apply to the launched media command.
- The system shall support extracting streams into separate files via `unmux` and reporting suggested crop parameters via `cropdetect`.
- Crop detection shall use a configurable sampling window, defaulting to a 60-second seek and a 180-second analysis duration, and repeated crop-detection requests for the same source plus sampling window shall reuse cached results within one process.
- The system shall handle invalid input and system failures gracefully by logging warnings or raising `click` errors for missing files, invalid media, missing TMDB credentials, incompatible database versions, and ambiguous track dispositions when prompting is disabled.
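A parsing sketch for the `--cpu` rule above, assuming the percent form converts a machine-wide share into an absolute `cpulimit` value; the helper name and rounding are assumptions:
```python
import os

def parse_cpu_option(value):
    # Returns an absolute cpulimit value, or None when limiting is disabled.
    if value is None:
        return None
    if value.endswith("%"):
        share = float(value[:-1])                 # "25%" -> 25.0
        cpus = os.cpu_count() or 1
        return int(share / 100.0 * cpus * 100.0) or None
    return int(value) or None                     # "0" disables limiting

# On an 8-CPU machine, "25%" resolves to 200; "200" stays 200; "0" disables.
assert parse_cpu_option("200") == 200
assert parse_cpu_option("0") is None
```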
## Quality Requirements
- The system should stay understandable as a small local tool: controllers, descriptors, models, and screens should remain separate enough for contributors to trace a workflow end to end.
- The system should produce predictable output for the same database rules, CLI overrides, and source files.
- The system should preserve a lightweight operational footprint: local SQLite state, local log file, no mandatory background services.
- The system should be testable through modern automatically discovered tests and through remaining legacy harness coverage during migration.
- The system should expose enough logging to diagnose failed probes, failed conversions, and rule mismatches without requiring a debugger.
## Constraints And Assumptions
- Technology constraints:
- Python package built with setuptools.
- Primary libraries: `click`, `textual`, `sqlalchemy`, `jinja2`, `requests`.
- Conversion and inspection rely on external executables rather than pure-Python media libraries.
- Hosting or infrastructure constraints:
- Intended for local execution, not server deployment.
- Stores default state in `~/.local/etc/ffx.json`, `~/.local/var/ffx/ffx.db`, and `~/.local/var/log/ffx.log`.
- Timeline constraints:
- The current implemented scope reflects a compact alpha release stream up to version `0.2.3`.
- Team capacity assumptions:
- Maintained as a small codebase where simple patterns and direct controller logic are preferred over framework-heavy abstractions.
- Third-party dependencies:
- `ffmpeg`, `ffprobe`, and `cpulimit`.
- TMDB API access through `TMDB_API_KEY` for metadata enrichment.
- Installation assumptions:
  - The Python-side bundle install step and the optional Python test extras are managed by `tools/setup.sh`; after bootstrap, `ffx setup` acts as the aligned wrapper for the same step.
- The workstation-preparation step is managed separately by `tools/configure_workstation.sh` or `ffx configure_workstation`.
## Acceptance Scope
- First release boundary:
- Local installation through `pip`.
- Working SQLite-backed rule storage.
- Functional CLI conversion and inspection workflows.
- Textual CRUD flows for shows, patterns, tags, tracks, and shifted seasons.
- TMDB-assisted filename generation, subtitle import, season shifting, database versioning, and configurable output filename templating.
- Excluded follow-up ideas:
- Completing placeholder screens such as settings and help.
- Hardening platform portability beyond Linux-like systems.
- Broader media types, richer release packaging, and production-grade background processing.
- Demonstration scenario:
- Inspect a TV episode file, define or update the matching show and pattern in the TUI, then run `ffx convert` so the result uses the stored stream schema, optional TMDB episode naming, and a normalized output filename.

requirements/subtrack_mapping.md Normal file

@@ -0,0 +1,74 @@
# Subtrack Mapping
This file defines the behavioral contract for mapping input subtracks to output
subtracks during conversion.
Primary source: actual tool code in `src/ffx/`.
Secondary source: `tests/legacy/`, used only to clarify intent and reveal gaps.
## Scope
- Ensuring each target subtrack is created from the corresponding source-subtrack information, including stream-level metadata.
- Mapping input streams to output streams during conversion.
- Using persisted pattern-track definitions from the database as the target schema.
- Allowing omission and reordering of retained tracks.
- Keeping stream-level metadata attached to the correct source-derived logical track after remapping.
- Normalizing target output into ordered track groups: video, audio, subtitle, then special types such as fonts or images.
## Terms
- `source_index`: identity of the originating input stream from ffprobe or an imported source descriptor.
- `index`: final output-track order across all retained tracks.
- `sub_index`: per-type position within the retained tracks of one type, for example audio stream `0` or subtitle stream `1`.
- `target schema`: stored or constructed output-track definition that decides which tracks are kept, omitted, reordered, and rewritten.
- `separate source file`: additional file bound to one target track slot whose media payload replaces the regular source payload for that slot.
## Rules
- `SUBTRACK_MAPPING-0001`: The system shall represent source-stream identity separately from output order. `source_index`, `index`, and `sub_index` are distinct concepts and shall not be collapsed into one field.
- `SUBTRACK_MAPPING-0002`: The system shall derive `source_index` for probed tracks from the original ffprobe stream index and preserve that identity through conversion planning.
- `SUBTRACK_MAPPING-0003`: Pattern-backed track definitions stored in the database shall persist both target output order and originating source-stream identity.
- `SUBTRACK_MAPPING-0004`: When a filename matches a pattern, the pattern target schema shall be the source of truth for which source tracks are retained, which are omitted, and in what order retained tracks appear in the output.
- `SUBTRACK_MAPPING-0005`: A target track may refer only to an existing source track of the same type. Conversion shall fail fast when a target track refers to a nonexistent source stream or a source stream of a different type.
- `SUBTRACK_MAPPING-0006`: The ffmpeg mapping phase shall be generated from target output order while resolving each retained output track back to its originating source stream via `source_index` (illustrated in the sketch after this rules list).
- `SUBTRACK_MAPPING-0007`: Reordering and omission shall preserve logical track identity. Stream-level metadata, titles, languages, and disposition decisions shall stay attached to the correct source-derived logical track after mapping.
- `SUBTRACK_MAPPING-0008`: The system shall support one-off CLI stream-order overrides without requiring prior database edits.
- `SUBTRACK_MAPPING-0009`: Operator-facing inspection and editing surfaces shall expose enough source-versus-target information to let a user reason about subtrack mapping decisions.
- `SUBTRACK_MAPPING-0010`: Test coverage for subtrack mapping shall assert source-derived identity, omission, and output order explicitly. Final track counts or final type sequences alone are insufficient proof of correct mapping.
- `SUBTRACK_MAPPING-0011`: Retained target tracks shall appear in ordered groups: video track or tracks first, then audio tracks, then subtitle tracks, then special types such as fonts or images. Within each group, the target schema shall define the order.
- `SUBTRACK_MAPPING-0012`: Track omission is valid when required by output compatibility, when needed to normalize source tracks into the required target group order and schema, or when explicitly requested by database rules or CLI options.
- `SUBTRACK_MAPPING-0013`: If source tracks do not already comply with the required target group order, conversion shall reorder retained tracks to match the target ordering contract without losing source-track identity or stream-level metadata lineage.
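A minimal planning sketch of these rules follows. The dataclasses and `planMappingTokens` are hypothetical stand-ins for the real `TrackDescriptor` and `MediaDescriptor` machinery:

```python
from dataclasses import dataclass

# Grouped target order per SUBTRACK_MAPPING-0011.
TYPE_GROUP_ORDER = {"video": 0, "audio": 1, "subtitle": 2, "attachment": 3}

@dataclass
class SourceTrack:
    source_index: int   # identity of the originating input stream
    type: str

@dataclass
class TargetTrack:
    index: int          # final output order across all retained tracks
    source_index: int   # originating input stream this slot resolves to
    type: str

def planMappingTokens(sources: list[SourceTrack],
                      targets: list[TargetTrack]) -> list[str]:
    bySource = {s.source_index: s for s in sources}
    orderedTargets = sorted(
        targets, key=lambda t: (TYPE_GROUP_ORDER[t.type], t.index))
    tokens: list[str] = []
    for target in orderedTargets:
        source = bySource.get(target.source_index)
        # Fail fast on invalid target-to-source references (rule 0005).
        if source is None or source.type != target.type:
            raise ValueError(
                f"Target track #{target.index} does not resolve to a "
                f"source track of type {target.type!r}")
        # Mapping follows target order but resolves back to the originating
        # stream via source_index (rule 0006).
        tokens += ["-map", f"0:{target.source_index}"]
    return tokens
```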
## Separate Additional Source Files
- `SUBTRACK_MAPPING-0014`: A separate source file may substitute the media payload of one target subtrack without changing that target track's intended output position.
- `SUBTRACK_MAPPING-0015`: When a separate source file is used, the target track shall remain bound to the corresponding logical source track for mapping, validation, and metadata lineage.
- `SUBTRACK_MAPPING-0016`: Metadata for a substituted target track shall be merged from the regular source track and the separate source file when available.
- `SUBTRACK_MAPPING-0017`: If the separate source file provides a metadata field that is also present on the regular source track, the separate source file value shall win in the target output.
- `SUBTRACK_MAPPING-0018`: If a metadata field is absent from the separate source file, the system shall fall back to the corresponding metadata from the regular source track or target schema rewrite rules (see the merge sketch below).
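A merge sketch for rules 0016 through 0018. The relative precedence of regular-source values over schema rewrite defaults is an assumption these rules leave open:

```python
def mergeTrackMetadata(regular: dict, separate: dict, rewrite: dict) -> dict:
    # Separate-source values win (rule 0017); fields the separate file does
    # not provide fall back to the regular source track or the schema
    # rewrite rules (rule 0018). Regular-over-rewrite order is an assumption.
    merged = dict(rewrite)
    merged.update({k: v for k, v in regular.items() if v is not None})
    merged.update({k: v for k, v in separate.items() if v is not None})
    return merged
```

For example, merging a regular track carrying `{"language": "eng", "title": "Episode"}` with a separate file carrying `{"title": "Episode (signs)"}` keeps `language` from the regular track and takes the separate file's title.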
## Acceptance
- Given a source media descriptor and a pattern-backed target schema, the planned output tracks can be listed in final output order and each retained track can still be traced to one originating source stream.
- Planned output order follows grouped target order: video, audio, subtitle, then special types.
- Tracks not referenced by the target schema are omitted from output mapping.
- Tracks may also be omitted when they are incompatible with the chosen output format or explicitly excluded by database or CLI rules.
- Two retained target tracks never originate from the same source stream unless duplication is implemented explicitly as a separate feature.
- If target-track metadata is rewritten after reordering, it is written onto the correct source-derived logical track rather than the track that merely occupies the same final output position.
- Invalid target-to-source references fail deterministically before the conversion job is launched.
- If a separate source file substitutes one target track, that track keeps its target slot and ordering while metadata is merged with separate-file values taking precedence when both sides provide the same field.
- A test proving subtrack mapping must assert at least one of: exact `source_index` to output-order mapping, omission of named source tracks, or preservation of per-track metadata after reorder.
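For illustration, a test in the spirit of the last acceptance item, reusing the hypothetical `SourceTrack`, `TargetTrack`, and `planMappingTokens` from the sketch in the rules section:

```python
def test_mapping_preserves_source_identity_and_omission():
    sources = [SourceTrack(0, "video"), SourceTrack(1, "audio"),
               SourceTrack(2, "audio"), SourceTrack(3, "subtitle")]
    # The schema keeps one audio track and omits source track #1 entirely.
    targets = [TargetTrack(0, 0, "video"),
               TargetTrack(1, 2, "audio"),
               TargetTrack(2, 3, "subtitle")]
    tokens = planMappingTokens(sources, targets)
    # Asserts exact source_index to output-order mapping, including omission.
    assert tokens == ["-map", "0:0", "-map", "0:2", "-map", "0:3"]
```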
## Test Notes
- `tests/legacy/scenario.py` names pattern behavior as `Filter/Reorder Tracks`.
- `tests/legacy/scenario_4.py` is the strongest end-to-end signal because it runs DB-backed conversion and reapplies source indices before assertion.
- `tests/legacy/track_tag_combinator_2_0.py` and `tests/legacy/track_tag_combinator_3_4.py` sort result tracks by `source_index` before checking tags, which matches the intended identity model.
- Legacy permutation combinators define permutations but their assertion functions are stubs.
- Some legacy scenarios produce `AP` and `SP` selectors but do not execute them.
## Risks
- `src/ffx/media_descriptor.py` contains an explicit `rearrangeTrackDescriptors()` path whose current implementation appears defective and under-tested.
- Separate-source-file metadata precedence is only partly expressed in current implementation paths and should be covered directly in the rewritten test suite.
- Production code expresses the mapping contract more clearly than the legacy harness, so a rewrite should add direct logic-level tests for mapping and reorder planning.

requirements/tests.md Normal file

@@ -0,0 +1,144 @@
# Test Rewrite
This file captures the structure executed by `tests/legacy_runner.py` today and
defines the target shape for a complete rewrite.
Detailed product rules for source-to-target subtrack mapping live in
`requirements/subtrack_mapping.md`. This file describes only how tests cover
that area.
## Interpreter Requirement
- Agents shall run Python-side test commands with `~/.local/share/ffx.venv/bin/python`.
- This applies to the legacy harness, `unittest`, `pytest`, helper scripts, and `python -m ffx ...` test invocations.
- Agents shall not silently substitute `python`, `python3`, or another interpreter for Python-side test work.
- If `~/.local/share/ffx.venv/bin/python` is missing or not executable, agents shall stop and report the missing venv instead of continuing with Python-side test execution.
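A sketch of how a helper script might enforce this, assuming nothing beyond the documented venv path:

```python
import os
import sys

VENV_PYTHON = os.path.expanduser("~/.local/share/ffx.venv/bin/python")

def requireVenvPython() -> str:
    # Stop and report instead of silently substituting another interpreter.
    if not os.access(VENV_PYTHON, os.X_OK):
        sys.exit(f"FFX venv interpreter missing or not executable: {VENV_PYTHON}")
    return VENV_PYTHON
```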
## Shell Environment Requirement
- Agents shall source `~/.bashrc` from an interactive Bash shell before running TMDB-dependent test commands or TMDB-dependent `python -m ffx ...` test invocations.
- Agents shall not source `~/.bashrc.d/interactive/77_tmdb.sh` directly for normal test work; `~/.bashrc` is the required entry point.
- In automation this means agents shall use an interactive Bash invocation such as `bash -ic 'source ~/.bashrc && ...'`, because a non-interactive `bash -lc` returns from `~/.bashrc` before the interactive fragments are loaded.
- If sourcing `~/.bashrc` still does not provide required shell environment such as `TMDB_API_KEY`, agents shall stop and report the missing environment instead of continuing with TMDB-dependent test execution.
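A companion sketch for TMDB-dependent runs; the `-i` flag is the load-bearing part, and the wrapper function itself is hypothetical:

```python
import subprocess

def runTmdbDependentCommand(command: str) -> int:
    # An interactive shell is required so ~/.bashrc reaches its interactive
    # fragments; a non-interactive `bash -lc` returns from ~/.bashrc early.
    probe = subprocess.run(["bash", "-ic", "source ~/.bashrc && env"],
                           capture_output=True, text=True)
    if "TMDB_API_KEY=" not in probe.stdout:
        # Stop and report instead of running TMDB-dependent tests blind.
        raise RuntimeError("TMDB_API_KEY not provided by ~/.bashrc")
    return subprocess.run(
        ["bash", "-ic", f"source ~/.bashrc && {command}"]).returncode
```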
## Current Harness
- Entrypoint: `~/.local/share/ffx.venv/bin/python tests/legacy_runner.py run`
- Runner style: custom Click CLI, not `pytest` or `unittest`
- Commands:
- `run`: discover scenario files, instantiate each scenario, run yielded jobs
- `dupe`: helper command that creates duplicate media fixtures; not part of the test run
- Filters: `--scenario`, `--variant`, `--limit`
- Shared context:
- builds one mutable dict for the whole run
- installs loggers and writes `ffx_test_report.log`
- creates `ConfigurationController` eagerly
- tracks only passed and failed counters
- Discovery:
- scenario files: `tests/legacy/scenario_*.py`
- combinators: `glob + importlib + inspect` by filename convention
- ordering: implicit glob order, no explicit sorting
- Skip behavior:
- Scenario 4 is skipped when `TMDB_API_KEY` is missing
- only `TMDB_API_KEY_NOT_PRESENT_EXCEPTION` is caught at scenario construction time
## Current Scenarios
- `1`: `tests/legacy/scenario_1.py`
- focus: basename generation without pattern lookup or TMDB
- inputs per job: `1`
- jobs: `140`
- expected failures: `0`
- execution: build one synthetic source file, run `~/.local/share/ffx.venv/bin/python -m ffx convert`, assert filename selectors only
- selectors executed: `B`, `L`, `I`
- selectors defined but not executed: `S`, `R`
- `2`: `tests/legacy/scenario_2.py`
- focus: conversion matrix over media layouts, dispositions, tags, and permutations
- inputs per job: `1`
- jobs: `8193`
- expected failures: `3267`
- execution: build one synthetic source file, run `~/.local/share/ffx.venv/bin/python -m ffx convert`, probe result with `FileProperties`, assert track layout and selected audio and subtitle metadata
- selectors executed: `M`, `AD`, `AT`, `SD`, `ST`
- selectors defined but not executed: `MT`, `AP`, `SP`, `J`
- `4`: `tests/legacy/scenario_4.py`
- focus: pattern-driven batch conversion with SQLite state and live TMDB naming
- inputs per job: `6`
- jobs: `768`
- expected failures: `336`
- execution: build six synthetic preset files, recreate temp SQLite DB, insert show and pattern, run one batch convert command via `~/.local/share/ffx.venv/bin/python`, query TMDB during assertions
- selectors executed: `M`, `AD`, `AT`, `SD`, `ST`
- selectors defined but not executed: `MT`, `AP`, `SP`, `J`
- notes:
- uses `MediaCombinator6` only
- issues live HTTP requests through `TmdbController` with no request cache
## Current Combinator Families
- scenario files discovered: `3`
- basename combinators discovered: `2`
- media combinators discovered: `8`
- media tag combinators discovered: `3`
- disposition combinator 2 variants: `4`
- disposition combinator 3 variants: `5`
- track tag combinator 2 variants: `4`
- track tag combinator 3 variants: `5`
- indicator variants: `7`
- label variants: `2`
- show variants: `3`
- release variants: `3`
- permutation 2 variants: `2`
- permutation 3 variants: `3`
## Current Totals
- full run without TMDB: `8333` jobs (Scenario 1's `140` plus Scenario 2's `8193`)
- full run with TMDB: `9101` jobs (adding Scenario 4's `768`)
- Scenario 4 generated source files: `4608` (`768` jobs × `6` inputs per job)
- Scenario 4 live TMDB episode queries: `4608` (one per generated file)
## Current Behavior Areas
- output basename rules for label, season and episode indicator, show name, and release suffix combinations
- track layout normalization across the eight media combinator shapes from `VA` through `VAASSS`
- two-track and three-track disposition edge cases, including intentional failure cases
- two-track and three-track track-tag preservation checks, including checks that sort results by source identity
- container-level media tag handling
- pattern-backed conversion against a temporary SQLite database
- TMDB-assisted episode naming for batch conversion
## Structural Findings
- The suite is process-heavy: most jobs run `ffmpeg` to generate a fixture and then spawn the FFX CLI as a subprocess.
- The suite is integration-first and has almost no isolated unit-level coverage for pure logic.
- The base `Combinator` class is a placeholder and is not the real abstraction boundary used by the suite.
- Many combinator methods are placeholders: there are `25` `pass` statements across the current test modules.
- Several assertion families are never executed because scenario selector dispatch is incomplete.
- Scenario comments mention a Scenario 3, but no `scenario_3.py` exists.
- `tests/legacy/_basename_combinator_1.py` is effectively orphaned because discovery only matches `basename_combinator_*.py`.
- `tests/legacy/disposition_combinator_2_3 .py` contains an embedded space in the filename and is still part of discovery.
- Expected failures are validated only as subprocess return-code matches, not as specific error types or messages.
- The current suite depends on `ffmpeg`, `ffprobe`, SQLite, the local Python environment, and for Scenario 4 a live TMDB API key plus network access.
## Rewrite Target
- Replace the custom Click harness with a standard test runner, preferably `pytest`.
- Split the suite into explicit layers: unit, integration, and optional external-system tests.
- Keep unit tests as the default path and make them runnable without `ffmpeg`, `ffprobe`, TMDB, or a user config directory.
- Model discovery explicitly in code instead of relying on glob-plus-reflection naming conventions.
- Convert the current Cartesian-product combinators into readable parametrized cases grouped by behavior area (see the sketch after this list).
- Preserve the current behavior areas, but represent them with targeted cases instead of thousands of opaque variant IDs.
- Make every assertion family explicit and executable; there must be no selector that is produced but never consumed.
- Replace live TMDB access with fixtures or mocks in normal runs; any live-contract test must be opt-in.
- Replace ad hoc subprocess return-code checks with assertions on typed exceptions, stderr content, or structured outputs.
- Provide small reusable media fixtures or fixture builders so only a narrow integration slice needs `ffmpeg`-generated media.
- Make database tests self-contained and fast through temporary databases and direct controller-level assertions.
- Make ordering, naming, and selection deterministic so a contributor can predict exactly what will run.
- Expose a small smoke suite for quick local runs and CI, plus a separately marked slower integration suite.
- Prefer domain-oriented test modules over combinator-family modules: basename, pattern matching, metadata rewrite, track ordering, TMDB naming, CLI smoke, and failure handling.
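As an illustration of the parametrized shape, with a local stand-in for the real basename helper (`ffx.helper.getEpisodeFileBasename` has a richer signature):

```python
import pytest

def buildEpisodeBasename(label: str, season: int, episode: int) -> str:
    # Local stand-in so the example is self-contained; not the real helper.
    indicator = f"S{season:02d}E{episode:02d}"
    return f"{label}_{indicator}" if label else indicator

@pytest.mark.parametrize(
    ("label", "season", "episode", "expected"),
    [
        ("MyShow", 1, 2, "MyShow_S01E02"),
        ("", 12, 3, "S12E03"),
    ],
)
def test_episode_basename(label, season, episode, expected):
    assert buildEpisodeBasename(label, season, episode) == expected
```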
## Rewrite Acceptance
- A default local test run finishes quickly and without network access.
- A contributor can identify which behavior a failing test covers without decoding variant strings like `VAASSS-A:D10-S:T001`.
- All current intended failure behaviors remain covered, but each one is asserted directly and readably.
- The rewritten suite can be adopted by CI without requiring live TMDB credentials.

src/ffx/__main__.py Normal file

@@ -0,0 +1,9 @@
from .cli import ffx
def main():
ffx()
if __name__ == "__main__":
main()


@@ -9,6 +9,7 @@ class AudioLayout(Enum):
LAYOUT_7_1 = {"label": "7.1", "index": 4} #TODO: Does this exist?
LAYOUT_6CH = {"label": "6ch", "index": 5}
LAYOUT_5_0 = {"label": "5.0(side)", "index": 6}
LAYOUT_UNDEFINED = {"label": "undefined", "index": 0}
@@ -29,6 +30,15 @@ class AudioLayout(Enum):
except:
return AudioLayout.LAYOUT_UNDEFINED
# @staticmethod
# def fromIndex(index : int):
# try:
# target_index = int(index)
# except (TypeError, ValueError):
# return AudioLayout.LAYOUT_UNDEFINED
# return next((a for a in AudioLayout if a.value['index'] == target_index),
# AudioLayout.LAYOUT_UNDEFINED)
@staticmethod
def fromIndex(index : int):
try:


@@ -1,40 +1,189 @@
#! /usr/bin/python3
import os, click, time, logging
from __future__ import annotations
from ffx.configuration_controller import ConfigurationController
import os, sys, click, time, shutil, subprocess
from typing import TYPE_CHECKING
from ffx.file_properties import FileProperties
# Allow direct execution via `python src/ffx/cli.py` by preferring the package
# root on sys.path.
if __package__ in (None, ''):
script_dir = os.path.dirname(__file__)
package_root = os.path.dirname(os.path.dirname(__file__))
sys.path = [p for p in sys.path if os.path.abspath(p) != os.path.abspath(script_dir)]
sys.path.insert(0, package_root)
from ffx.ffx_app import FfxApp
from ffx.ffx_controller import FfxController
from ffx.tmdb_controller import TmdbController
from ffx.constants import (
DEFAULT_AC3_BANDWIDTH,
DEFAULT_CROPDETECT_DURATION_SECONDS,
DEFAULT_CROPDETECT_SEEK_SECONDS,
DEFAULT_cut_length,
DEFAULT_cut_start,
DEFAULT_CONTAINER_EXTENSION,
DEFAULT_CONTAINER_FORMAT,
DEFAULT_DTS_BANDWIDTH,
DEFAULT_STEREO_BANDWIDTH,
DEFAULT_VIDEO_ENCODER_LABEL,
FFMPEG_COMMAND_TOKENS,
SUPPORTED_INPUT_FILE_EXTENSIONS,
VERSION,
)
from ffx.database import databaseContext
if TYPE_CHECKING:
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.show_descriptor import ShowDescriptor
LIGHTWEIGHT_COMMANDS = {None, 'version', 'help', 'setup', 'configure_workstation', 'upgrade'}
CPU_OPTION_HELP = (
"Limit CPU for started processes. Use an absolute cpulimit value such as 200 "
+ "(about 2 cores), or use a percentage such as 25% for a share of present cores. "
+ "Omit to disable; 0 also disables."
)
SUBTITLE_DIRECTORY_OPTION_HELP = (
"Load subtitles from here. When omitted and --subtitle-prefix is set, "
+ "FFX uses the configured subtitlesDirectory base path plus the prefix as a subdirectory."
)
SUBTITLE_PREFIX_OPTION_HELP = (
"Subtitle filename prefix. Requires --subtitle-directory, or a configured "
+ "subtitlesDirectory base path that contains a matching <prefix>/ subdirectory."
)
UNMUX_OUTPUT_DIRECTORY_OPTION_HELP = (
"Write extracted streams here. When omitted together with --subtitles-only and "
+ "--label, FFX uses the configured subtitlesDirectory base path plus the label."
)
CROPDETECT_SEEK_OPTION_HELP = (
"Start crop detection this many seconds into the input. "
+ "Useful for skipping logos, intros, or black frames."
)
CROPDETECT_DURATION_OPTION_HELP = (
"Analyze this many seconds for crop detection. "
+ "Shorter windows are faster; longer windows are usually steadier."
)
DEFAULT_CUT_OPTION_VALUE = f"{DEFAULT_cut_start},{DEFAULT_cut_length}"
CUT_OPTION_HELP = (
"Cut output in seconds. "
+ f"Use --cut for the default {DEFAULT_CUT_OPTION_VALUE}, "
+ "--cut DURATION to cut from 0 for DURATION seconds, "
+ "or --cut START,DURATION for an explicit start and duration. "
+ "Omit to disable."
)
from ffx.track_type import TrackType
from ffx.video_encoder import VideoEncoder
from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.process import executeProcess
from ffx.helper import filterFilename
from ffx.helper import getEpisodeFileBasename
def normalizeNicenessOption(ctx, param, value):
from ffx.process import normalizeNiceness
from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAULT_DTS_BANDWIDTH, DEFAULT_7_1_BANDWIDTH
try:
return normalizeNiceness(value)
except ValueError as ex:
raise click.BadParameter(str(ex)) from ex
from ffx.filter.quality_filter import QualityFilter
from ffx.filter.preset_filter import PresetFilter
from ffx.filter.nlmeans_filter import NlmeansFilter
def normalizeCpuOption(ctx, param, value):
from ffx.process import normalizeCpuPercent
from ffx.constants import VERSION
try:
return normalizeCpuPercent(value)
except ValueError as ex:
raise click.BadParameter(str(ex)) from ex
def parseCutOptionValue(value) -> tuple[int, int] | None:
if value is None:
return None
cutValue = str(value).strip()
if not cutValue:
raise ValueError(
"Cut value must be DURATION or START,DURATION, or use --cut without a value."
)
cutTokens = [token.strip() for token in cutValue.split(',')]
try:
if len(cutTokens) == 1:
cutStart = 0
cutLength = int(cutTokens[0])
elif len(cutTokens) == 2:
cutStart = int(cutTokens[0])
cutLength = int(cutTokens[1])
else:
raise ValueError
except ValueError as ex:
raise ValueError(
"Cut value must be DURATION or START,DURATION, or use --cut without a value."
) from ex
if cutStart < 0:
raise ValueError("Cut start must be 0 or greater.")
if cutLength <= 0:
raise ValueError("Cut duration must be greater than 0.")
return cutStart, cutLength
def normalizeCutOption(ctx, param, value):
try:
return parseCutOptionValue(value)
except ValueError as ex:
raise click.BadParameter(str(ex)) from ex
def resolveSubtitleImportOptions(context, subtitleDirectory, subtitlePrefix):
resolvedSubtitlePrefix = str(subtitlePrefix).strip()
resolvedSubtitleDirectory = (
os.path.expanduser(str(subtitleDirectory).strip())
if subtitleDirectory
else ''
)
if not resolvedSubtitlePrefix:
return False, resolvedSubtitleDirectory, resolvedSubtitlePrefix
if resolvedSubtitleDirectory:
return True, resolvedSubtitleDirectory, resolvedSubtitlePrefix
configuredSubtitlesBaseDirectory = context['config'].getSubtitlesDirectoryPath()
if not configuredSubtitlesBaseDirectory:
raise click.ClickException(
"Subtitle prefix was set but no --subtitle-directory was provided and "
+ "no subtitlesDirectory default is configured in ffx.json."
)
resolvedSubtitleDirectory = os.path.join(
configuredSubtitlesBaseDirectory,
resolvedSubtitlePrefix,
)
if not os.path.isdir(resolvedSubtitleDirectory):
raise click.ClickException(
"Subtitle prefix was set but the resolved subtitle directory does not exist: "
+ resolvedSubtitleDirectory
)
return True, resolvedSubtitleDirectory, resolvedSubtitlePrefix
def resolveUnmuxOutputDirectory(context, outputDirectory, subtitlesOnly, label):
resolvedOutputDirectory = (
os.path.expanduser(str(outputDirectory).strip())
if outputDirectory
else ''
)
resolvedLabel = str(label).strip()
if resolvedOutputDirectory or not subtitlesOnly or not resolvedLabel:
return resolvedOutputDirectory, False
configuredSubtitlesBaseDirectory = context['config'].getSubtitlesDirectoryPath()
if not configuredSubtitlesBaseDirectory:
raise click.ClickException(
"Subtitles-only unmux with --label requires --output-directory or a configured "
+ "subtitlesDirectory default in ffx.json."
)
return os.path.join(configuredSubtitlesBaseDirectory, resolvedLabel), True
from ffx.shifted_season_controller import ShiftedSeasonController
@click.group()
@@ -47,6 +196,18 @@ def ffx(ctx, database_file, verbose, dry_run):
ctx.obj = {}
if ctx.resilient_parsing:
return
if ctx.invoked_subcommand in LIGHTWEIGHT_COMMANDS:
ctx.obj['dry_run'] = dry_run
ctx.obj['verbosity'] = verbose
return
from ffx.configuration_controller import ConfigurationController
from ffx.database import databaseContext
from ffx.logging_utils import configure_ffx_logger
ctx.obj['config'] = ConfigurationController()
ctx.obj['database'] = databaseContext(databasePath=database_file
@@ -63,23 +224,11 @@ def ffx(ctx, database_file, verbose, dry_run):
fileLogVerbosity = max(40 - verbose * 10, 10)
consoleLogVerbosity = max(20 - verbose * 10, 10)
ctx.obj['logger'] = logging.getLogger('FFX')
ctx.obj['logger'].setLevel(logging.DEBUG)
ffxFileHandler = logging.FileHandler(ctx.obj['config'].getLogFilePath())
ffxFileHandler.setLevel(fileLogVerbosity)
ffxConsoleHandler = logging.StreamHandler()
ffxConsoleHandler.setLevel(consoleLogVerbosity)
fileFormatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ffxFileHandler.setFormatter(fileFormatter)
consoleFormatter = logging.Formatter(
'%(message)s')
ffxConsoleHandler.setFormatter(consoleFormatter)
ctx.obj['logger'].addHandler(ffxConsoleHandler)
ctx.obj['logger'].addHandler(ffxFileHandler)
ctx.obj['logger'] = configure_ffx_logger(
ctx.obj['config'].getLogFilePath(),
fileLogVerbosity,
consoleLogVerbosity,
)
# Define a subcommand
@@ -92,13 +241,164 @@ def version():
@ffx.command()
def help():
click.echo(f"ffx {VERSION}\n")
click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
click.echo("Maintenance commands: setup, configure_workstation, upgrade")
click.echo("Media commands: shows, inspect, convert, unmux, cropdetect")
click.echo("Use 'ffx --help' or 'ffx <command> --help' for full command help.")
def getRepoRootPath():
currentFilePath = os.path.abspath(__file__)
return os.path.dirname(os.path.dirname(os.path.dirname(currentFilePath)))
def getConfigureWorkstationScriptPath():
return os.path.join(getRepoRootPath(), 'tools', 'configure_workstation.sh')
def getSetupScriptPath():
return os.path.join(getRepoRootPath(), 'tools', 'setup.sh')
def getBundleVenvDirectory():
return os.path.join(os.path.expanduser('~'), '.local', 'share', 'ffx.venv')
def getBundlePipPath():
return os.path.join(getBundleVenvDirectory(), 'bin', 'pip')
def getBundleRepoPath():
return getRepoRootPath()
def getTrackedGitChanges(repoPath):
completed = subprocess.run(
['git', 'status', '--porcelain', '--untracked-files=no'],
cwd=repoPath,
capture_output=True,
text=True,
)
if completed.returncode != 0:
commandLabel = 'git status --porcelain --untracked-files=no'
errorOutput = completed.stderr.strip() or completed.stdout.strip()
raise click.ClickException(
f"Unable to inspect bundle repository state using '{commandLabel}': {errorOutput}"
)
return [line for line in completed.stdout.splitlines() if line.strip()]
def runScriptWrapper(ctx, scriptPath, missingDescription, commandArgs):
if not os.path.isfile(scriptPath):
raise click.ClickException(f"{missingDescription} not found at {scriptPath}")
commandSequence = ['bash', scriptPath] + list(commandArgs)
if ctx.obj.get('dry_run', False):
click.echo(' '.join(commandSequence))
return
completed = subprocess.run(commandSequence)
ctx.exit(completed.returncode)
@ffx.command(name='setup')
@click.pass_context
@click.option('--check', is_flag=True, default=False, help='Only verify bundle-setup readiness')
@click.option('--with-tests', is_flag=True, default=False, help='Also install or verify Python test packages in the bundle venv')
@click.argument('setup_args', nargs=-1, type=click.UNPROCESSED)
def setup(ctx, check, with_tests, setup_args):
"""Prepare or repair the FFX bundle virtualenv and shell alias."""
commandArgs = []
if check:
commandArgs.append('--check')
if with_tests:
commandArgs.append('--with-tests')
commandArgs += list(setup_args)
runScriptWrapper(ctx, getSetupScriptPath(), "Bundle setup script", commandArgs)
@ffx.command(name='configure_workstation')
@click.pass_context
@click.option('--check', is_flag=True, default=False, help='Only verify workstation-configuration readiness')
@click.argument('configure_args', nargs=-1, type=click.UNPROCESSED)
def configure_workstation(ctx, check, configure_args):
"""Prepare workstation dependencies and local config after bundle install."""
commandArgs = []
if check:
commandArgs.append('--check')
commandArgs += list(configure_args)
runScriptWrapper(
ctx,
getConfigureWorkstationScriptPath(),
"Workstation configuration script",
commandArgs,
)
@ffx.command(name='upgrade')
@click.pass_context
@click.option('--branch', type=str, default='', help='Checkout this branch before pulling')
def upgrade(ctx, branch):
bundleRepoPath = getBundleRepoPath()
bundlePipPath = getBundlePipPath()
if not os.path.isdir(bundleRepoPath):
raise click.ClickException(f"Bundle repository not found at {bundleRepoPath}")
if not os.path.isfile(bundlePipPath):
raise click.ClickException(f"Bundle pip not found at {bundlePipPath}")
commandSequences = []
trackedChanges = getTrackedGitChanges(bundleRepoPath)
if trackedChanges:
click.echo("Tracked local changes detected in the bundle repository:")
for trackedChange in trackedChanges:
click.echo(f" {trackedChange}")
shouldReset = click.confirm(
"Discard these tracked changes with 'git reset --hard HEAD' before upgrade?",
default=False,
)
if not shouldReset:
raise click.ClickException(
"Upgrade aborted because tracked local changes are present."
)
commandSequences.append(['git', 'reset', '--hard', 'HEAD'])
if branch:
commandSequences.append(['git', 'checkout', branch])
commandSequences += [
['git', 'pull'],
[bundlePipPath, 'install', '--upgrade', 'pip', 'setuptools', 'wheel'],
[bundlePipPath, 'install', '--editable', '.'],
]
if ctx.obj.get('dry_run', False):
for commandSequence in commandSequences:
click.echo(f"(cd {bundleRepoPath} && {' '.join(commandSequence)})")
return
for commandSequence in commandSequences:
completed = subprocess.run(commandSequence, cwd=bundleRepoPath)
if completed.returncode != 0:
ctx.exit(completed.returncode)
@ffx.command()
@click.pass_context
@click.argument('filename', nargs=1)
def inspect(ctx, filename):
from ffx.ffx_app import FfxApp
ctx.obj['command'] = 'inspect'
ctx.obj['arguments'] = {}
@@ -111,7 +411,7 @@ def inspect(ctx, filename):
def getUnmuxSequence(trackDescriptor: TrackDescriptor, sourcePath, targetPrefix, targetDirectory = ''):
# executable and input file
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
commandTokens = list(FFMPEG_COMMAND_TOKENS) + ['-i', sourcePath]
trackType = trackDescriptor.getType()
@@ -141,10 +441,24 @@ def getUnmuxSequence(trackDescriptor: TrackDescriptor, sourcePath, targetPrefix,
@click.argument('paths', nargs=-1)
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option("-o", "--output-directory", type=str, default='')
@click.option("-o", "--output-directory", type=str, default='', help=UNMUX_OUTPUT_DIRECTORY_OPTION_HELP)
@click.option("-s", "--subtitles-only", is_flag=True, default=False)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
@click.option(
'--nice',
type=int,
default=None,
callback=normalizeNicenessOption,
show_default='disabled',
help='Adjust niceness of started processes (-20..19). Omit to disable; 99 also disables.',
)
@click.option(
'--cpu',
type=str,
default=None,
callback=normalizeCpuOption,
show_default='disabled',
help=CPU_OPTION_HELP,
)
def unmux(ctx,
paths,
label,
@@ -152,14 +466,28 @@ def unmux(ctx,
subtitles_only,
nice,
cpu):
from ffx.file_properties import FileProperties
from ffx.process import executeProcess
from ffx.track_disposition import TrackDisposition
from ffx.track_type import TrackType
existingSourcePaths = [p for p in paths if os.path.isfile(p)]
ctx.obj['logger'].debug(f"\nUnmuxing {len(existingSourcePaths)} files")
ctx.obj['resource_limits'] = {}
ctx.obj['resource_limits']['niceness'] = nice
ctx.obj['resource_limits']['cpu_limit'] = cpu
ctx.obj['resource_limits']['cpu_percent'] = cpu
output_directory, create_output_directory = resolveUnmuxOutputDirectory(
ctx.obj,
output_directory,
subtitles_only,
label,
)
if create_output_directory and existingSourcePaths and not ctx.obj.get('dry_run', False):
os.makedirs(output_directory, exist_ok=True)
for sourcePath in existingSourcePaths:
fp = FileProperties(ctx.obj, sourcePath)
@@ -181,11 +509,12 @@ def unmux(ctx,
else:
ctx.obj['logger'].info(f"\nUnmuxing file {fp.getFilename()}\n")
for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
# for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in sourceMediaDescriptor.getTrackDescriptors():
if trackDescriptor.getType() == TrackType.SUBTITLE or not subtitles_only:
# SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
# SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
targetPrefix = f"{targetLabel}{targetIndicator}_{trackDescriptor.getIndex()}_{trackDescriptor.getLanguage().threeLetter()}"
td: TrackDisposition
@@ -211,10 +540,79 @@ def unmux(ctx,
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option(
'--nice',
type=int,
default=None,
callback=normalizeNicenessOption,
show_default='disabled',
help='Adjust niceness of started processes (-20..19). Omit to disable; 99 also disables.',
)
@click.option(
'--cpu',
type=str,
default=None,
callback=normalizeCpuOption,
show_default='disabled',
help=CPU_OPTION_HELP,
)
@click.option(
'--crop-seek',
type=click.IntRange(min=0),
default=DEFAULT_CROPDETECT_SEEK_SECONDS,
show_default=True,
help=CROPDETECT_SEEK_OPTION_HELP,
)
@click.option(
'--crop-duration',
type=click.IntRange(min=1),
default=DEFAULT_CROPDETECT_DURATION_SECONDS,
show_default=True,
help=CROPDETECT_DURATION_OPTION_HELP,
)
def cropdetect(ctx,
paths,
nice,
cpu,
crop_seek,
crop_duration):
from ffx.file_properties import FileProperties
existingSourcePaths = [p for p in paths if os.path.isfile(p)]
ctx.obj['logger'].debug(f"\nUnmuxing {len(existingSourcePaths)} files")
ctx.obj['resource_limits'] = {}
ctx.obj['resource_limits']['niceness'] = nice
ctx.obj['resource_limits']['cpu_limit'] = cpu
ctx.obj['resource_limits']['cpu_percent'] = cpu
ctx.obj['cropdetect'] = {
'seek_seconds': crop_seek,
'duration_seconds': crop_duration,
}
for sourcePath in existingSourcePaths:
try:
fp = FileProperties(ctx.obj, sourcePath)
cropParams = fp.findCropArguments()
click.echo(cropParams)
except Exception as ex:
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
@ffx.command()
@click.pass_context
def shows(ctx):
from ffx.ffx_app import FfxApp
ctx.obj['command'] = 'shows'
@@ -223,6 +621,8 @@ def shows(ctx):
def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
from ffx.track_disposition import TrackDisposition
from ffx.track_type import TrackType
# Check for multiple default or forced dispositions if not set by user input or database requirements
#
@@ -272,17 +672,17 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9 or av1)", show_default=True)
@click.option('-v', '--video-encoder', type=str, default=DEFAULT_VIDEO_ENCODER_LABEL, help=f"Target video encoder (vp9, av1, h264 or copy)", show_default=True)
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9 encoder")
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9/H264 encoder")
@click.option('-p', '--preset', type=str, default="", help=f"Quality preset to be used with AV1 encoder")
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
@click.option('--ac3', type=int, default=DEFAULT_AC3_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 5.1 audio streams", show_default=True)
@click.option('--dts', type=int, default=DEFAULT_DTS_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 6.1 audio streams", show_default=True)
@click.option('--subtitle-directory', type=str, default='', help='Load subtitles from here')
@click.option('--subtitle-prefix', type=str, default='', help='Subtitle filename prefix')
@click.option('--subtitle-directory', type=str, default='', help=SUBTITLE_DIRECTORY_OPTION_HELP)
@click.option('--subtitle-prefix', type=str, default='', help=SUBTITLE_PREFIX_OPTION_HELP)
@click.option('--language', type=str, multiple=True, help='Set stream language. Use format <stream index>:<3 letter iso code>')
@click.option('--title', type=str, multiple=True, help='Set stream title. Use format <stream index>:<title>')
@@ -296,10 +696,36 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output streams order. Use format comma separated integers')
@click.option("--crop", is_flag=False, flag_value="default", default="none")
@click.option("--crop", is_flag=False, flag_value="auto", default="none")
@click.option(
'--crop-seek',
type=click.IntRange(min=0),
default=DEFAULT_CROPDETECT_SEEK_SECONDS,
show_default=True,
help='When --crop auto is used, start crop detection this many seconds into the input.',
)
@click.option(
'--crop-duration',
type=click.IntRange(min=1),
default=DEFAULT_CROPDETECT_DURATION_SECONDS,
show_default=True,
help='When --crop auto is used, analyze this many seconds for crop detection.',
)
@click.option(
"--cut",
type=str,
metavar="DURATION|START,DURATION",
is_flag=False,
flag_value=DEFAULT_CUT_OPTION_VALUE,
default=None,
callback=normalizeCutOption,
help=CUT_OPTION_HELP,
)
@click.option("--output-directory", type=str, default='')
@click.option("--deinterlace", is_flag=False, flag_value="default", default="none")
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
@click.option("--denoise-use-hw", is_flag=True, default=False)
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs more details.')
@@ -321,8 +747,24 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
@click.option("--no-signature", is_flag=True, default=False)
@click.option("--keep-mkvmerge-metadata", is_flag=True, default=False)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
@click.option(
'--nice',
type=int,
default=None,
callback=normalizeNicenessOption,
show_default='disabled',
help='Adjust niceness of started processes (-20..19). Omit to disable; 99 also disables.',
)
@click.option(
'--cpu',
type=str,
default=None,
callback=normalizeCpuOption,
show_default='disabled',
help=CPU_OPTION_HELP,
)
@click.option('--rename-only', is_flag=True, default=False, help='Only renaming and moving, no recoding')
def convert(ctx,
paths,
@@ -350,8 +792,14 @@ def convert(ctx,
rearrange_streams,
crop,
crop_seek,
crop_duration,
cut,
output_directory,
deinterlace,
denoise,
denoise_use_hw,
denoise_strength,
@@ -372,13 +820,28 @@ def convert(ctx,
keep_mkvmerge_metadata,
nice,
cpu):
cpu,
rename_only):
"""Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin
Files found under PATHS will be converted according to parameters.
Filename extensions will be changed appropriately.
Suffices will we appended to filename in case of multiple created files
or if the filename has not changed."""
from ffx.ffx_controller import FfxController
from ffx.file_properties import FileProperties
from ffx.filter.crop_filter import CropFilter
from ffx.filter.deinterlace_filter import DeinterlaceFilter
from ffx.filter.nlmeans_filter import NlmeansFilter
from ffx.filter.preset_filter import PresetFilter
from ffx.filter.quality_filter import QualityFilter
from ffx.helper import filterFilename, getEpisodeFileBasename, substituteTmdbFilename
from ffx.shifted_season_controller import ShiftedSeasonController
from ffx.show_descriptor import ShowDescriptor
from ffx.tmdb_controller import TmdbController
from ffx.track_codec import TrackCodec
from ffx.track_disposition import TrackDisposition
from ffx.video_encoder import VideoEncoder
startTime = time.perf_counter()
@@ -386,8 +849,13 @@ def convert(ctx,
context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)
targetFormat = FfxController.DEFAULT_FILE_FORMAT
targetExtension = FfxController.DEFAULT_FILE_EXTENSION
# HINT: quick and dirty override for h264, todo improve
if context['video_encoder'] in (VideoEncoder.H264, VideoEncoder.COPY):
targetFormat = ''
targetExtension = 'mkv'
else:
targetFormat = DEFAULT_CONTAINER_FORMAT
targetExtension = DEFAULT_CONTAINER_EXTENSION
context['use_tmdb'] = not no_tmdb
context['use_pattern'] = not no_pattern
@@ -398,16 +866,29 @@ def convert(ctx,
context['resource_limits'] = {}
context['resource_limits']['niceness'] = nice
context['resource_limits']['cpu_limit'] = cpu
context['resource_limits']['cpu_percent'] = cpu
context['cropdetect'] = {
'seek_seconds': crop_seek,
'duration_seconds': crop_duration,
}
context['import_subtitles'] = (subtitle_directory and subtitle_prefix)
(
context['import_subtitles'],
resolvedSubtitleDirectory,
resolvedSubtitlePrefix,
) = resolveSubtitleImportOptions(
context,
subtitle_directory,
subtitle_prefix,
)
if context['import_subtitles']:
context['subtitle_directory'] = subtitle_directory
context['subtitle_prefix'] = subtitle_prefix
context['subtitle_directory'] = resolvedSubtitleDirectory
context['subtitle_prefix'] = resolvedSubtitlePrefix
existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]
existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in SUPPORTED_INPUT_FILE_EXTENSIONS]
# CLI Overrides
@@ -476,14 +957,6 @@ def convert(ctx,
ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")
qualityTokens = quality.split(',')
q_list = [q for q in qualityTokens if q.isnumeric()]
ctx.obj['logger'].debug(f"Qualities: {q_list}")
presetTokens = preset.split(',')
p_list = [p for p in presetTokens if p.isnumeric()]
ctx.obj['logger'].debug(f"Presets: {p_list}")
context['bitrates'] = {}
context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
@@ -494,26 +967,37 @@ def convert(ctx,
ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")
# Process crop parameters
context['perform_crop'] = (crop != 'none')
if context['perform_crop']:
cTokens = crop.split(',')
if cTokens and len(cTokens) == 2:
context['crop_start'] = int(cTokens[0])
context['crop_length'] = int(cTokens[1])
ctx.obj['logger'].debug(f"Crop start={context['crop_start']} length={context['crop_length']}")
#->
# Process cut parameters
context['perform_cut'] = (cut is not None)
if context['perform_cut']:
context['cut_start'], context['cut_length'] = cut
click.echo(
f"Cutting enabled: start {context['cut_start']} s, duration {context['cut_length']} s."
)
ctx.obj['logger'].debug(
f"Cut start={context['cut_start']} length={context['cut_length']}"
)
tc = TmdbController() if context['use_tmdb'] else None
qualityKwargs = {QualityFilter.QUALITY_KEY: quality}
qualityKwargs = {QualityFilter.QUALITY_KEY: str(quality)}
qf = QualityFilter(**qualityKwargs)
if context['video_encoder'] == VideoEncoder.AV1 and preset:
presetKwargs = {PresetFilter.PRESET_KEY: preset}
PresetFilter(**presetKwargs)
cf = None
# if crop != 'none':
if crop == 'auto':
cropKwargs = {}
cf = CropFilter(**cropKwargs)
denoiseKwargs = {}
if denoise_strength:
denoiseKwargs[NlmeansFilter.STRENGTH_KEY] = denoise_strength
@@ -528,6 +1012,9 @@ def convert(ctx,
if denoise != 'none' or denoiseKwargs:
NlmeansFilter(**denoiseKwargs)
if deinterlace != 'none':
DeinterlaceFilter()
chainYield = list(qf.getChainYield())
ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(chainYield)} jobs")
@@ -548,8 +1035,19 @@ def convert(ctx,
targetSuffices = {}
mediaFileProperties = FileProperties(context, sourcePath)
mediaFileProperties = FileProperties(context, sourceFilename)
# if not cf is None:
#
cropArguments = {} if cf is None else mediaFileProperties.findCropArguments()
#
# ctx.obj['logger'].info(f"\nSetting crop arguments: ouput width: {cropArguments[CropFilter.OUTPUT_WIDTH_KEY]} "
# + f"height: {cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]} "
# + f"offset x: {cropArguments[CropFilter.OFFSET_X_KEY]} "
# + f"y: {cropArguments[CropFilter.OFFSET_Y_KEY]}")
#
# cf.setArguments(**cropArguments)
ssc = ShiftedSeasonController(context)
@@ -572,6 +1070,16 @@ def convert(ctx,
sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()
if ([smd for smd in sourceMediaDescriptor.getSubtitleTracks()
if smd.getCodec() == TrackCodec.ASS]
and [amd for amd in sourceMediaDescriptor.getAttachmentTracks()
if amd.getCodec() == TrackCodec.TTF]):
targetFormat = ''
targetExtension = 'mkv'
#HINT: This is None if the filename did not match anything in the database
currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None
@@ -599,18 +1107,43 @@ def convert(ctx,
checkUniqueDispositions(context, targetMediaDescriptor)
currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)
# Check if source and target track descriptors match
sourceTrackDescriptorList = sourceMediaDescriptor.getTrackDescriptors()
targetTrackDescriptorList = targetMediaDescriptor.getTrackDescriptors()
for ttd in targetTrackDescriptorList:
tti = ttd.getIndex()
ttsi = ttd.getSourceIndex()
stList = [st for st in sourceTrackDescriptorList if st.getIndex() == ttsi]
std = stList[0] if stList else None
if std is None:
raise click.ClickException(f"Target track #{tti} refering to non-existent source track #{ttsi}")
ttType = ttd.getType()
stType = std.getType()
if ttType != stType:
raise click.ClickException(f"Target track #{tti} type ({ttType.label()}) not matching source track #{ttsi} type ({stType.label()})")
if context['import_subtitles']:
targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
context['subtitle_prefix'],
showSeason,
showEpisode)
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
# ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")
if cliOverrides:
targetMediaDescriptor.applyOverrides(cliOverrides)
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
# ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")
ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")
@@ -653,9 +1186,9 @@ def convert(ctx,
ctx.obj['logger'].debug(f"tmdbEpisodeResult={tmdbEpisodeResult}")
if tmdbEpisodeResult:
filteredEpisodeName = filterFilename(tmdbEpisodeResult['name'])
substitutedEpisodeName = filterFilename(substituteTmdbFilename(tmdbEpisodeResult['name']))
sourceFileBasename = getEpisodeFileBasename(showFilenamePrefix,
filteredEpisodeName,
substitutedEpisodeName,
shiftedShowSeason,
shiftedShowEpisode,
indexSeasonDigits,
@@ -678,12 +1211,8 @@ def convert(ctx,
for chainIteration in chainYield:
ctx.obj['logger'].debug(f"\nchain iteration: {chainIteration}\n")
# if len(q_list) > 1:
# targetSuffices['q'] = f"q{q}"
chainVariant = '-'.join([fy['variant'] for fy in chainIteration])
ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} variant={chainVariant}")
@@ -692,10 +1221,10 @@ def convert(ctx,
ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")
# targetFileBasename = mediaFileProperties.assembleTargetFileBasename(label,
# q if len(q_list) > 1 else -1,
#
targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
# targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
targetFileBasename = (label or sourceFileBasename) if context['use_tmdb'] else sourceFileBasename
targetFilenameTokens = [targetFileBasename]
@@ -703,34 +1232,31 @@ def convert(ctx,
if 'se' in targetSuffices.keys():
targetFilenameTokens += [targetSuffices['se']]
# if 'q' in targetSuffices.keys():
# targetFilenameTokens += [targetSuffices['q']]
for filterYield in chainIteration:
# filterIdentifier = filterYield['identifier']
# filterParameters = filterYield['parameters']
# filterSuffices = filterYield['suffices']
targetFilenameTokens += filterYield['suffices']
#TODO #387
# targetFilename = ((f"{sourceFileBasename}_q{q}" if len(q_list) > 1 else sourceFileBasename)
# if context['use_tmdb'] else targetFileBasename)
targetFilename = f"{'_'.join(targetFilenameTokens)}.{sourceFilenameExtension if rename_only else targetExtension}"
targetFilename = f"{'_'.join(targetFilenameTokens)}.{targetExtension}"
if sourceFilename == targetFilename:
targetFilename = f"out_{targetFilename}"
targetPath = os.path.join(output_directory if output_directory else sourceDirectory, targetFilename)
#TODO: adjust target extension
targetPath = os.path.join(output_directory, targetFilename) if output_directory else targetFilename
ctx.obj['logger'].info(f"Creating file {targetFilename}")
if rename_only:
shutil.move(sourcePath, targetPath)
else:
fc.runJob(sourcePath,
targetPath,
targetFormat,
context['video_encoder'],
chainIteration)
chainIteration,
cropArguments,
currentPattern)
#TODO: click.confirm('Warning! This file is not compliant to the defined source schema! Do you want to continue?', abort=True)
endTime = time.perf_counter()
ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")


@@ -8,6 +8,7 @@ class ConfigurationController():
DATABASE_PATH_CONFIG_KEY = 'databasePath'
LOG_DIRECTORY_CONFIG_KEY = 'logDirectory'
SUBTITLES_DIRECTORY_CONFIG_KEY = 'subtitlesDirectory'
OUTPUT_FILENAME_TEMPLATE_KEY = 'outputFilenameTemplate'
@@ -49,6 +50,12 @@ class ConfigurationController():
def getDatabaseFilePath(self):
return self.__databaseFilePath
def getSubtitlesDirectoryPath(self):
subtitlesDirectory = self.__configurationData.get(
ConfigurationController.SUBTITLES_DIRECTORY_CONFIG_KEY,
'',
)
return os.path.expanduser(str(subtitlesDirectory)) if subtitlesDirectory else ''
def getData(self):
return self.__configurationData


@@ -4,12 +4,22 @@ DATABASE_VERSION = 2
DEFAULT_QUALITY = 32
DEFAULT_AV1_PRESET = 5
DEFAULT_VIDEO_ENCODER_LABEL = "vp9"
DEFAULT_CONTAINER_FORMAT = "webm"
DEFAULT_CONTAINER_EXTENSION = "webm"
SUPPORTED_INPUT_FILE_EXTENSIONS = ("mkv", "mp4", "avi", "flv", "webm")
FFMPEG_COMMAND_TOKENS = ("ffmpeg", "-y")
FFMPEG_NULL_OUTPUT_TOKENS = ("-f", "null", "/dev/null")
DEFAULT_STEREO_BANDWIDTH = "112"
DEFAULT_AC3_BANDWIDTH = "256"
DEFAULT_DTS_BANDWIDTH = "320"
DEFAULT_7_1_BANDWIDTH = "384"
DEFAULT_CROP_START = 60
DEFAULT_CROP_LENGTH = 180
DEFAULT_CROPDETECT_SEEK_SECONDS = 60
DEFAULT_CROPDETECT_DURATION_SECONDS = 180
DEFAULT_cut_start = 60
DEFAULT_cut_length = 180
DEFAULT_OUTPUT_FILENAME_TEMPLATE = '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'


@@ -1,8 +1,11 @@
import os, click
from sqlalchemy import create_engine
from sqlalchemy import create_engine, inspect
from sqlalchemy.orm import sessionmaker
# Import the full model package so SQLAlchemy registers every mapped class
# before metadata creation and the first ORM query.
import ffx.model
from ffx.model.show import Base
from ffx.model.property import Property
@@ -11,6 +14,7 @@ from ffx.constants import DATABASE_VERSION
DATABASE_VERSION_KEY = 'database_version'
EXPECTED_TABLE_NAMES = set(Base.metadata.tables.keys())
class DatabaseVersionException(Exception):
def __init__(self, errorMessage):
@@ -34,7 +38,7 @@ def databaseContext(databasePath: str = ''):
databaseContext['engine'] = create_engine(databaseContext['url'])
databaseContext['session'] = sessionmaker(bind=databaseContext['engine'])
Base.metadata.create_all(databaseContext['engine'])
bootstrapDatabaseIfNeeded(databaseContext)
# isSynchronous = False
# while not isSynchronous:
@@ -51,6 +55,19 @@ def databaseContext(databasePath: str = ''):
return databaseContext
def databaseNeedsBootstrap(databaseContext) -> bool:
inspector = inspect(databaseContext['engine'])
existingTableNames = set(inspector.get_table_names())
return not EXPECTED_TABLE_NAMES.issubset(existingTableNames)
def bootstrapDatabaseIfNeeded(databaseContext):
if not databaseNeedsBootstrap(databaseContext):
return
Base.metadata.create_all(databaseContext['engine'])
def ensureDatabaseVersion(databaseContext):
currentDatabaseVersion = getDatabaseVersion(databaseContext)
@@ -67,9 +84,9 @@ def getDatabaseVersion(databaseContext):
Session = databaseContext['session']
s = Session()
q = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY)
versionProperty = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY).first()
return int(q.first().value) if q.count() else 0
return int(versionProperty.value) if versionProperty is not None else 0
except Exception as ex:
raise click.ClickException(f"getDatabaseVersion(): {repr(ex)}")


@@ -1,38 +1,50 @@
import os, click
from logging import Logger
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.audio_layout import AudioLayout
from ffx.track_type import TrackType
from ffx.track_codec import TrackCodec
from ffx.video_encoder import VideoEncoder
from ffx.process import executeProcess
from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.constants import DEFAULT_CROP_START, DEFAULT_CROP_LENGTH
from ffx.constants import (
DEFAULT_CONTAINER_EXTENSION,
DEFAULT_CONTAINER_FORMAT,
DEFAULT_VIDEO_ENCODER_LABEL,
DEFAULT_CUT_START,
DEFAULT_CUT_LENGTH,
FFMPEG_COMMAND_TOKENS,
FFMPEG_NULL_OUTPUT_TOKENS,
SUPPORTED_INPUT_FILE_EXTENSIONS,
)
from ffx.filter.quality_filter import QualityFilter
from ffx.filter.preset_filter import PresetFilter
from ffx.filter.crop_filter import CropFilter
from ffx.model.pattern import Pattern
class FfxController():
COMMAND_TOKENS = ['ffmpeg', '-y']
NULL_TOKENS = ['-f', 'null', '/dev/null'] # -f null /dev/null
COMMAND_TOKENS = list(FFMPEG_COMMAND_TOKENS)
NULL_TOKENS = list(FFMPEG_NULL_OUTPUT_TOKENS) # -f null /dev/null
TEMP_FILE_NAME = "ffmpeg2pass-0.log"
DEFAULT_VIDEO_ENCODER = VideoEncoder.VP9.label()
DEFAULT_VIDEO_ENCODER = DEFAULT_VIDEO_ENCODER_LABEL
DEFAULT_FILE_FORMAT = 'webm'
DEFAULT_FILE_EXTENSION = 'webm'
DEFAULT_FILE_FORMAT = DEFAULT_CONTAINER_FORMAT
DEFAULT_FILE_EXTENSION = DEFAULT_CONTAINER_EXTENSION
INPUT_FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
INPUT_FILE_EXTENSIONS = list(SUPPORTED_INPUT_FILE_EXTENSIONS)
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
#!
SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
# SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
def __init__(self,
context : dict,
@@ -40,12 +52,22 @@ class FfxController():
sourceMediaDescriptor : MediaDescriptor = None):
self.__context = context
self.__sourceMediaDescriptor = sourceMediaDescriptor
self.__targetMediaDescriptor = targetMediaDescriptor
self.__sourceMediaDescriptor = sourceMediaDescriptor
self.__configurationData = self.__context['config'].getData()
self.__mdcs = MediaDescriptorChangeSet(context,
targetMediaDescriptor,
sourceMediaDescriptor)
self.__logger = context['logger']
self.__logger: Logger = context['logger']
def executeCommandSequence(self, commandSequence):
out, err, rc = executeProcess(commandSequence, context=self.__context)
if rc:
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
return out, err, rc
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
@@ -55,6 +77,14 @@ class FfxController():
'-pix_fmt', 'yuv420p10le']
# -c:v libx264 -preset slow -crf 17
def generateH264Tokens(self, quality, subIndex : int = 0):
return [f"-c:v:{int(subIndex)}", 'libx264',
"-preset", "slow",
'-crf', str(quality)]
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
@@ -82,33 +112,84 @@ class FfxController():
'-auto-alt-ref', '1',
'-lag-in-frames', '25']
def generateVideoCopyTokens(self, subIndex):
return [f"-c:v:{int(subIndex)}",
'copy']
def generateAudioCopyTokens(self, subIndex):
return [f"-c:a:{int(subIndex)}", 'copy']
def generateSubtitleCopyTokens(self, subIndex):
return [f"-c:s:{int(subIndex)}", 'copy']
def generateAttachmentCopyTokens(self, subIndex):
return [f"-c:t:{int(subIndex)}", 'copy']
def generateCopyTokens(self):
copyTokens = []
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
copyTokens += self.generateVideoCopyTokens(trackDescriptor.getSubIndex())
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO):
copyTokens += self.generateAudioCopyTokens(trackDescriptor.getSubIndex())
for trackDescriptor in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.SUBTITLE):
copyTokens += self.generateSubtitleCopyTokens(trackDescriptor.getSubIndex())
attachmentDescriptors = (
self.__sourceMediaDescriptor.getTrackDescriptors(trackType=TrackType.ATTACHMENT)
if self.__sourceMediaDescriptor is not None
else self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.ATTACHMENT)
)
for trackDescriptor in attachmentDescriptors:
copyTokens += self.generateAttachmentCopyTokens(trackDescriptor.getSubIndex())
return copyTokens
def generateCropTokens(self):
if 'crop_start' in self.__context.keys() and 'crop_length' in self.__context.keys():
cropStart = int(self.__context['crop_start'])
cropLength = int(self.__context['crop_length'])
if 'cut_start' in self.__context.keys() and 'cut_length' in self.__context.keys():
cropStart = int(self.__context['cut_start'])
cropLength = int(self.__context['cut_length'])
else:
cropStart = DEFAULT_CROP_START
cropLength = DEFAULT_CROP_LENGTH
cropStart = DEFAULT_CUT_START
cropLength = DEFAULT_CUT_LENGTH
return ['-ss', str(cropStart), '-t', str(cropLength)]
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
outputFilePath = f"{filePathBase}{'.'+str(ext) if ext else ''}"
self.__logger.debug(f"FfxController.generateOutputTokens(): base='{filePathBase}' format='{format}' ext='{ext}'")
outputFilePath = f"{filePathBase}{('.'+str(ext)) if ext else ''}"
if format:
return ['-f', format, outputFilePath]
else:
return [outputFilePath]
def generateEncodingMetadataTags(self, videoEncoder: VideoEncoder, quality, preset) -> dict:
metadataTags = {}
if videoEncoder in (VideoEncoder.AV1, VideoEncoder.H264, VideoEncoder.VP9):
metadataTags["ENCODING_QUALITY"] = str(quality)
if videoEncoder == VideoEncoder.AV1:
metadataTags["ENCODING_PRESET"] = str(preset)
return metadataTags
def generateAudioEncodingTokens(self):
"""Generates ffmpeg options audio streams including channel remapping, codec and bitrate"""
audioTokens = []
targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
# targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
targetAudioTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO)
trackSubIndex = 0
for trackDescriptor in targetAudioTrackDescriptors:
@@ -144,136 +225,125 @@ class FfxController():
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
f"-b:a:{trackSubIndex}",
self.__context['bitrates']['ac3']]
# -ac 5 ?
if trackAudioLayout == AudioLayout.LAYOUT_5_0:
audioTokens += [f"-c:a:{trackSubIndex}",
'libopus',
f"-filter:a:{trackSubIndex}",
'channelmap=channel_layout=5.0',
f"-b:a:{trackSubIndex}",
self.__context['bitrates']['ac3']]
trackSubIndex += 1
return audioTokens
# -disposition:s:0 default -disposition:s:1 0
def generateDispositionTokens(self):
targetTrackDescriptors = self.__targetMediaDescriptor.getAllTrackDescriptors()
sourceTrackDescriptors = ([] if self.__sourceMediaDescriptor is None
else self.__sourceMediaDescriptor.getAllTrackDescriptors())
dispositionTokens = []
for trackIndex in range(len(targetTrackDescriptors)):
td = targetTrackDescriptors[trackIndex]
#HINT: No dispositions for pgs subtitle tracks that have no external file source
if (td.getExternalSourceFilePath()
or td.getCodec() != TrackCodec.PGS):
subIndex = td.getSubIndex()
streamIndicator = td.getType().indicator()
sourceDispositionSet = sourceTrackDescriptors[td.getSourceIndex()].getDispositionSet() if sourceTrackDescriptors else set()
#TODO: Discard everything that is already present in the target descriptor (?)
sourceDispositionSet.discard(TrackDisposition.DEFAULT)
dispositionSet = td.getDispositionSet() | sourceDispositionSet
if dispositionSet:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in dispositionSet])]
else:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
return dispositionTokens
def generateMetadataTokens(self):
metadataTokens = []
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
removeTrackKeys = metadataConfiguration['streams']['remove'] if 'streams' in metadataConfiguration.keys() and 'remove' in metadataConfiguration['streams'].keys() else []
mediaTags = {k:v for k,v in self.__targetMediaDescriptor.getTags().items() if not k in removeGlobalKeys}
if (not 'no_signature' in self.__context.keys()
or not self.__context['no_signature']):
outputMediaTags = mediaTags | signatureTags
else:
outputMediaTags = mediaTags
for tagKey, tagValue in outputMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for removeKey in removeGlobalKeys:
metadataTokens += [f"-metadata:g",
f"{removeKey}="]
removeMkvmergeMetadata = (not 'keep_mkvmerge_metadata' in self.__context.keys()
or not self.__context['keep_mkvmerge_metadata'])
#HINT: With current ffmpeg version track metadata tags are not passed to the outfile
for td in self.__targetMediaDescriptor.getAllTrackDescriptors():
typeIndicator = td.getType().indicator()
subIndex = td.getSubIndex()
for tagKey, tagValue in td.getTags().items():
if not tagKey in removeTrackKeys:
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
f"{tagKey}={tagValue}"]
for removeKey in removeTrackKeys:
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
f"{removeKey}="]
return metadataTokens
def runJob(self,
sourcePath,
targetPath,
targetFormat: str = '',
videoEncoder: VideoEncoder = VideoEncoder.VP9,
chainIteration: list = []):
chainIteration: list = [],
cropArguments: dict = {},
currentPattern: Pattern = None):
# quality: int = DEFAULT_QUALITY,
# preset: int = DEFAULT_AV1_PRESET):
videoEncoder: VideoEncoder = self.__context.get('video_encoder', VideoEncoder.VP9)
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
cropFilters = [fy for fy in chainIteration if fy['identifier'] == 'crop']
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
deinterlaceFilters = [fy for fy in chainIteration if fy['identifier'] == 'bwdif']
if qualityFilters and (quality := qualityFilters[0]['parameters']['quality']):
self.__logger.info(f"Setting quality {quality} from command line parameter")
elif currentPattern is not None and (quality := currentPattern.quality):
self.__logger.info(f"Setting quality {quality} from pattern default")
else:
quality = (QualityFilter.DEFAULT_H264_QUALITY
if (videoEncoder == VideoEncoder.H264)
else QualityFilter.DEFAULT_VP9_QUALITY)
self.__logger.info(f"Setting quality {quality} from default")
quality = qualityFilters[0]['parameters']['quality'] if qualityFilters else QualityFilter.DEFAULT_QUALITY
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
self.__context['encoding_metadata_tags'] = self.generateEncodingMetadataTags(
videoEncoder,
quality,
preset,
)
denoiseTokens = denoiseFilters[0]['tokens'] if denoiseFilters else []
filterParamTokens = []
if cropArguments:
cropParams = (f"crop="
+ f"{cropArguments[CropFilter.OUTPUT_WIDTH_KEY]}"
+ f":{cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]}"
+ f":{cropArguments[CropFilter.OFFSET_X_KEY]}"
+ f":{cropArguments[CropFilter.OFFSET_Y_KEY]}")
filterParamTokens.append(cropParams)
filterParamTokens.extend(denoiseFilters[0]['tokens'] if denoiseFilters else [])
filterParamTokens.extend(deinterlaceFilters[0]['tokens'] if deinterlaceFilters else [])
filterTokens = ['-vf', ', '.join(filterParamTokens)] if filterParamTokens else []
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
if videoEncoder == VideoEncoder.COPY:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += self.generateCopyTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
self.__logger.debug("FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
self.executeCommandSequence(commandSequence)
return
if videoEncoder == VideoEncoder.AV1:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens()
+ self.generateDispositionTokens())
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence += self.generateMetadataTokens()
commandSequence += denoiseTokens
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += filterTokens
commandSequence += (self.generateAudioEncodingTokens()
+ self.generateAV1Tokens(int(quality), int(preset))
+ self.generateAudioEncodingTokens())
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec() != TrackCodec.PNG:
commandSequence += self.generateAV1Tokens(int(quality), int(preset))
if self.__context['perform_crop']:
commandSequence += FfxController.generateCropTokens()
commandSequence += self.generateAudioEncodingTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
@@ -281,7 +351,38 @@ class FfxController():
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
executeProcess(commandSequence, context = self.__context)
self.executeCommandSequence(commandSequence)
if videoEncoder == VideoEncoder.H264:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += filterTokens
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec() != TrackCodec.PNG:
commandSequence += self.generateH264Tokens(int(quality))
commandSequence += self.generateAudioEncodingTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
self.executeCommandSequence(commandSequence)
if videoEncoder == VideoEncoder.VP9:
@@ -294,11 +395,14 @@ class FfxController():
# the required bitrate for the second run is determined and recorded
# TODO: Results seem to be slightly better with the first pass omitted,
# Confirm or find better filter settings for 2-pass
# commandSequence1 += self.__context['denoiser'].generateDenoiseTokens()
# commandSequence1 += self.__context['denoiser'].generatefilterTokens()
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec() != TrackCodec.PNG:
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
if self.__context['perform_crop']:
if self.__context['perform_cut']:
commandSequence1 += self.generateCropTokens()
commandSequence1 += FfxController.NULL_TOKENS
@@ -309,20 +413,25 @@ class FfxController():
self.__logger.debug(f"FfxController.runJob(): Running command sequence 1")
if not self.__context['dry_run']:
executeProcess(commandSequence1, context = self.__context)
self.executeCommandSequence(commandSequence1)
commandSequence2 = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens()
+ self.generateDispositionTokens())
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence2 += self.generateMetadataTokens()
commandSequence2 += denoiseTokens
commandSequence2 += self.__mdcs.generateMetadataTokens()
commandSequence2 += filterTokens
commandSequence2 += self.generateVP9Pass2Tokens(int(quality)) + self.generateAudioEncodingTokens()
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec() != TrackCodec.PNG:
commandSequence2 += self.generateVP9Pass2Tokens(int(quality))
if self.__context['perform_crop']:
commandSequence2 += self.generateAudioEncodingTokens()
if self.__context['perform_cut']:
commandSequence2 += self.generateCropTokens()
commandSequence2 += self.generateOutputTokens(targetPath,
@@ -331,9 +440,7 @@ class FfxController():
self.__logger.debug(f"FfxController.runJob(): Running command sequence 2")
if not self.__context['dry_run']:
out, err, rc = executeProcess(commandSequence2, context = self.__context)
if rc:
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
self.executeCommandSequence(commandSequence2)
@@ -358,4 +465,4 @@ class FfxController():
str(length),
path]
out, err, rc = executeProcess(commandTokens, context = self.__context)
self.executeCommandSequence(commandTokens)
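All process invocations now funnel through executeCommandSequence, so any non-zero return code surfaces as a single click error. A reduced sketch of that wrapper pattern (executeProcess here is a simplified stand-in for ffx.process.executeProcess):

import subprocess
import click

def executeProcess(tokens):
    # Simplified stand-in: run the command and capture its output
    completed = subprocess.run(tokens, capture_output=True, text=True)
    return completed.stdout, completed.stderr, completed.returncode

def executeCommandSequence(tokens):
    out, err, rc = executeProcess(tokens)
    if rc:
        raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
    return out, err, rc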

View File

@@ -1,21 +1,33 @@
import os, re, json
from .constants import (
DEFAULT_CROPDETECT_DURATION_SECONDS,
DEFAULT_CROPDETECT_SEEK_SECONDS,
FFMPEG_COMMAND_TOKENS,
FFMPEG_NULL_OUTPUT_TOKENS,
)
from .media_descriptor import MediaDescriptor
from .pattern_controller import PatternController
from ffx.filter.crop_filter import CropFilter
from .process import executeProcess
from ffx.model.pattern import Pattern
class FileProperties():
_cropdetect_cache: dict[tuple[str, int, int, int, int], dict[str, str]] = {}
FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
FFPROBE_COMMAND_TOKENS = ["ffprobe", "-hide_banner", "-show_format", "-show_streams", "-of", "json"]
SE_INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
CROPDETECT_PATTERN = 'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'
DEFAULT_INDEX_DIGITS = 3
def __init__(self, context, sourcePath):
@@ -40,9 +52,10 @@ class FileProperties():
self.__sourceFilenameExtension = ''
self.__pc = PatternController(context)
self.__usePattern = bool(self.context.get('use_pattern', True))
# Checking if database contains matching pattern
matchResult = self.__pc.matchFilename(self.__sourceFilename)
matchResult = self.__pc.matchFilename(self.__sourceFilename) if self.__usePattern else {}
self.__logger.debug(f"FileProperties.__init__(): Match result: {matchResult}")
@@ -73,6 +86,54 @@ class FileProperties():
self.__season = -1
self.__episode = -1
self.__ffprobeData = None
def _getCropdetectWindow(self):
cropdetectContext = self.context.get('cropdetect', {})
seekSeconds = int(cropdetectContext.get('seek_seconds', DEFAULT_CROPDETECT_SEEK_SECONDS))
durationSeconds = int(cropdetectContext.get('duration_seconds', DEFAULT_CROPDETECT_DURATION_SECONDS))
if seekSeconds < 0:
raise ValueError("Crop detection seek seconds must be zero or greater.")
if durationSeconds <= 0:
raise ValueError("Crop detection duration seconds must be greater than zero.")
return seekSeconds, durationSeconds
def _getCropdetectCacheKey(self):
sourceStat = os.stat(self.__sourcePath)
seekSeconds, durationSeconds = self._getCropdetectWindow()
return (
os.path.abspath(self.__sourcePath),
sourceStat.st_mtime_ns,
sourceStat.st_size,
seekSeconds,
durationSeconds,
)
@classmethod
def _clear_cropdetect_cache(cls):
cls._cropdetect_cache.clear()
def _getFfprobeData(self):
if self.__ffprobeData is not None:
return self.__ffprobeData
ffprobeOutput, ffprobeError, returnCode = executeProcess(
FileProperties.FFPROBE_COMMAND_TOKENS + [self.__sourcePath]
)
if 'Invalid data found when processing input' in ffprobeError:
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
if returnCode != 0:
raise Exception(f"ffprobe returned with error {returnCode}")
self.__ffprobeData = json.loads(ffprobeOutput)
return self.__ffprobeData
def getFormatData(self):
"""
@@ -94,22 +155,7 @@ class FileProperties():
}
}
"""
# ffprobe -hide_banner -show_format -of json
ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffprobe",
"-hide_banner",
"-show_format",
"-of", "json",
self.__sourcePath]) #,
#context = self.context)
if 'Invalid data found when processing input' in ffprobeError:
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
if returnCode != 0:
raise Exception(f"ffprobe returned with error {returnCode}")
return json.loads(ffprobeOutput)['format']
return self._getFfprobeData()['format']
def getStreamData(self):
@@ -154,24 +200,64 @@ class FileProperties():
}
}
"""
return self._getFfprobeData()['streams']
# ffprobe -hide_banner -show_streams -of json
ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffprobe",
"-hide_banner",
"-show_streams",
"-of", "json",
self.__sourcePath]) #,
#context = self.context)
if 'Invalid data found when processing input' in ffprobeError:
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
def findCropArguments(self):
""""""
cacheKey = self._getCropdetectCacheKey()
cachedCropArguments = FileProperties._cropdetect_cache.get(cacheKey)
if cachedCropArguments is not None:
self.__logger.debug(
"FileProperties.findCropArguments(): Reusing cached cropdetect result for %s",
self.__sourcePath,
)
return dict(cachedCropArguments)
seekSeconds, durationSeconds = self._getCropdetectWindow()
cropdetectCommand = (
list(FFMPEG_COMMAND_TOKENS)
+ ["-ss", str(seekSeconds), "-i", self.__sourcePath, "-t", str(durationSeconds), "-vf", "cropdetect"]
+ list(FFMPEG_NULL_OUTPUT_TOKENS)
)
_ffmpegOutput, ffmpegError, returnCode = executeProcess(cropdetectCommand, context=self.context)
errorLines = ffmpegError.split('\n')
crops = {}
for el in errorLines:
cropdetect_match = re.search(FileProperties.CROPDETECT_PATTERN, el)
if cropdetect_match is not None:
cropParam = str(cropdetect_match.group(0))
crops[cropParam] = crops.get(cropParam, 0) + 1
if crops:
cropString = max(crops.items(), key=lambda item: (item[1], item[0]))[0]
cropTokens = cropString.split('=')
cropValueTokens = cropTokens[1]
cropValues = cropValueTokens.split(':')
cropArguments = {
CropFilter.OUTPUT_WIDTH_KEY: cropValues[0],
CropFilter.OUTPUT_HEIGHT_KEY: cropValues[1],
CropFilter.OFFSET_X_KEY: cropValues[2],
CropFilter.OFFSET_Y_KEY: cropValues[3]
}
FileProperties._cropdetect_cache[cacheKey] = dict(cropArguments)
return cropArguments
if returnCode != 0:
raise Exception(f"ffprobe returned with error {returnCode}")
raise Exception(f"ffmpeg cropdetect returned with error {returnCode}")
return json.loads(ffprobeOutput)['streams']
FileProperties._cropdetect_cache[cacheKey] = {}
return {}
def getMediaDescriptor(self):
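A sketch of the stderr parsing that findCropArguments performs, fed with fabricated cropdetect output instead of a real ffmpeg run:

import re

CROPDETECT_PATTERN = 'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'
ffmpegError = ('[Parsed_cropdetect_0] ... crop=1920:800:0:140\n'
               '[Parsed_cropdetect_0] ... crop=1920:800:0:140\n'
               '[Parsed_cropdetect_0] ... crop=1920:804:0:138\n')
crops = {}
for line in ffmpegError.split('\n'):
    match = re.search(CROPDETECT_PATTERN, line)
    if match is not None:
        crops[match.group(0)] = crops.get(match.group(0), 0) + 1
# The most frequent crop string wins; ties break on the larger string
print(max(crops.items(), key=lambda item: (item[1], item[0]))[0])  # crop=1920:800:0:140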

View File

@@ -0,0 +1,51 @@
import itertools
from .filter import Filter
class CropFilter(Filter):
IDENTIFIER = 'crop'
OUTPUT_WIDTH_KEY = 'output_width'
OUTPUT_HEIGHT_KEY = 'output_height'
OFFSET_X_KEY = 'x_offset'
OFFSET_Y_KEY = 'y_offset'
def __init__(self, **kwargs):
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY, 0))
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY, 0))
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY, 0))
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY, 0))
super().__init__(self)
def setArguments(self, **kwargs):
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY))
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY))
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY))
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY))
def getPayload(self):
payload = {'identifier': CropFilter.IDENTIFIER,
'parameters': {
CropFilter.OUTPUT_WIDTH_KEY: self.__outputWidth,
CropFilter.OUTPUT_HEIGHT_KEY: self.__outputHeight,
CropFilter.OFFSET_X_KEY: self.__offsetX,
CropFilter.OFFSET_Y_KEY: self.__offsetY
},
'suffices': [],
'variant': f"C{self.__outputWidth}-{self.__outputHeight}-{self.__offsetX}-{self.__offsetY}",
'tokens': ['crop='
+ f"{self.__outputWidth}"
+ f":{self.__outputHeight}"
+ f":{self.__offsetX}"
+ f":{self.__offsetY}"]}
return payload
def getYield(self):
yield self.getPayload()
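Hypothetical usage of the new filter (argument values invented; assumes the Filter base class accepts this construction):

cropFilter = CropFilter(output_width=1920, output_height=800,
                        x_offset=0, y_offset=140)
payload = next(cropFilter.getYield())
print(payload['variant'])  # 'C1920-800-0-140'
print(payload['tokens'])   # ['crop=1920:800:0:140']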

View File

@@ -0,0 +1,140 @@
import itertools
from .filter import Filter
class DeinterlaceFilter(Filter):
IDENTIFIER = 'bwdif'
# DEFAULT_STRENGTH: float = 2.8
# DEFAULT_PATCH_SIZE: int = 13
# DEFAULT_CHROMA_PATCH_SIZE: int = 9
# DEFAULT_RESEARCH_WINDOW: int = 23
# DEFAULT_CHROMA_RESEARCH_WINDOW: int= 17
# STRENGTH_KEY = 'strength'
# PATCH_SIZE_KEY = 'patch_size'
# CHROMA_PATCH_SIZE_KEY = 'chroma_patch_size'
# RESEARCH_WINDOW_KEY = 'research_window'
# CHROMA_RESEARCH_WINDOW_KEY = 'chroma_research_window'
def __init__(self, **kwargs):
# self.__useHardware = kwargs.get('use_hardware', False)
# self.__strengthList = []
# strength = kwargs.get(NlmeansFilter.STRENGTH_KEY, '')
# if strength:
# strengthTokens = strength.split(',')
# for st in strengthTokens:
# try:
# strengthValue = float(st)
# except:
# raise ValueError('NlmeansFilter: Strength value has to be of type float')
# if strengthValue < 1.0 or strengthValue > 30.0:
# raise ValueError('NlmeansFilter: Strength value has to be between 1.0 and 30.0')
# self.__strengthList.append(strengthValue)
# else:
# self.__strengthList = [NlmeansFilter.DEFAULT_STRENGTH]
# self.__patchSizeList = []
# patchSize = kwargs.get(NlmeansFilter.PATCH_SIZE_KEY, '')
# if patchSize:
# patchSizeTokens = patchSize.split(',')
# for pst in patchSizeTokens:
# try:
# patchSizeValue = int(pst)
# except:
# raise ValueError('NlmeansFilter: Patch size value has to be of type int')
# if patchSizeValue < 0 or patchSizeValue > 99:
# raise ValueError('NlmeansFilter: Patch size value has to be between 0 and 99')
# if patchSizeValue % 2 == 0:
# raise ValueError('NlmeansFilter: Patch size value has to an odd number')
# self.__patchSizeList.append(patchSizeValue)
# else:
# self.__patchSizeList = [NlmeansFilter.DEFAULT_PATCH_SIZE]
# self.__chromaPatchSizeList = []
# chromaPatchSize = kwargs.get(NlmeansFilter.CHROMA_PATCH_SIZE_KEY, '')
# if chromaPatchSize:
# chromaPatchSizeTokens = chromaPatchSize.split(',')
# for cpst in chromaPatchSizeTokens:
# try:
# chromaPatchSizeValue = int(pst)
# except:
# raise ValueError('NlmeansFilter: Chroma patch size value has to be of type int')
# if chromaPatchSizeValue < 0 or chromaPatchSizeValue > 99:
# raise ValueError('NlmeansFilter: Chroma patch value has to be between 0 and 99')
# if chromaPatchSizeValue % 2 == 0:
# raise ValueError('NlmeansFilter: Chroma patch value has to an odd number')
# self.__chromaPatchSizeList.append(chromaPatchSizeValue)
# else:
# self.__chromaPatchSizeList = [NlmeansFilter.DEFAULT_CHROMA_PATCH_SIZE]
# self.__researchWindowList = []
# researchWindow = kwargs.get(NlmeansFilter.RESEARCH_WINDOW_KEY, '')
# if researchWindow:
# researchWindowTokens = researchWindow.split(',')
# for rwt in researchWindowTokens:
# try:
# researchWindowValue = int(rwt)
# except:
# raise ValueError('NlmeansFilter: Research window value has to be of type int')
# if researchWindowValue < 0 or researchWindowValue > 99:
# raise ValueError('NlmeansFilter: Research window value has to be between 0 and 99')
# if researchWindowValue % 2 == 0:
# raise ValueError('NlmeansFilter: Research window value has to an odd number')
# self.__researchWindowList.append(researchWindowValue)
# else:
# self.__researchWindowList = [NlmeansFilter.DEFAULT_RESEARCH_WINDOW]
# self.__chromaResearchWindowList = []
# chromaResearchWindow = kwargs.get(NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY, '')
# if chromaResearchWindow:
# chromaResearchWindowTokens = chromaResearchWindow.split(',')
# for crwt in chromaResearchWindowTokens:
# try:
# chromaResearchWindowValue = int(crwt)
# except:
# raise ValueError('NlmeansFilter: Chroma research window value has to be of type int')
# if chromaResearchWindowValue < 0 or chromaResearchWindowValue > 99:
# raise ValueError('NlmeansFilter: Chroma research window value has to be between 0 and 99')
# if chromaResearchWindowValue % 2 == 0:
# raise ValueError('NlmeansFilter: Chroma research window value has to an odd number')
# self.__chromaResearchWindowList.append(chromaResearchWindowValue)
# else:
# self.__chromaResearchWindowList = [NlmeansFilter.DEFAULT_CHROMA_RESEARCH_WINDOW]
super().__init__(self)
def getPayload(self):
# strength = iteration[0]
# patchSize = iteration[1]
# chromaPatchSize = iteration[2]
# researchWindow = iteration[3]
# chromaResearchWindow = iteration[4]
suffices = []
# filterName = 'nlmeans_opencl' if self.__useHardware else 'nlmeans'
payload = {'identifier': DeinterlaceFilter.IDENTIFIER,
'parameters': {},
'suffices': suffices,
'variant': f"DEINT",
'tokens': ['bwdif=mode=1']}
return payload
def getYield(self):
# for it in itertools.product(self.__strengthList,
# self.__patchSizeList,
# self.__chromaPatchSizeList,
# self.__researchWindowList,
# self.__chromaResearchWindowList):
yield self.getPayload()

View File

@@ -144,7 +144,7 @@ class NlmeansFilter(Filter):
'suffices': suffices,
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
'tokens': ['-vf', f"{filterName}=s={strength}"
'tokens': [f"{filterName}=s={strength}"
+ f":p={patchSize}"
+ f":pc={chromaPatchSize}"
+ f":r={researchWindow}"

View File

@@ -1,18 +1,24 @@
import itertools
import click
from .filter import Filter
from ffx.video_encoder import VideoEncoder
class QualityFilter(Filter):
IDENTIFIER = 'quality'
DEFAULT_QUALITY = 32
DEFAULT_VP9_QUALITY = 32
DEFAULT_H264_QUALITY = 17
QUALITY_KEY = 'quality'
def __init__(self, **kwargs):
context = click.get_current_context().obj
self.__qualitiesList = []
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
if qualities:
@@ -26,7 +32,9 @@ class QualityFilter(Filter):
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
self.__qualitiesList.append(qualityValue)
else:
self.__qualitiesList = [QualityFilter.DEFAULT_QUALITY]
self.__qualitiesList = [None]
super().__init__(self)
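Replacing the default with a [None] sentinel lets runJob distinguish an explicit CLI quality from "nothing given", so the pattern default and the encoder default can take over in that order. A sketch of the resulting fallback chain (resolveQuality and its arguments are illustrative, not project API):

def resolveQuality(cliQuality, currentPattern, encoderDefault):
    if cliQuality:                                    # explicit CLI value wins
        return cliQuality
    if currentPattern is not None and currentPattern.quality:
        return currentPattern.quality                 # pattern default
    return encoderDefault                             # encoder-specific default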

View File

@@ -1,8 +1,9 @@
import logging
import re
from jinja2 import Environment, Undefined
from .constants import DEFAULT_OUTPUT_FILENAME_TEMPLATE
from .configuration_controller import ConfigurationController
from .logging_utils import get_ffx_logger
class EmptyStringUndefined(Undefined):
@@ -15,8 +16,55 @@ DIFF_REMOVED_KEY = 'removed'
DIFF_CHANGED_KEY = 'changed'
DIFF_UNCHANGED_KEY = 'unchanged'
FILENAME_FILTER_TRANSLATION = str.maketrans(
{
"/": "-",
":": ";",
"*": "",
"'": "",
"?": "#",
"": "",
"": "",
}
)
TMDB_FILLER_MARKERS = (" (*)", "(*)")
TMDB_EPISODE_RANGE_SUFFIX_REGEX = re.compile(r"\(([0-9]+)[-/]([0-9]+)\)$")
TMDB_EPISODE_PART_SUFFIX_REGEX = re.compile(r"\(([0-9]+)\)$")
RICH_COLOR_REGEX = re.compile(r"\[[a-z_]+\](.+)\[/[a-z_]+\]")
def dictDiff(a : dict, b : dict):
def dictDiff(a : dict, b : dict, ignoreKeys: list = [], removeKeys: list = []):
"""
ignoreKeys: Ignored keys are filtered from calculating diff at all
removeKeys: Override diff calculation to remove keys certainly
"""
a_filtered = {k:v for k,v in a.items() if k not in ignoreKeys}
b_filtered = {k:v for k,v in b.items() if k not in ignoreKeys and k not in removeKeys}
a_only = {k:v for k,v in a_filtered.items() if k not in b_filtered.keys()}
b_only = {k:v for k,v in b_filtered.items() if k not in a_filtered.keys()}
a_b = set(a_filtered.keys()) & set(b_filtered.keys())
changed = {k:b_filtered[k] for k in a_b if a_filtered[k] != b_filtered[k]}
unchanged = {k:b_filtered[k] for k in a_b if a_filtered[k] == b_filtered[k]}
diffResult = {}
if a_only:
diffResult[DIFF_REMOVED_KEY] = a_only
diffResult[DIFF_UNCHANGED_KEY] = unchanged
if b_only:
diffResult[DIFF_ADDED_KEY] = b_only
if changed:
diffResult[DIFF_CHANGED_KEY] = changed
return diffResult
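A worked example of the extended signature (dicts invented):

a = {'title': 'Old', 'language': 'ger', 'creation_time': '2020'}
b = {'title': 'New', 'language': 'ger', 'creation_time': '2021'}
print(dictDiff(a, b, ignoreKeys=['creation_time']))
# {'unchanged': {'language': 'ger'}, 'changed': {'title': 'New'}}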
def dictKeysDiff(a : dict, b : dict):
a_keys = set(a.keys())
b_keys = set(b.keys())
@@ -40,9 +88,10 @@ def dictDiff(a : dict, b : dict):
return diffResult
def dictCache(element: dict, cache: list = []):
for index in range(len(cache)):
diff = dictDiff(cache[index], element)
diff = dictKeysDiff(cache[index], element)
if not diff:
return index, cache
cache.append(element)
@@ -53,11 +102,13 @@ def setDiff(a : set, b : set) -> set:
a_only = a - b
b_only = b - a
a_and_b = a & b
diffResult = {}
if a_only:
diffResult[DIFF_REMOVED_KEY] = a_only
diffResult[DIFF_UNCHANGED_KEY] = a_and_b
if b_only:
diffResult[DIFF_ADDED_KEY] = b_only
@@ -78,16 +129,35 @@ def filterFilename(fileName: str) -> str:
"""This filter replaces charactes from TMDB responses with characters
less problemating when using in filenames or removes them"""
# This appears in TMDB episode names
fileName = str(fileName).replace(' (*)', '')
fileName = str(fileName).replace('(*)', '')
return str(fileName).translate(FILENAME_FILTER_TRANSLATION).strip()
fileName = str(fileName).replace(':', ';')
fileName = str(fileName).replace('*', '')
fileName = str(fileName).replace("'", '')
fileName = str(fileName).replace("?", '#')
def substituteTmdbFilename(fileName: str) -> str:
"""If chaining this method with filterFilename use this one first as the latter will destroy some patterns"""
return fileName.strip()
normalizedFileName = str(fileName)
for fillerMarker in TMDB_FILLER_MARKERS:
normalizedFileName = normalizedFileName.replace(fillerMarker, '')
episodeRangeMatch = TMDB_EPISODE_RANGE_SUFFIX_REGEX.search(normalizedFileName)
if episodeRangeMatch is not None:
partFirstIndex, partLastIndex = episodeRangeMatch.groups()
return TMDB_EPISODE_RANGE_SUFFIX_REGEX.sub(
f"Teil {partFirstIndex}-{partLastIndex}",
normalizedFileName,
count=1,
)
episodePartMatch = TMDB_EPISODE_PART_SUFFIX_REGEX.search(normalizedFileName)
if episodePartMatch is not None:
partIndex = episodePartMatch.group(1)
return TMDB_EPISODE_PART_SUFFIX_REGEX.sub(
f"Teil {partIndex}",
normalizedFileName,
count=1,
)
return normalizedFileName
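Worked examples (episode names invented):

print(substituteTmdbFilename('Angriff (1/2)'))  # 'Angriff Teil 1-2'
print(substituteTmdbFilename('Angriff (2)'))    # 'Angriff Teil 2'
print(substituteTmdbFilename('Angriff (*)'))    # 'Angriff'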
def getEpisodeFileBasename(showName,
@@ -133,8 +203,7 @@ def getEpisodeFileBasename(showName,
if context is not None and 'logger' in context.keys():
logger = context['logger']
else:
logger = logging.getLogger('FFX')
logger.addHandler(logging.NullHandler())
logger = get_ffx_logger()
indexSeparator = ' ' if indexSeasonDigits or indexEpisodeDigits else ''
@@ -164,3 +233,16 @@ def getEpisodeFileBasename(showName,
# return ''.join(filenameTokens)
def formatRichColor(text: str, color: str = None):
if color is None:
return text
else:
return f"[{color}]{text}[/{color}]"
def removeRichColor(text: str):
richColorMatch = RICH_COLOR_REGEX.search(str(text))
if richColorMatch is None:
return text
else:
return str(richColorMatch.group(1))

View File

@@ -1,79 +1,196 @@
from enum import Enum
import difflib
class IsoLanguage(Enum):
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": "afr"}
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": "alb"}
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": "ara"}
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": "arm"}
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": "aze"}
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": "baq"}
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": "bel"}
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": "bul"}
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": "cat"}
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": "chi"}
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": "hrv"}
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": "cze"}
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": "dan"}
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": "dut"}
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": "eng"}
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": "est"}
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": "fin"}
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": "fre"}
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": "geo"}
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": "ger"}
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": "gre"}
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": "heb"}
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": "hin"}
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": "hun"}
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": "ice"}
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": "ind"}
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": "gle"}
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": "ita"}
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": "jpn"}
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": "kaz"}
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": "kor"}
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": "lat"}
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": "lav"}
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": "lit"}
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": "mac"}
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": "may"}
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": "mlt"}
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": "nor"}
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": "per"}
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": "pol"}
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": "por"}
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": "rum"}
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": "rus"}
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": "sme"}
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": "smo"}
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": "sag"}
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": "san"}
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": "srd"}
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": "srp"}
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": "sna"}
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": "snd"}
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": "sin"}
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": "slk"}
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": "slv"}
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": "som"}
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": "sot"}
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": "spa"}
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": "sun"}
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": "swa"}
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": "ssw"}
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": "swe"}
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": "tgl"}
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": "tam"}
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": "tha"}
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": "tur"}
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": "ukr"}
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": "urd"}
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": "vie"}
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": "wel"}
ABKHAZIAN = {"name": "Abkhazian", "iso639_1": "ab", "iso639_2": ["abk"]}
AFAR = {"name": "Afar", "iso639_1": "aa", "iso639_2": ["aar"]}
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": ["afr"]}
AKAN = {"name": "Akan", "iso639_1": "ak", "iso639_2": ["aka"]}
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": ["sqi", "alb"]}
AMHARIC = {"name": "Amharic", "iso639_1": "am", "iso639_2": ["amh"]}
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": ["ara"]}
ARAGONESE = {"name": "Aragonese", "iso639_1": "an", "iso639_2": ["arg"]}
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": ["hye", "arm"]}
ASSAMESE = {"name": "Assamese", "iso639_1": "as", "iso639_2": ["asm"]}
AVARIC = {"name": "Avaric", "iso639_1": "av", "iso639_2": ["ava"]}
AVESTAN = {"name": "Avestan", "iso639_1": "ae", "iso639_2": ["ave"]}
AYMARA = {"name": "Aymara", "iso639_1": "ay", "iso639_2": ["aym"]}
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": ["aze"]}
BAMBARA = {"name": "Bambara", "iso639_1": "bm", "iso639_2": ["bam"]}
BASHKIR = {"name": "Bashkir", "iso639_1": "ba", "iso639_2": ["bak"]}
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": ["eus", "baq"]}
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": ["bel"]}
BENGALI = {"name": "Bengali", "iso639_1": "bn", "iso639_2": ["ben"]}
BISLAMA = {"name": "Bislama", "iso639_1": "bi", "iso639_2": ["bis"]}
BOKMAL = {"name": "Bokmål", "iso639_1": "nb", "iso639_2": ["nob"]}
BOSNIAN = {"name": "Bosnian", "iso639_1": "bs", "iso639_2": ["bos"]}
BRETON = {"name": "Breton", "iso639_1": "br", "iso639_2": ["bre"]}
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": ["bul"]}
BURMESE = {"name": "Burmese", "iso639_1": "my", "iso639_2": ["mya", "bur"]}
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": ["cat"]}
CHAMORRO = {"name": "Chamorro", "iso639_1": "ch", "iso639_2": ["cha"]}
CHECHEN = {"name": "Chechen", "iso639_1": "ce", "iso639_2": ["che"]}
CHICHEWA = {"name": "Chichewa", "iso639_1": "ny", "iso639_2": ["nya"]}
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": ["zho", "chi"]}
CHURCH_SLAVIC = {"name": "Church Slavic", "iso639_1": "cu", "iso639_2": ["chu"]}
CHUVASH = {"name": "Chuvash", "iso639_1": "cv", "iso639_2": ["chv"]}
CORNISH = {"name": "Cornish", "iso639_1": "kw", "iso639_2": ["cor"]}
CORSICAN = {"name": "Corsican", "iso639_1": "co", "iso639_2": ["cos"]}
CREE = {"name": "Cree", "iso639_1": "cr", "iso639_2": ["cre"]}
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": ["hrv"]}
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": ["ces", "cze"]}
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": ["dan"]}
DIVEHI = {"name": "Divehi", "iso639_1": "dv", "iso639_2": ["div"]}
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": ["nld", "dut"]}
DZONGKHA = {"name": "Dzongkha", "iso639_1": "dz", "iso639_2": ["dzo"]}
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": ["eng"]}
ESPERANTO = {"name": "Esperanto", "iso639_1": "eo", "iso639_2": ["epo"]}
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": ["est"]}
EWE = {"name": "Ewe", "iso639_1": "ee", "iso639_2": ["ewe"]}
FAROESE = {"name": "Faroese", "iso639_1": "fo", "iso639_2": ["fao"]}
FIJIAN = {"name": "Fijian", "iso639_1": "fj", "iso639_2": ["fij"]}
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": ["fin"]}
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": ["fra", "fre"]}
FULAH = {"name": "Fulah", "iso639_1": "ff", "iso639_2": ["ful"]}
GALICIAN = {"name": "Galician", "iso639_1": "gl", "iso639_2": ["glg"]}
GANDA = {"name": "Ganda", "iso639_1": "lg", "iso639_2": ["lug"]}
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": ["kat", "geo"]}
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": ["deu", "ger"]}
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": ["ell", "gre"]}
GUARANI = {"name": "Guarani", "iso639_1": "gn", "iso639_2": ["grn"]}
GUJARATI = {"name": "Gujarati", "iso639_1": "gu", "iso639_2": ["guj"]}
HAITIAN = {"name": "Haitian", "iso639_1": "ht", "iso639_2": ["hat"]}
HAUSA = {"name": "Hausa", "iso639_1": "ha", "iso639_2": ["hau"]}
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": ["heb"]}
HERERO = {"name": "Herero", "iso639_1": "hz", "iso639_2": ["her"]}
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": ["hin"]}
HIRI_MOTU = {"name": "Hiri Motu", "iso639_1": "ho", "iso639_2": ["hmo"]}
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": ["hun"]}
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": ["isl", "ice"]}
IDO = {"name": "Ido", "iso639_1": "io", "iso639_2": ["ido"]}
IGBO = {"name": "Igbo", "iso639_1": "ig", "iso639_2": ["ibo"]}
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": ["ind"]}
INTERLINGUA = {"name": "Interlingua", "iso639_1": "ia", "iso639_2": ["ina"]}
INTERLINGUE = {"name": "Interlingue", "iso639_1": "ie", "iso639_2": ["ile"]}
INUKTITUT = {"name": "Inuktitut", "iso639_1": "iu", "iso639_2": ["iku"]}
INUPIAQ = {"name": "Inupiaq", "iso639_1": "ik", "iso639_2": ["ipk"]}
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": ["gle"]}
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": ["ita"]}
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": ["jpn"]}
JAVANESE = {"name": "Javanese", "iso639_1": "jv", "iso639_2": ["jav"]}
KALAALLISUT = {"name": "Kalaallisut", "iso639_1": "kl", "iso639_2": ["kal"]}
KANNADA = {"name": "Kannada", "iso639_1": "kn", "iso639_2": ["kan"]}
KANURI = {"name": "Kanuri", "iso639_1": "kr", "iso639_2": ["kau"]}
KASHMIRI = {"name": "Kashmiri", "iso639_1": "ks", "iso639_2": ["kas"]}
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": ["kaz"]}
KHMER = {"name": "Khmer", "iso639_1": "km", "iso639_2": ["khm"]}
KIKUYU = {"name": "Kikuyu", "iso639_1": "ki", "iso639_2": ["kik"]}
KINYARWANDA = {"name": "Kinyarwanda", "iso639_1": "rw", "iso639_2": ["kin"]}
KIRGHIZ = {"name": "Kirghiz", "iso639_1": "ky", "iso639_2": ["kir"]}
KOMI = {"name": "Komi", "iso639_1": "kv", "iso639_2": ["kom"]}
KONGO = {"name": "Kongo", "iso639_1": "kg", "iso639_2": ["kon"]}
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": ["kor"]}
KUANYAMA = {"name": "Kuanyama", "iso639_1": "kj", "iso639_2": ["kua"]}
KURDISH = {"name": "Kurdish", "iso639_1": "ku", "iso639_2": ["kur"]}
LAO = {"name": "Lao", "iso639_1": "lo", "iso639_2": ["lao"]}
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": ["lat"]}
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": ["lav"]}
LIMBURGAN = {"name": "Limburgan", "iso639_1": "li", "iso639_2": ["lim"]}
LINGALA = {"name": "Lingala", "iso639_1": "ln", "iso639_2": ["lin"]}
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": ["lit"]}
LUBA_KATANGA = {"name": "Luba-Katanga", "iso639_1": "lu", "iso639_2": ["lub"]}
LUXEMBOURGISH = {"name": "Luxembourgish", "iso639_1": "lb", "iso639_2": ["ltz"]}
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": ["mkd", "mac"]}
MALAGASY = {"name": "Malagasy", "iso639_1": "mg", "iso639_2": ["mlg"]}
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": ["msa", "may"]}
MALAYALAM = {"name": "Malayalam", "iso639_1": "ml", "iso639_2": ["mal"]}
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": ["mlt"]}
MANX = {"name": "Manx", "iso639_1": "gv", "iso639_2": ["glv"]}
MAORI = {"name": "Maori", "iso639_1": "mi", "iso639_2": ["mri", "mao"]}
MARATHI = {"name": "Marathi", "iso639_1": "mr", "iso639_2": ["mar"]}
MARSHALLESE = {"name": "Marshallese", "iso639_1": "mh", "iso639_2": ["mah"]}
MONGOLIAN = {"name": "Mongolian", "iso639_1": "mn", "iso639_2": ["mon"]}
NAURU = {"name": "Nauru", "iso639_1": "na", "iso639_2": ["nau"]}
NAVAJO = {"name": "Navajo", "iso639_1": "nv", "iso639_2": ["nav"]}
NDONGA = {"name": "Ndonga", "iso639_1": "ng", "iso639_2": ["ndo"]}
NEPALI = {"name": "Nepali", "iso639_1": "ne", "iso639_2": ["nep"]}
NORTH_NDEBELE = {"name": "North Ndebele", "iso639_1": "nd", "iso639_2": ["nde"]}
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": ["sme"]}
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": ["nor"]}
NORWEGIAN_NYNORSK = {"name": "Nynorsk", "iso639_1": "nn", "iso639_2": ["nno"]}
OCCITAN = {"name": "Occitan", "iso639_1": "oc", "iso639_2": ["oci"]}
OJIBWA = {"name": "Ojibwa", "iso639_1": "oj", "iso639_2": ["oji"]}
ORIYA = {"name": "Oriya", "iso639_1": "or", "iso639_2": ["ori"]}
OROMO = {"name": "Oromo", "iso639_1": "om", "iso639_2": ["orm"]}
OSSETIAN = {"name": "Ossetian", "iso639_1": "os", "iso639_2": ["oss"]}
PALI = {"name": "Pali", "iso639_1": "pi", "iso639_2": ["pli"]}
PANJABI = {"name": "Panjabi", "iso639_1": "pa", "iso639_2": ["pan"]}
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": ["fas", "per"]}
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": ["pol"]}
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": ["por"]}
PUSHTO = {"name": "Pushto", "iso639_1": "ps", "iso639_2": ["pus"]}
QUECHUA = {"name": "Quechua", "iso639_1": "qu", "iso639_2": ["que"]}
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": ["ron", "rum"]}
ROMANSH = {"name": "Romansh", "iso639_1": "rm", "iso639_2": ["roh"]}
RUNDI = {"name": "Rundi", "iso639_1": "rn", "iso639_2": ["run"]}
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": ["rus"]}
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": ["smo"]}
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": ["sag"]}
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": ["san"]}
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": ["srd"]}
SCOTTISH_GAELIC = {"name": "Scottish Gaelic", "iso639_1": "gd", "iso639_2": ["gla"]}
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": ["srp"]}
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": ["sna"]}
SICHUAN_YI = {"name": "Sichuan Yi", "iso639_1": "ii", "iso639_2": ["iii"]}
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": ["snd"]}
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": ["sin"]}
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": ["slk", "slo"]}
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": ["slv"]}
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": ["som"]}
SOUTH_NDEBELE = {"name": "South Ndebele", "iso639_1": "nr", "iso639_2": ["nbl"]}
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": ["sot"]}
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": ["spa"]}
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": ["sun"]}
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": ["swa"]}
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": ["ssw"]}
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": ["swe"]}
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": ["tgl"]}
TAHITIAN = {"name": "Tahitian", "iso639_1": "ty", "iso639_2": ["tah"]}
TAJIK = {"name": "Tajik", "iso639_1": "tg", "iso639_2": ["tgk"]}
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": ["tam"]}
TATAR = {"name": "Tatar", "iso639_1": "tt", "iso639_2": ["tat"]}
TELUGU = {"name": "Telugu", "iso639_1": "te", "iso639_2": ["tel"]}
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": ["tha"]}
TIBETAN = {"name": "Tibetan", "iso639_1": "bo", "iso639_2": ["bod", "tib"]}
TIGRINYA = {"name": "Tigrinya", "iso639_1": "ti", "iso639_2": ["tir"]}
TONGA = {"name": "Tonga", "iso639_1": "to", "iso639_2": ["ton"]}
TSONGA = {"name": "Tsonga", "iso639_1": "ts", "iso639_2": ["tso"]}
TSWANA = {"name": "Tswana", "iso639_1": "tn", "iso639_2": ["tsn"]}
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": ["tur"]}
TURKMEN = {"name": "Turkmen", "iso639_1": "tk", "iso639_2": ["tuk"]}
TWI = {"name": "Twi", "iso639_1": "tw", "iso639_2": ["twi"]}
UIGHUR = {"name": "Uighur", "iso639_1": "ug", "iso639_2": ["uig"]}
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": ["ukr"]}
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": ["urd"]}
UZBEK = {"name": "Uzbek", "iso639_1": "uz", "iso639_2": ["uzb"]}
VENDA = {"name": "Venda", "iso639_1": "ve", "iso639_2": ["ven"]}
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": ["vie"]}
VOLAPUK = {"name": "Volapük", "iso639_1": "vo", "iso639_2": ["vol"]}
WALLOON = {"name": "Walloon", "iso639_1": "wa", "iso639_2": ["wln"]}
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": ["cym", "wel"]}
WESTERN_FRISIAN = {"name": "Western Frisian", "iso639_1": "fy", "iso639_2": ["fry"]}
WOLOF = {"name": "Wolof", "iso639_1": "wo", "iso639_2": ["wol"]}
XHOSA = {"name": "Xhosa", "iso639_1": "xh", "iso639_2": ["xho"]}
YIDDISH = {"name": "Yiddish", "iso639_1": "yi", "iso639_2": ["yid"]}
YORUBA = {"name": "Yoruba", "iso639_1": "yo", "iso639_2": ["yor"]}
ZHUANG = {"name": "Zhuang", "iso639_1": "za", "iso639_2": ["zha"]}
ZULU = {"name": "Zulu", "iso639_1": "zu", "iso639_2": ["zul"]}
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": "und"}
FILIPINO = {"name": "Filipino", "iso639_1": "tl", "iso639_2": ["fil"]}
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": ["und"]}
@staticmethod
@@ -82,25 +199,22 @@ class IsoLanguage(Enum):
closestMatches = difflib.get_close_matches(label, [l.value["name"] for l in IsoLanguage], n=1)
if closestMatches:
foundLangs = [l for l in IsoLanguage if l.value['name'] == closestMatches[0]]
foundLangs = [l for l in IsoLanguage if l.value["name"] == closestMatches[0]]
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
else:
return IsoLanguage.UNDEFINED
@staticmethod
def findThreeLetter(threeLetter : str):
foundLangs = [l for l in IsoLanguage if l.value['iso639_2'] == str(threeLetter)]
foundLangs = [l for l in IsoLanguage if str(threeLetter) in l.value["iso639_2"]]
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
def label(self):
return str(self.value['name'])
return str(self.value["name"])
def twoLetter(self):
return str(self.value['iso639_1'])
return str(self.value["iso639_1"])
def threeLetter(self):
return str(self.value['iso639_2'])
return str(self.value["iso639_2"][0])

68
src/ffx/logging_utils.py Normal file
View File

@@ -0,0 +1,68 @@
import logging
import os
FFX_LOGGER_NAME = "FFX"
CONSOLE_HANDLER_NAME = "ffx-console"
FILE_HANDLER_NAME = "ffx-file"
def get_ffx_logger(name: str = FFX_LOGGER_NAME) -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
if not logger.handlers:
logger.addHandler(logging.NullHandler())
return logger
def configure_ffx_logger(
log_file_path: str,
file_level: int,
console_level: int,
name: str = FFX_LOGGER_NAME,
) -> logging.Logger:
logger = get_ffx_logger(name)
logger.propagate = False
for handler in list(logger.handlers):
if isinstance(handler, logging.NullHandler):
logger.removeHandler(handler)
console_handler = next(
(handler for handler in logger.handlers if handler.get_name() == CONSOLE_HANDLER_NAME),
None,
)
if console_handler is None:
console_handler = logging.StreamHandler()
console_handler.set_name(CONSOLE_HANDLER_NAME)
logger.addHandler(console_handler)
console_handler.setLevel(console_level)
console_handler.setFormatter(logging.Formatter("%(message)s"))
normalized_log_path = os.path.abspath(log_file_path)
file_handler = next(
(handler for handler in logger.handlers if handler.get_name() == FILE_HANDLER_NAME),
None,
)
if (
file_handler is not None
and os.path.abspath(file_handler.baseFilename) != normalized_log_path
):
logger.removeHandler(file_handler)
file_handler.close()
file_handler = None
if file_handler is None:
file_handler = logging.FileHandler(normalized_log_path)
file_handler.set_name(FILE_HANDLER_NAME)
logger.addHandler(file_handler)
file_handler.setLevel(file_level)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
return logger
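Hypothetical setup call (path and levels are illustrative):

import logging

logger = configure_ffx_logger('/tmp/ffx.log',
                              file_level=logging.DEBUG,
                              console_level=logging.INFO)
logger.info('visible on the console and in /tmp/ffx.log')
logger.debug('written to the file only')
# Repeated calls reuse the named handlers instead of stacking duplicates;
# library modules call get_ffx_logger() and stay silent until configured.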

View File

@@ -25,14 +25,14 @@ class MediaController():
pid = int(patternId)
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == pid)
pattern = s.query(Pattern).filter(Pattern.id == pid).first()
if q.count():
pattern = q.first
if pattern is not None:
for mediaTagKey, mediaTagValue in mediaDescriptor.getTags():
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
self.__tc.addTrack(trackDescriptor, patternId = pid)
s.commit()

View File

@@ -1,4 +1,4 @@
import os, re, click, logging
import os, re, click
from typing import List, Self
@@ -9,8 +9,7 @@ from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.track_descriptor import TrackDescriptor
from ffx.helper import dictDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
from ffx.logging_utils import get_ffx_logger
class MediaDescriptor:
@@ -22,6 +21,7 @@ class MediaDescriptor:
TRACKS_KEY = "tracks"
TRACK_DESCRIPTOR_LIST_KEY = "track_descriptors"
ATTACHMENT_DESCRIPTOR_LIST_KEY = "attachment_descriptors"
CLEAR_TAGS_FLAG_KEY = "clear_tags"
FFPROBE_DISPOSITION_KEY = "disposition"
@@ -31,7 +31,9 @@ class MediaDescriptor:
#407 remove as well
EXCLUDED_MEDIA_TAGS = ["creation_time"]
SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
STREAM_LANGUAGE_DISPOSITIONS_MATCH = '([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
SUBTITLE_FILE_EXTENSION = 'vtt'
def __init__(self, **kwargs):
@@ -45,8 +47,7 @@ class MediaDescriptor:
self.__logger = self.__context['logger']
else:
self.__context = {}
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
if MediaDescriptor.TAGS_KEY in kwargs.keys():
if type(kwargs[MediaDescriptor.TAGS_KEY]) is not dict:
@@ -69,9 +70,9 @@ class MediaDescriptor:
raise TypeError(
f"TrackDesciptor.__init__(): All elements of argument list {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} are required to be of type TrackDescriptor"
)
self.__trackDescriptors = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
self.__trackDescriptors: List[TrackDescriptor] = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
else:
self.__trackDescriptors = []
self.__trackDescriptors: List[TrackDescriptor] = []
def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
@@ -107,14 +108,16 @@ class MediaDescriptor:
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
for t in self.getAllTrackDescriptors():
# for t in self.getAllTrackDescriptors():
for t in self.getTrackDescriptors():
if t.getType() == trackType:
t.setDispositionFlag(
TrackDisposition.DEFAULT, t.getSubIndex() == int(subIndex)
)
def setForcedSubTrack(self, trackType: TrackType, subIndex: int):
for t in self.getAllTrackDescriptors():
# for t in self.getAllTrackDescriptors():
for t in self.getTrackDescriptors():
if t.getType() == trackType:
t.setDispositionFlag(
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
@@ -190,7 +193,8 @@ class MediaDescriptor:
def applySourceIndices(self, sourceMediaDescriptor: Self):
sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
# sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors()
numTrackDescriptors = len(self.__trackDescriptors)
if len(sourceTrackDescriptors) != numTrackDescriptors:
@@ -203,7 +207,7 @@ class MediaDescriptor:
def rearrangeTrackDescriptors(self, newOrder: List[int]):
if len(newOrder) != len(self.__trackDescriptors):
raise ValueError('Length of list with reordered indices does not match number of track descriptors')
reorderedTrackDescriptors = {}
reorderedTrackDescriptors = []
for oldIndex in newOrder:
reorderedTrackDescriptors.append(self.__trackDescriptors[oldIndex])
self.__trackDescriptors = reorderedTrackDescriptors
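The change from {} to [] above fixes a real bug: dicts have no append(), so rearrangeTrackDescriptors() previously raised AttributeError on the first iteration. The reorder semantics, sketched with plain strings:

old = ["video", "audio-en", "audio-de"]
new_order = [0, 2, 1]                    # must cover every index exactly once
reordered = [old[i] for i in new_order]  # ['video', 'audio-de', 'audio-en']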
@@ -285,9 +289,9 @@ class MediaDescriptor:
tdList[trackIndex].setIndex(trackIndex)
def getAllTrackDescriptors(self):
"""Returns all track descriptors sorted by type: video, audio then subtitles"""
return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
# def getAllTrackDescriptors(self):
# """Returns all track descriptors sorted by type: video, audio then subtitles"""
# return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
def getTrackDescriptors(self,
@@ -317,82 +321,16 @@ class MediaDescriptor:
if s.getType() == TrackType.SUBTITLE
]
def compare(self, vsMediaDescriptor: Self):
if not isinstance(vsMediaDescriptor, self.__class__):
self.__logger.error(f"MediaDescriptor.compare(): Argument is required to be of type {self.__class__}")
raise click.Abort()
vsTags = vsMediaDescriptor.getTags()
tags = self.getTags()
# HINT: Some tags differ per file, for example creation_time, so these are removed before diff
for emt in MediaDescriptor.EXCLUDED_MEDIA_TAGS:
if emt in tags.keys():
del tags[emt]
if emt in vsTags.keys():
del vsTags[emt]
tagsDiff = dictDiff(vsTags, tags)
compareResult = {}
if tagsDiff:
compareResult[MediaDescriptor.TAGS_KEY] = tagsDiff
# Target track configuration (from DB)
# tracks = self.getAllTrackDescriptors()
tracks = self.getAllTrackDescriptors() # filter
numTracks = len(tracks)
# Current track configuration (of file)
vsTracks = vsMediaDescriptor.getAllTrackDescriptors()
numVsTracks = len(vsTracks)
maxNumOfTracks = max(numVsTracks, numTracks)
trackCompareResult = {}
for tp in range(maxNumOfTracks):
#!
vsTrackIndex = tracks[tp].getSourceIndex()
# Will trigger if tracks are missing in file
if tp > (numVsTracks - 1):
if DIFF_ADDED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_ADDED_KEY] = set()
trackCompareResult[DIFF_ADDED_KEY].add(tracks[tp].getIndex())
continue
# Will trigger if tracks are missing in DB definition
# New tracks will be added per update via this way
if tp > (numTracks - 1):
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_REMOVED_KEY] = {}
trackCompareResult[DIFF_REMOVED_KEY][
vsTracks[vsTrackIndex].getIndex()
] = vsTracks[vsTrackIndex]
continue
# assumption is made here that the track order will not change for all files of a sequence
trackDiff = tracks[tp].compare(vsTracks[vsTrackIndex])
if trackDiff:
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_CHANGED_KEY] = {}
trackCompareResult[DIFF_CHANGED_KEY][
vsTracks[vsTrackIndex].getIndex()
] = trackDiff
if trackCompareResult:
compareResult[MediaDescriptor.TRACKS_KEY] = trackCompareResult
return compareResult
def getAttachmentTracks(self) -> List[TrackDescriptor]:
return [
s
for s in self.__trackDescriptors
if s.getType() == TrackType.ATTACHMENT
]
def getImportFileTokens(self, use_sub_index: bool = True):
"""Generate ffmpeg import options for external stream files"""
importFileTokens = []
@@ -415,25 +353,47 @@ class MediaDescriptor:
return importFileTokens
def getInputMappingTokens(self, use_sub_index: bool = True, only_video: bool = False):
def getInputMappingTokens(self,
use_sub_index: bool = True,
only_video: bool = False,
sourceMediaDescriptor: Self = None):
"""Tracks must be reordered for source index order"""
inputMappingTokens = []
sortedTrackDescriptors = sorted(self.__trackDescriptors, key=lambda d: d.getIndex())
sourceTrackDescriptorsByIndex = {
td.getIndex(): td
for td in (
sourceMediaDescriptor.getTrackDescriptors()
if sourceMediaDescriptor is not None
else sortedTrackDescriptors
)
}
# raise click.ClickException(' '.join([f"\nindex={td.getIndex()} subIndex={td.getSubIndex()} srcIndex={td.getSourceIndex()} type={td.getType().label()}" for td in self.__trackDescriptors]))
filePointer = 1
for trackIndex in range(len(self.__trackDescriptors)):
for trackIndex in range(len(sortedTrackDescriptors)):
td = self.__trackDescriptors[trackIndex]
td: TrackDescriptor = sortedTrackDescriptors[trackIndex]
stdi = self.__trackDescriptors[td.getSourceIndex()].getIndex()
stdsi = self.__trackDescriptors[td.getSourceIndex()].getSubIndex()
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec() != TrackCodec.PNG:
# sti = self.__trackDescriptors[trackIndex].getSourceIndex()
# sotd = sourceOrderTrackDescriptors[sti]
sourceTrackDescriptor = sourceTrackDescriptorsByIndex.get(td.getSourceIndex())
if sourceTrackDescriptor is None:
raise ValueError(f"No source track descriptor found for source index {td.getSourceIndex()}")
stdi = sourceTrackDescriptor.getIndex()
stdsi = sourceTrackDescriptor.getSubIndex()
trackType = td.getType()
trackCodec = td.getCodec()
if (trackType != TrackType.ATTACHMENT
and (trackType == TrackType.VIDEO or not only_video)):
if (trackType == TrackType.VIDEO or not only_video):
importedFilePath = td.getExternalSourceFilePath()
@@ -449,42 +409,59 @@ class MediaDescriptor:
else:
if td.getCodec() != TrackCodec.PGS:
if trackCodec not in [TrackCodec.PGS, TrackCodec.VOBSUB]:
inputMappingTokens += [
"-map",
f"0:{trackType.indicator()}:{stdsi}",
]
else:
if td.getCodec() != TrackCodec.PGS:
if trackCodec not in [TrackCodec.PGS, TrackCodec.VOBSUB]:
inputMappingTokens += ["-map", f"0:{stdi}"]
if sourceMediaDescriptor:
fontDescriptors = [ftd for ftd in sourceMediaDescriptor.getAttachmentTracks()
if ftd.getCodec() == TrackCodec.TTF]
else:
fontDescriptors = [ftd for ftd in self.__trackDescriptors
if ftd.getType() == TrackType.ATTACHMENT
and ftd.getCodec() == TrackCodec.TTF]
for ad in sorted(fontDescriptors, key=lambda d: d.getIndex()):
inputMappingTokens += ["-map", f"0:{ad.getIndex()}"]
return inputMappingTokens
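For orientation, a plausible token sequence this method could emit with use_sub_index=True for a source holding one video, one audio, one text subtitle and two TTF font attachments (the concrete stream indices are assumptions, not taken from this diff):

expected_tokens = [
    "-map", "0:v:0",  # video, addressed by type and sub-index
    "-map", "0:a:0",  # audio
    "-map", "0:s:0",  # subtitle; PGS/VOBSUB tracks are skipped entirely
    "-map", "0:3",    # TTF attachments are mapped by absolute stream index
    "-map", "0:4",
]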
def searchSubtitleFiles(self, searchDirectory, prefix):
sesl_match = re.compile(MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_MATCH)
sesld_match = re.compile(f"{prefix}_{MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
sld_match = re.compile(f"{prefix}_{MediaDescriptor.STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
subtitleFileDescriptors = []
for subtitleFilename in os.listdir(searchDirectory):
if subtitleFilename.startswith(prefix) and subtitleFilename.endswith(
"." + MediaDescriptor.SUBTITLE_FILE_EXTENSION
):
sesl_result = sesl_match.search(subtitleFilename)
if sesl_result is not None:
sesld_result = sesld_match.search(subtitleFilename)
sld_result = None if sesld_result is not None else sld_match.search(subtitleFilename)
if sesld_result is not None:
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
if os.path.isfile(subtitleFilePath):
subtitleFileDescriptor = {}
subtitleFileDescriptor["path"] = subtitleFilePath
subtitleFileDescriptor["season"] = int(sesl_result.group(1))
subtitleFileDescriptor["episode"] = int(sesl_result.group(2))
subtitleFileDescriptor["index"] = int(sesl_result.group(3))
subtitleFileDescriptor["language"] = sesl_result.group(4)
subtitleFileDescriptor["season"] = int(sesld_result.group(1))
subtitleFileDescriptor["episode"] = int(sesld_result.group(2))
subtitleFileDescriptor["index"] = int(sesld_result.group(3))
subtitleFileDescriptor["language"] = sesld_result.group(4)
dispSet = set()
dispCaptGroups = sesl_result.groups()
dispCaptGroups = sesld_result.groups()
numCaptGroups = len(dispCaptGroups)
if numCaptGroups > 4:
for groupIndex in range(numCaptGroups - 4):
@@ -495,6 +472,29 @@ class MediaDescriptor:
subtitleFileDescriptors.append(subtitleFileDescriptor)
if sld_result is not None:
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
if os.path.isfile(subtitleFilePath):
subtitleFileDescriptor = {}
subtitleFileDescriptor["path"] = subtitleFilePath
subtitleFileDescriptor["index"] = int(sld_result.group(1))
subtitleFileDescriptor["language"] = sld_result.group(2)
dispSet = set()
dispCaptGroups = sld_result.groups()
numCaptGroups = len(dispCaptGroups)
if numCaptGroups > 2:
for groupIndex in range(numCaptGroups - 2):
disp = TrackDisposition.fromIndicator(dispCaptGroups[groupIndex + 2])
if disp is not None:
dispSet.add(disp)
subtitleFileDescriptor["disposition_set"] = dispSet
subtitleFileDescriptors.append(subtitleFileDescriptor)
self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")
return subtitleFileDescriptors
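The two expressions encode the filename contract for external subtitles: an episode-scoped form and an index-only form, each optionally followed by three-letter uppercase disposition markers. A quick demonstration with made-up names (note that a repeated capture group in Python's re keeps only its last match, so at most one disposition marker per filename survives into groups()):

import re

SESLD = r'[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
SLD = r'([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'

print(re.compile(f"MyShow_{SESLD}").search("MyShow_S01E02_3_eng_DEF.vtt").groups())
# -> ('01', '02', '3', 'eng', 'DEF')
print(re.compile(f"MyShow_{SLD}").search("MyShow_2_ger.vtt").groups())
# -> ('2', 'ger', None)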
@@ -518,7 +518,11 @@ class MediaDescriptor:
[
d
for d in availableFileSubtitleDescriptors
if d["season"] == int(season) and d["episode"] == int(episode)
if ((season == -1 and episode == -1)
or (
d.get("season") == int(season)
and d.get("episode") == int(episode)
))
],
key=lambda d: d["index"],
)
@@ -533,15 +537,20 @@ class MediaDescriptor:
if matchingSubtitleTrackDescriptor:
# click.echo(f"Found matching subtitle file {msfd["path"]}\n")
self.__logger.debug(f"importSubtitles(): Found matching subtitle file {msfd['path']}")
matchingSubtitleTrackDescriptor[0].setExternalSourceFilePath(msfd["path"])
matchingTrack = matchingSubtitleTrackDescriptor[0]
matchingTrack.setExternalSourceFilePath(msfd["path"])
# TODO: Check if useful
# matchingSubtitleTrackDescriptor[0].setDispositionSet(msfd["disposition_set"])
# Prefer metadata coming from the external single-track source when
# it is provided explicitly by the filename contract.
matchingTrack.getTags()["language"] = msfd["language"]
if msfd["disposition_set"]:
matchingTrack.setDispositionSet(msfd["disposition_set"])
def getConfiguration(self, label: str = ''):
yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
for td in self.getAllTrackDescriptors():
# for td in self.getAllTrackDescriptors():
for td in self.getTrackDescriptors():
yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
+ '|'.join([d.indicator() for d in td.getDispositionSet()])
+ ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))
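Each line yielded by getConfiguration() is a compact per-track summary: index, type indicator and sub-index, the joined disposition indicators, then the track tags. A hypothetical rendering (assuming 'DEF' and 'FOR' are the indicators for the default and forced dispositions, which this diff does not show):

# --- MyLabel title=My Show
# 1:a:0 DEF language=deu
# 2:s:0 DEF|FOR language=eng title=Full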

View File

@@ -0,0 +1,346 @@
import click
from ffx.iso_language import IsoLanguage
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.helper import dictDiff, setDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
from ffx.track_codec import TrackCodec
from ffx.track_disposition import TrackDisposition
class MediaDescriptorChangeSet():
TAGS_KEY = "tags"
TRACKS_KEY = "tracks"
DISPOSITION_SET_KEY = "disposition_set"
TRACK_DESCRIPTOR_KEY = "track_descriptor"
def __init__(self, context,
targetMediaDescriptor: MediaDescriptor = None,
sourceMediaDescriptor: MediaDescriptor = None):
self.__context = context
self.__logger = context['logger']
self.__configurationData = self.__context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
self.__targetTrackDescriptors = targetMediaDescriptor.getTrackDescriptors() if targetMediaDescriptor is not None else []
self.__sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors() if sourceMediaDescriptor is not None else []
self.__targetTrackDescriptorsByIndex = {
trackDescriptor.getIndex(): trackDescriptor
for trackDescriptor in self.__targetTrackDescriptors
}
self.__sourceTrackDescriptorsByIndex = {
trackDescriptor.getIndex(): trackDescriptor
for trackDescriptor in self.__sourceTrackDescriptors
}
targetMediaTags = targetMediaDescriptor.getTags() if targetMediaDescriptor is not None else {}
sourceMediaTags = sourceMediaDescriptor.getTags() if sourceMediaDescriptor is not None else {}
self.__changeSetObj = {}
#if targetMediaDescriptor is not None:
#!!#
tagsDiff = dictDiff(sourceMediaTags,
targetMediaTags,
ignoreKeys=self.__ignoreGlobalKeys,
removeKeys=self.__removeGlobalKeys)
if tagsDiff:
self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiff
self.__numTargetTracks = len(self.__targetTrackDescriptors)
# Current track configuration (of file)
self.__numSourceTracks = len(self.__sourceTrackDescriptors)
trackCompareResult = {}
for targetTrackDescriptor in self.__targetTrackDescriptors:
sourceTrackDescriptor = self.__sourceTrackDescriptorsByIndex.get(
targetTrackDescriptor.getSourceIndex()
)
if sourceTrackDescriptor is None:
if DIFF_ADDED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_ADDED_KEY] = {}
trackCompareResult[DIFF_ADDED_KEY][targetTrackDescriptor.getIndex()] = targetTrackDescriptor
continue
trackDiff = self.compareTracks(targetTrackDescriptor, sourceTrackDescriptor)
if trackDiff:
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_CHANGED_KEY] = {}
trackCompareResult[DIFF_CHANGED_KEY][targetTrackDescriptor.getIndex()] = trackDiff
targetSourceIndices = {
targetTrackDescriptor.getSourceIndex()
for targetTrackDescriptor in self.__targetTrackDescriptors
}
for sourceTrackDescriptor in self.__sourceTrackDescriptors:
if sourceTrackDescriptor.getIndex() not in targetSourceIndices:
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_REMOVED_KEY] = {}
trackCompareResult[DIFF_REMOVED_KEY][sourceTrackDescriptor.getIndex()] = sourceTrackDescriptor
if trackCompareResult:
self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY] = trackCompareResult
def compareTracks(self,
targetTrackDescriptor: TrackDescriptor = None,
sourceTrackDescriptor: TrackDescriptor = None):
sourceTrackTags = sourceTrackDescriptor.getTags() if sourceTrackDescriptor is not None else {}
targetTrackTags = (
self.normalizeTrackTags(targetTrackDescriptor.getTags())
if targetTrackDescriptor is not None
else {}
)
trackCompareResult = {}
tagsDiffResult = dictDiff(sourceTrackTags,
targetTrackTags,
ignoreKeys=self.__ignoreTrackKeys,
removeKeys=self.__removeTrackKeys)
if tagsDiffResult:
trackCompareResult[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiffResult
sourceDispositionSet = sourceTrackDescriptor.getDispositionSet() if sourceTrackDescriptor is not None else set()
targetDispositionSet = targetTrackDescriptor.getDispositionSet() if targetTrackDescriptor is not None else set()
# if targetTrackDescriptor.getIndex() == 3:
# raise click.ClickException(f"{sourceDispositionSet} {targetDispositionSet}")
dispositionDiffResult = setDiff(sourceDispositionSet, targetDispositionSet)
if dispositionDiffResult:
trackCompareResult[MediaDescriptorChangeSet.DISPOSITION_SET_KEY] = dispositionDiffResult
return trackCompareResult
def normalizeTrackTagValue(self, tagKey, tagValue):
if tagKey != "language":
return tagValue
if isinstance(tagValue, IsoLanguage):
return tagValue.threeLetter()
trackLanguage = IsoLanguage.findThreeLetter(str(tagValue))
if trackLanguage != IsoLanguage.UNDEFINED:
return trackLanguage.threeLetter()
return tagValue
def normalizeTrackTags(self, trackTags: dict):
return {
tagKey: self.normalizeTrackTagValue(tagKey, tagValue)
for tagKey, tagValue in trackTags.items()
}
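normalizeTrackTagValue() funnels every language tag through IsoLanguage so that alias codes compare equal. A sketch of the round trip, assuming a GERMAN member whose iso639_2 value is the list ['deu', 'ger'] (the enum contents are not shown in this diff):

from ffx.iso_language import IsoLanguage

lang = IsoLanguage.findThreeLetter("ger")  # matches any listed alias
if lang != IsoLanguage.UNDEFINED:
    print(lang.threeLetter())              # -> "deu", the canonical first entry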
def generateDispositionTokens(self):
"""
#Example: -disposition:s:0 default -disposition:s:1 0
"""
dispositionTokens = []
# if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
#
# if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
# trackDescriptor: TrackDescriptor
# for trackDescriptor in addedTracks.values():
#
# dispositionSet = trackDescriptor.getDispositionSet()
#
# if dispositionSet:
# dispositionTokens += [f"-disposition:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
# '+'.join([d.label() for d in dispositionSet])]
#
# if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
# trackDiffObj: dict
#
#
# for trackIndex, trackDiffObj in changedTracks.items():
#
# if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
#
# dispositionDiffObj: dict = trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY]
#
# addedDispositions = dispositionDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in dispositionDiffObj.keys() else set()
# removedDispositions = dispositionDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in dispositionDiffObj.keys() else set()
# unchangedDispositions = dispositionDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in dispositionDiffObj.keys() else set()
#
# targetDispositions = addedDispositions | unchangedDispositions
#
# trackDescriptor = self.__targetTrackDescriptors[trackIndex]
# streamIndicator = trackDescriptor.getType().indicator()
# subIndex = trackDescriptor.getSubIndex()
#
# if targetDispositions:
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
# # if not targetDispositions and removedDispositions:
# else:
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
for ttd in self.__targetTrackDescriptors:
targetDispositions = ttd.getDispositionSet()
streamIndicator = ttd.getType().indicator()
subIndex = ttd.getSubIndex()
if targetDispositions:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
# if not targetDispositions and removedDispositions:
else:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
return dispositionTokens
def generateMetadataTokens(self):
metadataTokens = []
if MediaDescriptorChangeSet.TAGS_KEY in self.__changeSetObj.keys():
addedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
removedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
if DIFF_REMOVED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
changedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
outputMediaTags = addedMediaTags | changedMediaTags
if ('no_signature' not in self.__context.keys()
or not self.__context['no_signature']):
outputMediaTags = outputMediaTags | self.__signatureTags
# outputMediaTags = {k:v for k,v in outputMediaTags.items() if k not in self.__removeGlobalKeys}
for tagKey, tagValue in outputMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for tagKey, tagValue in changedMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for removeKey in removedMediaTags.keys():
metadataTokens += [f"-metadata:g",
f"{removeKey}="]
if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
trackDescriptor: TrackDescriptor
for trackDescriptor in addedTracks.values():
for tagKey, tagValue in self.normalizeTrackTags(trackDescriptor.getTags()).items():
if tagKey not in self.__removeTrackKeys:
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
trackDiffObj: dict
for trackIndex, trackDiffObj in changedTracks.items():
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
tagsDiffObj = trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY]
addedTrackTags = tagsDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in tagsDiffObj.keys() else {}
changedTrackTags = tagsDiffObj[DIFF_CHANGED_KEY] if DIFF_CHANGED_KEY in tagsDiffObj.keys() else {}
unchangedTrackTags = tagsDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in tagsDiffObj.keys() else {}
removedTrackTags = tagsDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in tagsDiffObj.keys() else {}
outputTrackTags = addedTrackTags | changedTrackTags
trackDescriptor = self.__targetTrackDescriptorsByIndex[trackIndex]
for tagKey, tagValue in self.normalizeTrackTags(outputTrackTags).items():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
if trackDescriptor.getExternalSourceFilePath():
# When a single-track external file substitutes the
# media payload, keep metadata from the regular
# source track unless the external/target side
# overrides it explicitly.
preservedTrackTags = (
{
tagKey: tagValue
for tagKey, tagValue in removedTrackTags.items()
if tagKey not in self.__removeTrackKeys
}
| unchangedTrackTags
)
for tagKey, tagValue in self.normalizeTrackTags(preservedTrackTags).items():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
else:
for removeKey in removedTrackTags.keys():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{removeKey}="]
for tagKey, tagValue in self.__context.get('encoding_metadata_tags', {}).items():
metadataTokens += [f"-metadata:g", f"{tagKey}={tagValue}"]
metadataTokens += self.generateConfiguredRemovalMetadataTokens()
return metadataTokens
def getChangeSetObj(self):
return self.__changeSetObj
def generateConfiguredRemovalMetadataTokens(self):
metadataTokens = []
for removeKey in self.__removeGlobalKeys:
metadataTokens += ["-metadata:g", f"{removeKey}="]
for trackDescriptor in self.__targetTrackDescriptors:
for removeKey in self.__removeTrackKeys:
metadataTokens += [
f"-metadata:s:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
f"{removeKey}=",
]
return metadataTokens
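Taken together, the two generators translate a change set into a flat ffmpeg argument list. A hedged illustration for a target with a default audio track and a subtitle track whose flags are cleared (the concrete tag values are assumptions, not taken from this diff):

args = [
    "-disposition:a:0", "default",      # dispositions re-stated per target track
    "-disposition:s:0", "0",            # '0' clears every disposition flag
    "-metadata:g", "title=My Show",     # added/changed global tag
    "-metadata:s:s:0", "language=deu",  # normalized per-stream tag
    "-metadata:g", "creation_time=",    # empty value removes a configured key
]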

View File

@@ -6,15 +6,12 @@ from textual.containers import Grid
from ffx.audio_layout import AudioLayout
from .pattern_controller import PatternController
from .show_controller import ShowController
from .track_controller import TrackController
from .tag_controller import TagController
from .show_details_screen import ShowDetailsScreen
from .pattern_details_screen import PatternDetailsScreen
from .screen_support import build_screen_bootstrap, build_screen_controllers
from ffx.track_type import TrackType
from ffx.track_codec import TrackCodec
from ffx.model.track import Track
from ffx.track_disposition import TrackDisposition
@@ -26,7 +23,9 @@ from textual.widgets._data_table import CellDoesNotExist
from ffx.media_descriptor import MediaDescriptor
from ffx.file_properties import FileProperties
from ffx.helper import DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.helper import formatRichColor, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
# Screen[dict[int, str, int]]
@@ -36,8 +35,8 @@ class MediaDetailsScreen(Screen):
Grid {
grid-size: 5 8;
grid-rows: 8 2 2 2 8 2 2 8;
grid-columns: 25 25 120 10 75;
grid-rows: 8 2 2 2 2 8 2 2 8;
grid-columns: 15 25 90 10 105;
height: 100%;
width: 100%;
padding: 1;
@@ -90,6 +89,10 @@ class MediaDetailsScreen(Screen):
border: solid green;
}
.purple {
tint: purple 40%;
}
.yellow {
tint: yellow 40%;
}
@@ -105,6 +108,19 @@ class MediaDetailsScreen(Screen):
"""
TRACKS_TABLE_INDEX_COLUMN_LABEL = "Index"
TRACKS_TABLE_TYPE_COLUMN_LABEL = "Type"
TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL = "SubIndex"
TRACKS_TABLE_CODEC_COLUMN_LABEL = "Codec"
TRACKS_TABLE_LAYOUT_COLUMN_LABEL = "Layout"
TRACKS_TABLE_LANGUAGE_COLUMN_LABEL = "Language"
TRACKS_TABLE_TITLE_COLUMN_LABEL = "Title"
TRACKS_TABLE_DEFAULT_COLUMN_LABEL = "Default"
TRACKS_TABLE_FORCED_COLUMN_LABEL = "Forced"
DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL = 'Differences (file->db/output)'
BINDINGS = [
("n", "new_pattern", "New Pattern"),
("u", "update_pattern", "Update Pattern"),
@@ -115,13 +131,23 @@ class MediaDetailsScreen(Screen):
def __init__(self):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
bootstrap = build_screen_bootstrap(self.app.getContext())
self.context = bootstrap.context
self.__pc = PatternController(context = self.context)
self.__sc = ShowController(context = self.context)
self.__tc = TrackController(context = self.context)
self.__tac = TagController(context = self.context)
self.__removeGlobalKeys = bootstrap.remove_global_keys
self.__ignoreGlobalKeys = bootstrap.ignore_global_keys
controllers = build_screen_controllers(
self.context,
pattern=True,
show=True,
track=True,
tag=True,
)
self.__pc = controllers['pattern']
self.__sc = controllers['show']
self.__tc = controllers['track']
self.__tac = controllers['tag']
if 'command' not in self.context.keys() or self.context['command'] != 'inspect':
raise click.ClickException(f"MediaDetailsScreen.__init__(): Can only perform command 'inspect'")
@@ -137,7 +163,25 @@ class MediaDetailsScreen(Screen):
self.loadProperties()
def getRowIndexFromShowId(self, showId : int) -> int:
def removeShow(self, showId : int = -1):
"""Remove show entry from DataTable.
Removes the <New show> entry if showId is not set"""
for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
rowData = self.showsTable.get_row(rowKey)
try:
if (showId == -1 and rowData[0] == ' '
or showId == int(rowData[0])):
self.showsTable.remove_row(rowKey)
return
except:
continue
def getRowIndexFromShowId(self, showId : int = -1) -> int:
"""Find the index of the row where the value in the specified column matches the target_value."""
for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
@@ -145,7 +189,8 @@ class MediaDetailsScreen(Screen):
rowData = self.showsTable.get_row(rowKey)
try:
if showId == int(rowData[0]):
if ((showId == -1 and rowData[0] == ' ')
or showId == int(rowData[0])):
return int(self.showsTable.get_row_index(rowKey))
except:
continue
@@ -156,7 +201,7 @@ class MediaDetailsScreen(Screen):
def loadProperties(self):
self.__mediaFileProperties = FileProperties(self.context, self.__mediaFilename)
self.__currentMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
self.__sourceMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
#HINT: This is None if the filename did not match anything in database
self.__currentPattern = self.__mediaFileProperties.getPattern()
@@ -167,9 +212,13 @@ class MediaDetailsScreen(Screen):
# Enumerating differences between media descriptors
# from file (=current) vs from stored in database (=target)
try:
self.__mediaDifferences = self.__targetMediaDescriptor.compare(self.__currentMediaDescriptor) if self.__currentPattern is not None else {}
mdcs = MediaDescriptorChangeSet(self.context,
self.__targetMediaDescriptor,
self.__sourceMediaDescriptor)
self.__mediaChangeSetObj = mdcs.getChangeSetObj()
except ValueError:
self.__mediaDifferences = {}
self.__mediaChangeSetObj = {}
def updateDifferences(self):
@@ -178,73 +227,87 @@ class MediaDetailsScreen(Screen):
self.differencesTable.clear()
if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
currentTags = self.__currentMediaDescriptor.getTags()
targetTags = self.__targetMediaDescriptor.getTags()
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
row = (f"added media tag: key='{addedTagKey}' value='{targetTags[addedTagKey]}'",)
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].items():
if tagKey not in self.__ignoreGlobalKeys:
row = (f"add media tag: key='{tagKey}' value='{tagValue}'",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
row = (f"removed media tag: key='{removedTagKey}' value='{currentTags[removedTagKey]}'",)
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].items():
if tagKey not in self.__ignoreGlobalKeys and tagKey not in self.__removeGlobalKeys:
row = (f"remove media tag: key='{tagKey}' value='{tagValue}'",)
self.differencesTable.add_row(*map(str, row))
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
row = (f"changed media tag: key='{changedTagKey}' value='{currentTags[changedTagKey]}'->'{targetTags[changedTagKey]}'",)
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].items():
if tagKey not in self.__ignoreGlobalKeys:
row = (f"change media tag: key='{tagKey}' value='{tagValue}'",)
self.differencesTable.add_row(*map(str, row))
if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
currentTracks = self.__currentMediaDescriptor.getAllTrackDescriptors() # 0,1,2,3
targetTracks = self.__targetMediaDescriptor.getAllTrackDescriptors() # 0 <- from DB
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
#raise click.ClickException(f"add track {self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]}")
for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
addedTrack : Track = targetTracks[addedTrackIndex]
row = (f"added {addedTrack.getType().label()} track: index={addedTrackIndex} lang={addedTrack.getLanguage().threeLetter()}",)
trackDescriptor: TrackDescriptor
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
row = (f"add {trackDescriptor.getType().label()} track: index={trackDescriptor.getIndex()} lang={trackDescriptor.getLanguage().threeLetter()}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
for removedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY]:
row = (f"removed track: index={removedTrackIndex}",)
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
row = (f"remove stream #{trackIndex}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
for changedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].keys():
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
changedTrack : Track = targetTracks[changedTrackIndex]
changedTrackDiff : dict = self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY][changedTrackIndex]
changedTracks: dict = self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
if MediaDescriptor.TAGS_KEY in changedTrackDiff.keys():
targetTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors()
if DIFF_ADDED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
for addedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
addedTagValue = changedTrack.getTags()[addedTagKey]
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added key={addedTagKey} value={addedTagValue}",)
trackDiffObj: dict
for trackIndex, trackDiffObj in changedTracks.items():
ttd: TrackDescriptor = targetTrackDescriptors[trackIndex]
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
removedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
for tagKey, tagValue in removedTags.items():
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove key={tagKey} value={tagValue}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
for removedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed key={removedTagKey}",)
addedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
for tagKey, tagValue in addedTags.items():
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add key={tagKey} value={tagValue}",)
self.differencesTable.add_row(*map(str, row))
if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
if DIFF_ADDED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
for addedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]:
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added disposition={addedDisposition.label()}",)
changedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
if DIFF_CHANGED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
for tagKey, tagValue in changedTags.items():
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) change key={tagKey} value={tagValue}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
for removedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]:
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed disposition={removedDisposition.label()}",)
if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
addedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
for ad in addedDispositions:
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add disposition={ad.label()}",)
self.differencesTable.add_row(*map(str, row))
removedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
for rd in removedDispositions:
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove disposition={rd.label()}",)
self.differencesTable.add_row(*map(str, row))
@@ -258,8 +321,15 @@ class MediaDetailsScreen(Screen):
row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
self.showsTable.add_row(*map(str, row))
for mediaTagKey, mediaTagValue in self.__currentMediaDescriptor.getTags().items():
row = (mediaTagKey, mediaTagValue) # Convert each element to a string before adding
for mediaTagKey, mediaTagValue in self.__sourceMediaDescriptor.getTags().items():
textColor = None
if mediaTagKey in self.__ignoreGlobalKeys:
textColor = 'blue'
if mediaTagKey in self.__removeGlobalKeys:
textColor = 'red'
row = (formatRichColor(mediaTagKey, textColor), formatRichColor(mediaTagValue, textColor)) # Convert each element to a string before adding
self.mediaTagsTable.add_row(*map(str, row))
self.updateTracks()
@@ -293,7 +363,8 @@ class MediaDetailsScreen(Screen):
self.tracksTable.clear()
trackDescriptorList = self.__currentMediaDescriptor.getAllTrackDescriptors()
# trackDescriptorList = self.__sourceMediaDescriptor.getAllTrackDescriptors()
trackDescriptorList = self.__sourceMediaDescriptor.getTrackDescriptors()
typeCounter = {}
@@ -328,7 +399,7 @@ class MediaDetailsScreen(Screen):
# Define the columns with headers
self.column_key_show_id = self.showsTable.add_column("ID", width=10)
self.column_key_show_name = self.showsTable.add_column("Name", width=50)
self.column_key_show_name = self.showsTable.add_column("Name", width=80)
self.column_key_show_year = self.showsTable.add_column("Year", width=10)
self.showsTable.cursor_type = 'row'
@@ -337,8 +408,8 @@ class MediaDetailsScreen(Screen):
self.mediaTagsTable = DataTable(classes="two")
# Define the columns with headers
self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=50)
self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=100)
self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=30)
self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=70)
self.mediaTagsTable.cursor_type = 'row'
@@ -346,15 +417,15 @@ class MediaDetailsScreen(Screen):
self.tracksTable = DataTable(classes="two")
# Define the columns with headers
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
self.column_key_track_layout = self.tracksTable.add_column("Layout", width=10)
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
self.column_key_track_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_INDEX_COLUMN_LABEL, width=5)
self.column_key_track_type = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TYPE_COLUMN_LABEL, width=10)
self.column_key_track_sub_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL, width=8)
self.column_key_track_codec = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_CODEC_COLUMN_LABEL, width=10)
self.column_key_track_layout = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LAYOUT_COLUMN_LABEL, width=10)
self.column_key_track_language = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LANGUAGE_COLUMN_LABEL, width=15)
self.column_key_track_title = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TITLE_COLUMN_LABEL, width=48)
self.column_key_track_default = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_DEFAULT_COLUMN_LABEL, width=8)
self.column_key_track_forced = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_FORCED_COLUMN_LABEL, width=8)
self.tracksTable.cursor_type = 'row'
@@ -363,7 +434,7 @@ class MediaDetailsScreen(Screen):
self.differencesTable = DataTable(id='differences-table') # classes="triple"
# Define the columns with headers
self.column_key_differences = self.differencesTable.add_column("Differences (file->db)", width=70)
self.column_key_differences = self.differencesTable.add_column(MediaDetailsScreen.DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL, width=100)
self.differencesTable.cursor_type = 'row'
@@ -378,33 +449,36 @@ class MediaDetailsScreen(Screen):
yield self.differencesTable
# 2
yield Static(" ", classes="four")
# 3
yield Static(" ")
yield Button("Substitute", id="pattern_button")
yield Static(" ", classes="two")
# 3
# 4
yield Static("Pattern")
yield Input(type="text", id='pattern_input', classes="two")
yield Static(" ")
# 4
# 5
yield Static(" ", classes="four")
# 5
# 6
yield Static("Media Tags")
yield self.mediaTagsTable
yield Static(" ")
# 6
# 7
yield Static(" ", classes="four")
# 7
# 8
yield Static(" ")
yield Button("Set Default", id="select_default_button")
yield Button("Set Forced", id="select_forced_button")
yield Static(" ")
# 8
# 9
yield Static("Streams")
yield self.tracksTable
yield Static(" ")
@@ -412,15 +486,15 @@ class MediaDetailsScreen(Screen):
yield Footer()
def getPatternDescriptorFromInput(self):
"""Returns show id and pattern from corresponding inputs"""
patternDescriptor = {}
def getPatternObjFromInput(self):
"""Returns show id and pattern as obj from corresponding inputs"""
patternObj = {}
try:
patternDescriptor['show_id'] = self.getSelectedShowDescriptor().getId()
patternDescriptor['pattern'] = str(self.query_one("#pattern_input", Input).value)
patternObj['show_id'] = self.getSelectedShowDescriptor().getId()
patternObj['pattern'] = str(self.query_one("#pattern_input", Input).value)
except:
pass
return patternDescriptor
return {}
return patternObj
def on_button_pressed(self, event: Button.Pressed) -> None:
@@ -437,12 +511,12 @@ class MediaDetailsScreen(Screen):
if event.button.id == "select_default_button":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
self.__currentMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.__sourceMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.updateTracks()
if event.button.id == "select_forced_button":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
self.__currentMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.__sourceMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.updateTracks()
@@ -462,7 +536,7 @@ class MediaDetailsScreen(Screen):
kwargs[TrackDescriptor.INDEX_KEY] = int(selected_track_data[0])
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(selected_track_data[1])
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(selected_track_data[2])
kwargs[TrackDescriptor.CODEC_NAME_KEY] = int(selected_track_data[3])
kwargs[TrackDescriptor.CODEC_KEY] = TrackCodec.fromLabel(selected_track_data[3])
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(selected_track_data[4])
return TrackDescriptor(**kwargs)
@@ -473,11 +547,10 @@ class MediaDetailsScreen(Screen):
return None
def getSelectedShowDescriptor(self):
def getSelectedShowDescriptor(self) -> ShowDescriptor:
try:
# Fetch the currently selected row when 'Enter' is pressed
#selected_row_index = self.table.cursor_row
row_key, col_key = self.showsTable.coordinate_to_cell_key(self.showsTable.cursor_coordinate)
if row_key is not None:
@@ -500,7 +573,15 @@ class MediaDetailsScreen(Screen):
def handle_new_pattern(self, showDescriptor: ShowDescriptor):
""""""
if type(showDescriptor) is not ShowDescriptor:
raise TypeError("MediaDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor")
self.removeShow()
showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
if showRowIndex is None:
show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
self.showsTable.add_row(*map(str, show))
@@ -508,26 +589,29 @@ class MediaDetailsScreen(Screen):
if showRowIndex is not None:
self.showsTable.move_cursor(row=showRowIndex)
patternDescriptor = self.getPatternDescriptorFromInput()
patternObj = self.getPatternObjFromInput()
if patternDescriptor:
patternId = self.__pc.addPattern(patternDescriptor)
if patternObj:
mediaTags = {}
for tagKey, tagValue in self.__sourceMediaDescriptor.getTags().items():
# Filter tags that make no sense to preserve
if tagKey not in self.__ignoreGlobalKeys and tagKey not in self.__removeGlobalKeys:
mediaTags[tagKey] = tagValue
patternId = self.__pc.savePatternSchema(
patternObj,
trackDescriptors=self.__sourceMediaDescriptor.getTrackDescriptors(),
mediaTags=mediaTags,
)
if patternId:
self.highlightPattern(False)
for tagKey, tagValue in self.__currentMediaDescriptor.getTags().items():
self.__tac.updateMediaTag(patternId, tagKey, tagValue)
for trackDescriptor in self.__currentMediaDescriptor.getAllTrackDescriptors():
self.__tc.addTrack(trackDescriptor, patternId = patternId)
def action_new_pattern(self):
"""Adding new patterns
try:
self.__currentMediaDescriptor.checkConfiguration()
except ValueError:
return
If the corresponding show does not exists in DB it is added beforehand"""
selectedShowDescriptor = self.getSelectedShowDescriptor()
@@ -540,90 +624,104 @@ class MediaDetailsScreen(Screen):
def action_update_pattern(self):
"""When updating the database the actions must reverse the difference (eq to diff db->file)"""
"""Updating patterns
When updating the database the actions must reverse the difference (eq to diff db->file)"""
if self.__currentPattern is not None:
patternDescriptor = self.getPatternDescriptorFromInput()
if (patternDescriptor
and self.__currentPattern.getPattern() != patternDescriptor['pattern']):
return self.__pc.updatePattern(self.__currentPattern.getId(), patternDescriptor)
patternObj = self.getPatternObjFromInput()
if (patternObj
and self.__currentPattern.getPattern() != patternObj['pattern']):
return self.__pc.updatePattern(self.__currentPattern.getId(), patternObj)
self.loadProperties()
if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
# __mediaChangeSetObj is file vs database
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for addedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].keys():
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} addedTagKey={addedTagKey}")
self.__tac.deleteMediaTagByKey(self.__currentPattern.getId(), addedTagKey)
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
currentTags = self.__currentMediaDescriptor.getTags()
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for removedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].keys():
currentTags = self.__sourceMediaDescriptor.getTags()
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} removedTagKey={removedTagKey} currentTags={currentTags[removedTagKey]}")
self.__tac.updateMediaTag(self.__currentPattern.getId(), removedTagKey, currentTags[removedTagKey])
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
currentTags = self.__currentMediaDescriptor.getTags()
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for changedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].keys():
currentTags = self.__sourceMediaDescriptor.getTags()
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} changedTagKey={changedTagKey} currentTags={currentTags[changedTagKey]}")
self.__tac.updateMediaTag(self.__currentPattern.getId(), changedTagKey, currentTags[changedTagKey])
if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
if targetTracks:
self.__tc.deleteTrack(targetTracks[0].getId()) # id
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
for removedTrackIndex, removedTrack in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY].items():
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
#targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
# if targetTracks:
# self.__tc.deleteTrack(targetTracks[0].getId()) # id
# self.__tc.deleteTrack(targetTracks[0].getId())
self.__tc.addTrack(trackDescriptor, patternId = self.__currentPattern.getId())
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
trackDescriptor: TrackDescriptor
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
# Add track via inspect/update
self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
#self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
self.__tc.deleteTrack(trackDescriptor.getId())
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# [vsTracks[tp].getIndex()] = trackDiff
for changedTrackIndex, changedTrackDiff in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].items():
for trackIndex, trackDiff in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY].items():
changedTargetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
changedTargeTrackId = changedTargetTracks[0].getId() if changedTargetTracks else None
changedTargetTrackIndex = changedTargetTracks[0].getIndex() if changedTargetTracks else None
targetTracks = [t for t in self.__targetMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
targetTrackId = targetTracks[0].getId() if targetTracks else None
targetTrackIndex = targetTracks[0].getIndex() if targetTracks else None
changedCurrentTracks = [t for t in self.__currentMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
changedCurrentTracks = [t for t in self.__sourceMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
# changedCurrentTrackId #HINT: Undefined as track descriptors do not come from file with track_id
if TrackDescriptor.TAGS_KEY in changedTrackDiff.keys():
changedTrackTagsDiff = changedTrackDiff[TrackDescriptor.TAGS_KEY]
if TrackDescriptor.TAGS_KEY in trackDiff.keys():
tagsDiff = trackDiff[TrackDescriptor.TAGS_KEY]
if DIFF_ADDED_KEY in changedTrackTagsDiff.keys():
for addedTrackTagKey in changedTrackTagsDiff[DIFF_ADDED_KEY]:
if DIFF_ADDED_KEY in tagsDiff.keys():
for tagKey, tagValue in tagsDiff[DIFF_ADDED_KEY].items():
if changedTargetTracks:
self.__tac.deleteTrackTagByKey(changedTargeTrackId, addedTrackTagKey)
# if targetTracks:
# self.__tac.deleteTrackTagByKey(targetTrackId, addedTrackTagKey)
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
if DIFF_REMOVED_KEY in changedTrackTagsDiff.keys():
for removedTrackTagKey in changedTrackTagsDiff[DIFF_REMOVED_KEY]:
if changedCurrentTracks:
self.__tac.updateTrackTag(changedTargeTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
if DIFF_CHANGED_KEY in changedTrackTagsDiff.keys():
for changedTrackTagKey in changedTrackTagsDiff[DIFF_CHANGED_KEY]:
if changedCurrentTracks:
self.__tac.updateTrackTag(changedTargeTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
if DIFF_REMOVED_KEY in tagsDiff.keys():
for tagKey, tagValue in tagsDiff[DIFF_REMOVED_KEY].items():
# if changedCurrentTracks:
# self.__tac.updateTrackTag(targetTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
self.__tac.deleteTrackTagByKey(targetTrackId, tagKey)
if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
changedTrackDispositionDiff = changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
if DIFF_CHANGED_KEY in tagsDiff.keys():
for tagKey, tagValue in tagsDiff[DIFF_CHANGED_KEY].items():
# if changedCurrentTracks:
# self.__tac.updateTrackTag(targetTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
if TrackDescriptor.DISPOSITION_SET_KEY in trackDiff.keys():
changedTrackDispositionDiff = trackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
if DIFF_ADDED_KEY in changedTrackDispositionDiff.keys():
for changedTrackAddedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
if changedTargetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackAddedDisposition, False)
for changedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
if targetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, True)
if DIFF_REMOVED_KEY in changedTrackDispositionDiff.keys():
for changedTrackRemovedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
if changedTargetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackRemovedDisposition, True)
for changedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
if targetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, False)
self.updateDifferences()
@@ -632,11 +730,11 @@ class MediaDetailsScreen(Screen):
def action_edit_pattern(self):
patternDescriptor = self.getPatternDescriptorFromInput()
patternObj = self.getPatternObjFromInput()
if patternDescriptor['pattern']:
if patternObj['pattern']:
selectedPatternId = self.__pc.findPattern(patternDescriptor)
selectedPatternId = self.__pc.findPattern(patternObj)
if selectedPatternId is None:
raise click.ClickException(f"MediaDetailsScreen.action_edit_pattern(): Pattern to edit has no id")
@@ -647,4 +745,3 @@ class MediaDetailsScreen(Screen):
def handle_edit_pattern(self, screenResult):
self.query_one("#pattern_input", Input).value = screenResult['pattern']
self.updateDifferences()

View File

@@ -0,0 +1,20 @@
"""Load ORM model modules so SQLAlchemy relationship strings can resolve."""
from .show import Base, Show
from .pattern import Pattern
from .track import Track
from .track_tag import TrackTag
from .media_tag import MediaTag
from .shifted_season import ShiftedSeason
from .property import Property
__all__ = [
'Base',
'Show',
'Pattern',
'Track',
'TrackTag',
'MediaTag',
'ShiftedSeason',
'Property',
]
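Why this import-only module matters: SQLAlchemy resolves string targets like relationship('Track') from its class registry when mappers are configured, so every model class has to be imported before the first query. A minimal sketch with hypothetical Parent/Child models (not part of ffx):
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parents'
    id = Column(Integer, primary_key=True)
    # Target given as the string 'Child'; SQLAlchemy resolves it from the
    # registry at mapper-configuration time, not at import time.
    children = relationship('Child', back_populates='parent')

class Child(Base):
    __tablename__ = 'children'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parents.id'))
    parent = relationship('Parent', back_populates='children')

# If the module defining Child were never imported, the first use of
# Parent would fail with "expression 'Child' failed to locate a name".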

View File

@@ -1,6 +1,6 @@
import click
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy import Column, Integer, String, Text, ForeignKey, UniqueConstraint
from sqlalchemy.orm import relationship
from .show import Base, Show
@@ -12,6 +12,9 @@ from ffx.show_descriptor import ShowDescriptor
class Pattern(Base):
__tablename__ = 'patterns'
__table_args__ = (
UniqueConstraint('show_id', 'pattern', name='uq_patterns_show_id_pattern'),
)
# v1.x
id = Column(Integer, primary_key=True)
@@ -31,9 +34,13 @@ class Pattern(Base):
tracks = relationship('Track', back_populates='pattern', cascade="all, delete", lazy='joined')
media_tags = relationship('MediaTag', back_populates='pattern', cascade="all, delete", lazy='joined')
quality = Column(Integer, default=0)
notes = Column(Text, default='')
def getId(self):
return int(self.id)

View File

@@ -1,156 +1,411 @@
import click, re
import re
import click
from ffx.model.media_tag import MediaTag
from ffx.model.pattern import Pattern
from ffx.model.track import Track
from ffx.model.track_tag import TrackTag
from ffx.track_descriptor import TrackDescriptor
from ffx.track_disposition import TrackDisposition
class PatternController():
class DuplicatePatternMatchError(click.ClickException):
pass
class InvalidPatternSchemaError(click.ClickException):
pass
class PatternController:
_compiled_regex_cache: dict[str, re.Pattern] = {}
def __init__(self, context):
self.context = context
self.Session = self.context['database']['session'] # convenience
self.Session = self.context["database"]["session"]
self.__configurationData = self.context["config"].getData()
def addPattern(self, patternDescriptor):
metadataConfiguration = (
self.__configurationData["metadata"]
if "metadata" in self.__configurationData.keys()
else {}
)
self.__removeTrackKeys = (
metadataConfiguration["streams"]["remove"]
if "streams" in metadataConfiguration.keys()
and "remove" in metadataConfiguration["streams"].keys()
else []
)
self.__ignoreTrackKeys = (
metadataConfiguration["streams"]["ignore"]
if "streams" in metadataConfiguration.keys()
and "ignore" in metadataConfiguration["streams"].keys()
else []
)
@classmethod
def _clear_regex_cache(cls):
cls._compiled_regex_cache.clear()
@classmethod
def _compile_pattern_expression(cls, pattern_id: int, expression: str) -> re.Pattern:
expression_text = str(expression)
compiled = cls._compiled_regex_cache.get(expression_text)
if compiled is None:
try:
compiled = re.compile(expression_text)
except re.error as ex:
raise click.ClickException(
f"Pattern #{pattern_id} contains an invalid regex {expression_text!r}: {ex}"
)
cls._compiled_regex_cache[expression_text] = compiled
return compiled
def _coerce_pattern_fields(self, patternObj):
return {
"show_id": int(patternObj["show_id"]),
"pattern": str(patternObj["pattern"]),
"quality": int(patternObj.get("quality", 0) or 0),
"notes": str(patternObj.get("notes", "")),
}
def _coerce_media_tags(self, mediaTags):
return {
str(tagKey): str(tagValue)
for tagKey, tagValue in (mediaTags or {}).items()
}
def _normalize_track_descriptors(self, trackDescriptors):
if trackDescriptors is None:
raise InvalidPatternSchemaError(
"Patterns must define at least one track before they can be stored."
)
normalized_descriptors = []
for trackDescriptor in trackDescriptors:
if type(trackDescriptor) is not TrackDescriptor:
raise TypeError(
"PatternController: All track descriptors are required to be of type TrackDescriptor"
)
normalized_descriptors.append(trackDescriptor)
if not normalized_descriptors:
raise InvalidPatternSchemaError(
"Patterns must define at least one track before they can be stored."
)
normalized_descriptors = sorted(
normalized_descriptors, key=lambda descriptor: descriptor.getIndex()
)
index_set = {descriptor.getIndex() for descriptor in normalized_descriptors}
expected_indexes = set(range(len(normalized_descriptors)))
if index_set != expected_indexes:
raise click.ClickException(
"Pattern tracks must use a contiguous zero-based index order."
)
return normalized_descriptors
def _ensure_unique_pattern_definition(
self,
session,
show_id: int,
pattern_expression: str,
exclude_pattern_id: int | None = None,
):
query = session.query(Pattern).filter(
Pattern.show_id == show_id,
Pattern.pattern == pattern_expression,
)
if exclude_pattern_id is not None:
query = query.filter(Pattern.id != int(exclude_pattern_id))
existing_pattern = query.first()
if existing_pattern is not None:
raise click.ClickException(
f"Pattern {pattern_expression!r} already exists for show #{show_id}."
)
def _build_track_row(self, trackDescriptor: TrackDescriptor) -> Track:
track = Track(
track_type=int(trackDescriptor.getType().index()),
codec_name=str(trackDescriptor.getCodec().identifier()),
index=int(trackDescriptor.getIndex()),
source_index=int(trackDescriptor.getSourceIndex()),
disposition_flags=int(
TrackDisposition.toFlags(trackDescriptor.getDispositionSet())
),
audio_layout=trackDescriptor.getAudioLayout().index(),
)
for tagKey, tagValue in trackDescriptor.getTags().items():
if tagKey in self.__ignoreTrackKeys or tagKey in self.__removeTrackKeys:
continue
track.track_tags.append(TrackTag(key=str(tagKey), value=str(tagValue)))
return track
def _replace_pattern_schema(
self,
session,
pattern: Pattern,
mediaTags: dict[str, str],
trackDescriptors: list[TrackDescriptor],
):
for mediaTag in list(pattern.media_tags):
session.delete(mediaTag)
for track in list(pattern.tracks):
session.delete(track)
session.flush()
for tagKey, tagValue in mediaTags.items():
pattern.media_tags.append(MediaTag(key=str(tagKey), value=str(tagValue)))
for trackDescriptor in trackDescriptors:
pattern.tracks.append(self._build_track_row(trackDescriptor))
def _validate_persisted_pattern(self, pattern: Pattern):
if not pattern.tracks:
raise InvalidPatternSchemaError(
f"Pattern #{pattern.getId()} ({pattern.getPattern()!r}) is invalid because it has no tracks."
)
def savePatternSchema(
self,
patternObj,
trackDescriptors,
mediaTags=None,
patternId: int | None = None,
) -> int:
fields = self._coerce_pattern_fields(patternObj)
normalized_tracks = self._normalize_track_descriptors(trackDescriptors)
normalized_tags = self._coerce_media_tags(mediaTags)
session = None
try:
session = self.Session()
self._ensure_unique_pattern_definition(
session,
fields["show_id"],
fields["pattern"],
exclude_pattern_id=patternId,
)
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']),
Pattern.pattern == str(patternDescriptor['pattern']))
if not q.count():
pattern = Pattern(show_id = int(patternDescriptor['show_id']),
pattern = str(patternDescriptor['pattern']))
s.add(pattern)
s.commit()
return pattern.getId()
if patternId is None:
pattern = Pattern(
show_id=fields["show_id"],
pattern=fields["pattern"],
quality=fields["quality"],
notes=fields["notes"],
)
session.add(pattern)
session.flush()
else:
return 0
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
if pattern is None:
raise click.ClickException(
f"PatternController.savePatternSchema(): Pattern #{patternId} not found"
)
pattern.show_id = fields["show_id"]
pattern.pattern = fields["pattern"]
pattern.quality = fields["quality"]
pattern.notes = fields["notes"]
self._replace_pattern_schema(
session,
pattern,
normalized_tags,
normalized_tracks,
)
session.commit()
self._clear_regex_cache()
return pattern.getId()
except click.ClickException:
raise
except Exception as ex:
raise click.ClickException(f"PatternController.addPattern(): {repr(ex)}")
raise click.ClickException(
f"PatternController.savePatternSchema(): {repr(ex)}"
)
finally:
s.close()
if session is not None:
session.close()
def addPattern(self, patternObj, trackDescriptors=None, mediaTags=None):
return self.savePatternSchema(
patternObj,
trackDescriptors=trackDescriptors,
mediaTags=mediaTags,
)
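A hedged usage sketch of the new unified entry point; the dict keys follow _coerce_pattern_fields() above, and all concrete values below are placeholders:
# Hypothetical values; trackDescriptors must be TrackDescriptor instances with
# contiguous zero-based indexes, as _normalize_track_descriptors() enforces.
pc = PatternController(context)  # context as expected by __init__() above
patternObj = {'show_id': 42, 'pattern': r'Show\.S(\d{2})E(\d{2})', 'quality': 23, 'notes': ''}
patternId = pc.savePatternSchema(patternObj, trackDescriptors=tracks, mediaTags={'title': 'Show'})
# Passing patternId switches the same call from insert to in-place update:
pc.savePatternSchema(patternObj, trackDescriptors=tracks, mediaTags={'title': 'Show'}, patternId=patternId)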
def updatePattern(self, patternId, patternDescriptor):
def updatePattern(self, patternId, patternObj):
fields = self._coerce_pattern_fields(patternObj)
session = None
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == int(patternId))
session = self.Session()
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
if q.count():
if pattern is not None:
self._ensure_unique_pattern_definition(
session,
fields["show_id"],
fields["pattern"],
exclude_pattern_id=patternId,
)
self._validate_persisted_pattern(pattern)
pattern = q.first()
pattern.show_id = fields["show_id"]
pattern.pattern = fields["pattern"]
pattern.quality = fields["quality"]
pattern.notes = fields["notes"]
pattern.show_id = int(patternDescriptor['show_id'])
pattern.pattern = str(patternDescriptor['pattern'])
s.commit()
session.commit()
self._clear_regex_cache()
return True
else:
return False
except click.ClickException:
raise
except Exception as ex:
raise click.ClickException(f"PatternController.updatePattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def findPattern(self, patternDescriptor):
def findPattern(self, patternObj):
session = None
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']), Pattern.pattern == str(patternDescriptor['pattern']))
session = self.Session()
pattern = (
session.query(Pattern)
.filter(
Pattern.show_id == int(patternObj["show_id"]),
Pattern.pattern == str(patternObj["pattern"]),
)
.first()
)
if q.count():
pattern = q.first()
if pattern is not None:
return int(pattern.id)
else:
return None
except Exception as ex:
raise click.ClickException(f"PatternController.findPattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def getPattern(self, patternId : int):
def getPatternsForShow(self, showId: int) -> list[Pattern]:
if type(showId) is not int:
raise ValueError(
"PatternController.getPatternsForShow(): Argument showId is required to be of type int"
)
session = None
try:
session = self.Session()
return (
session.query(Pattern)
.filter(Pattern.show_id == int(showId))
.order_by(Pattern.id)
.all()
)
except Exception as ex:
raise click.ClickException(f"PatternController.getPatternsForShow(): {repr(ex)}")
finally:
if session is not None:
session.close()
def getPattern(self, patternId: int):
if type(patternId) is not int:
raise ValueError(f"PatternController.getPattern(): Argument patternId is required to be of type int")
raise ValueError(
"PatternController.getPattern(): Argument patternId is required to be of type int"
)
session = None
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == int(patternId))
return q.first() if q.count() else None
session = self.Session()
return session.query(Pattern).filter(Pattern.id == int(patternId)).first()
except Exception as ex:
raise click.ClickException(f"PatternController.getPattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def deletePattern(self, patternId):
session = None
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.id == int(patternId))
session = self.Session()
pattern = session.query(Pattern).filter(Pattern.id == int(patternId)).first()
if q.count():
#DAFUQ: https://stackoverflow.com/a/19245058
# q.delete()
pattern = q.first()
s.delete(pattern)
s.commit()
if pattern is not None:
session.delete(pattern)
session.commit()
self._clear_regex_cache()
return True
return False
except Exception as ex:
raise click.ClickException(f"PatternController.deletePattern(): {repr(ex)}")
finally:
s.close()
if session is not None:
session.close()
def matchFilename(self, filename : str) -> dict:
"""Returns dict {'match': <a regex match obj>, 'pattern': <ffx pattern obj>} or empty dict of no pattern was found"""
def matchFilename(self, filename: str) -> dict:
"""Return {'match': regex match, 'pattern': Pattern} or {} when unmatched."""
session = None
try:
s = self.Session()
q = s.query(Pattern)
session = self.Session()
matches = []
query = session.query(Pattern).order_by(Pattern.show_id, Pattern.id)
matchResult = {}
for pattern in q.all():
patternMatch = re.search(str(pattern.pattern), str(filename))
if patternMatch is not None:
matchResult['match'] = patternMatch
matchResult['pattern'] = pattern
return matchResult
for pattern in query.all():
compiled = self._compile_pattern_expression(
pattern.getId(),
pattern.getPattern(),
)
patternMatch = compiled.search(str(filename))
if patternMatch is None:
continue
self._validate_persisted_pattern(pattern)
matches.append({"match": patternMatch, "pattern": pattern})
if not matches:
return {}
if len(matches) > 1:
duplicateDescriptions = ", ".join(
[
f"show #{match['pattern'].getShowId()} pattern #{match['pattern'].getId()} {match['pattern'].getPattern()!r}"
for match in matches
]
)
raise DuplicatePatternMatchError(
f"Filename {filename!r} matched more than one pattern: {duplicateDescriptions}"
)
return matches[0]
except click.ClickException:
raise
except Exception as ex:
raise click.ClickException(f"PatternController.matchFilename(): {repr(ex)}")
finally:
s.close()
# def getMediaDescriptor(self, context, patternId):
#
# try:
# s = self.Session()
# q = s.query(Pattern).filter(Pattern.id == int(patternId))
#
# if q.count():
# return q.first().getMediaDescriptor(context)
# else:
# return None
#
# except Exception as ex:
# raise click.ClickException(f"PatternController.getMediaDescriptor(): {repr(ex)}")
# finally:
# s.close()
if session is not None:
session.close()
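For reference, a sketch of the matcher's new contract; the context shape is assumed from the constructor, and the filename is made up:
# context must provide context['database']['session'] and context['config'],
# as PatternController.__init__() above expects.
pc = PatternController(context)
result = pc.matchFilename('Some.Show.S01E02.1080p.mkv')
if result:
    print(result['pattern'].getId(), result['match'].group(0))
# Unlike the old loop, which silently kept the last hit, two stored patterns
# matching the same filename now raise DuplicatePatternMatchError.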

View File

@@ -2,22 +2,17 @@ import click, re
from typing import List
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input, DataTable
from textual.widgets import Header, Footer, Static, Button, Input, DataTable, TextArea
from textual.containers import Grid
from ffx.model.pattern import Pattern
from ffx.model.track import Track
from .pattern_controller import PatternController
from .show_controller import ShowController
from .track_controller import TrackController
from .tag_controller import TagController
from .track_details_screen import TrackDetailsScreen
from .track_delete_screen import TrackDeleteScreen
from .tag_details_screen import TagDetailsScreen
from .tag_delete_screen import TagDeleteScreen
from .screen_support import build_screen_bootstrap, build_screen_controllers
from ffx.track_type import TrackType
@@ -30,6 +25,8 @@ from ffx.file_properties import FileProperties
from ffx.iso_language import IsoLanguage
from ffx.audio_layout import AudioLayout
from ffx.helper import formatRichColor, removeRichColor
# Screen[dict[int, str, int]]
class PatternDetailsScreen(Screen):
@@ -37,8 +34,8 @@ class PatternDetailsScreen(Screen):
CSS = """
Grid {
grid-size: 7 13;
grid-rows: 2 2 2 2 2 8 2 2 8 2 2 2 2;
grid-size: 7 17;
grid-rows: 2 2 2 2 2 2 6 2 2 8 2 2 8 2 2 2 2;
grid-columns: 25 25 25 25 25 25 25;
height: 100%;
width: 100%;
@@ -87,6 +84,12 @@ class PatternDetailsScreen(Screen):
column-span: 7;
}
.four_box {
min-height: 6;
}
.box {
height: 100%;
border: solid green;
@@ -100,54 +103,43 @@ class PatternDetailsScreen(Screen):
def __init__(self, patternId = None, showId = None):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
bootstrap = build_screen_bootstrap(self.app.getContext())
self.context = bootstrap.context
self.__pc = PatternController(context = self.context)
self.__sc = ShowController(context = self.context)
self.__tc = TrackController(context = self.context)
self.__tac = TagController(context = self.context)
self.__removeGlobalKeys = bootstrap.remove_global_keys
self.__ignoreGlobalKeys = bootstrap.ignore_global_keys
controllers = build_screen_controllers(
self.context,
pattern=True,
show=True,
track=True,
tag=True,
)
self.__pc = controllers['pattern']
self.__sc = controllers['show']
self.__tc = controllers['track']
self.__tac = controllers['tag']
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else None
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
#TODO: per controller
def loadTracks(self, show_id):
try:
tracks = {}
tracks['audio'] = {}
tracks['subtitle'] = {}
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(show_id))
return [{'id': int(p.id), 'pattern': p.pattern} for p in q.all()]
except Exception as ex:
raise click.ClickException(f"loadTracks(): {repr(ex)}")
finally:
s.close()
self.__draftTracks : List[TrackDescriptor] = []
self.__draftTags : dict[str, str] = {}
def updateTracks(self):
self.tracksTable.clear()
if self.__pattern is not None:
tracks = self.__tc.findTracks(self.__pattern.getId())
tracks = self.getCurrentTrackDescriptors()
typeCounter = {}
tr: Track
for tr in tracks:
td: TrackDescriptor
for td in tracks:
td : TrackDescriptor = tr.getDescriptor(self.context)
if (trackType := td.getType()) != TrackType.ATTACHMENT:
trackType = td.getType()
if not trackType in typeCounter.keys():
typeCounter[trackType] = 0
@@ -155,6 +147,7 @@ class PatternDetailsScreen(Screen):
trackLanguage = td.getLanguage()
audioLayout = td.getAudioLayout()
row = (td.getIndex(),
trackType.label(),
typeCounter[trackType],
@@ -172,11 +165,47 @@ class PatternDetailsScreen(Screen):
typeCounter[trackType] += 1
def getCurrentTrackDescriptors(self) -> List[TrackDescriptor]:
if self.__pattern is not None:
return self.__tc.findSiblingDescriptors(self.__pattern.getId())
return list(self.__draftTracks)
def normalizeDraftTracks(self):
typeCounter = {}
for index, trackDescriptor in enumerate(self.__draftTracks):
trackDescriptor.setIndex(index)
trackType = trackDescriptor.getType()
subIndex = typeCounter.get(trackType, 0)
trackDescriptor.setSubIndex(subIndex)
typeCounter[trackType] = subIndex + 1
if trackDescriptor.getSourceIndex() < 0:
trackDescriptor.setSourceIndex(index)
def swapTracks(self, trackIndex1: int, trackIndex2: int):
ti1 = int(trackIndex1)
ti2 = int(trackIndex2)
if self.__pattern is None:
numSiblings = len(self.__draftTracks)
if ti1 < 0 or ti1 >= numSiblings:
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex1 ({ti1}) is out of range ({numSiblings})")
if ti2 < 0 or ti2 >= numSiblings:
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex2 ({ti2}) is out of range ({numSiblings})")
self.__draftTracks[ti1], self.__draftTracks[ti2] = self.__draftTracks[ti2], self.__draftTracks[ti1]
self.normalizeDraftTracks()
self.updateTracks()
return
siblingDescriptors: List[TrackDescriptor] = self.__tc.findSiblingDescriptors(self.__pattern.getId())
numSiblings = len(siblingDescriptors)
@@ -212,12 +241,21 @@ class PatternDetailsScreen(Screen):
self.tagsTable.clear()
if self.__pattern is not None:
tags = self.__tac.findAllMediaTags(self.__pattern.getId())
tags = (
self.__tac.findAllMediaTags(self.__pattern.getId())
if self.__pattern is not None
else self.__draftTags
)
for tagKey, tagValue in tags.items():
row = (tagKey, tagValue)
textColor = None
if tagKey in self.__ignoreGlobalKeys:
textColor = 'blue'
if tagKey in self.__removeGlobalKeys:
textColor = 'red'
row = (formatRichColor(tagKey, textColor), formatRichColor(tagValue, textColor))
self.tagsTable.add_row(*map(str, row))
@@ -230,6 +268,12 @@ class PatternDetailsScreen(Screen):
self.query_one("#pattern_input", Input).value = str(self.__pattern.getPattern())
if self.__pattern and self.__pattern.quality:
self.query_one("#quality_input", Input).value = str(self.__pattern.quality)
if self.__pattern and self.__pattern.notes:
self.query_one("#notes_textarea", TextArea).text = str(self.__pattern.notes)
self.updateTags()
self.updateTracks()
@@ -276,64 +320,71 @@ class PatternDetailsScreen(Screen):
# 3
yield Static(" ", classes="seven")
# 4
yield Static(" ", classes="seven")
yield Static("Quality")
yield Input(type="integer", id="quality_input")
yield Static(' ', classes="five")
# 5
yield Static(" ", classes="seven")
# 6
yield Static("Notes")
yield Static(" ", classes="six")
# 7
yield TextArea(id="notes_textarea", classes="four_box seven")
# 8
yield Static(" ", classes="seven")
# 9
yield Static("Media Tags")
if self.__pattern is not None:
yield Button("Add", id="button_add_tag")
yield Button("Edit", id="button_edit_tag")
yield Button("Delete", id="button_delete_tag")
else:
yield Static(" ")
yield Static(" ")
yield Static(" ")
yield Static(" ")
yield Static(" ")
yield Static(" ")
# 6
yield self.tagsTable
# 7
yield Static(" ", classes="seven")
# 8
yield Static("Streams")
if self.__pattern is not None:
yield Button("Add", id="button_add_track")
yield Button("Edit", id="button_edit_track")
yield Button("Delete", id="button_delete_track")
else:
yield Static(" ")
yield Static(" ")
yield Static(" ")
yield Static(" ")
yield Button("Up", id="button_track_up")
yield Button("Down", id="button_track_down")
# 9
yield self.tracksTable
# 10
yield Static(" ", classes="seven")
yield self.tagsTable
# 11
yield Static(" ", classes="seven")
# 12
yield Static("Streams")
yield Button("Add", id="button_add_track")
yield Button("Edit", id="button_edit_track")
yield Button("Delete", id="button_delete_track")
yield Static(" ")
yield Button("Up", id="button_track_up")
yield Button("Down", id="button_track_down")
# 13
yield self.tracksTable
# 14
yield Static(" ", classes="seven")
# 15
yield Static(" ", classes="seven")
# 16
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
yield Static(" ", classes="five")
# 13
# 17
yield Static(" ", classes="seven")
yield Footer()
@@ -342,17 +393,20 @@ class PatternDetailsScreen(Screen):
def getPatternFromInput(self):
return str(self.query_one("#pattern_input", Input).value)
def getQualityFromInput(self):
try:
return int(self.query_one("#quality_input", Input).value)
except ValueError:
return 0
def getNotesFromInput(self):
return str(self.query_one("#notes_textarea", TextArea).text)
def getSelectedTrackDescriptor(self):
if not self.__pattern:
return None
try:
# Fetch the currently selected row when 'Enter' is pressed
#selected_row_index = self.table.cursor_row
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
if row_key is not None:
@@ -361,9 +415,11 @@ class PatternDetailsScreen(Screen):
trackIndex = int(selected_track_data[0])
trackSubIndex = int(selected_track_data[2])
return self.__tc.getTrack(self.__pattern.getId(), trackIndex).getDescriptor(self.context, subIndex=trackSubIndex)
for trackDescriptor in self.getCurrentTrackDescriptors():
if (trackDescriptor.getIndex() == trackIndex
and trackDescriptor.getSubIndex() == trackSubIndex):
return trackDescriptor
else:
return None
except CellDoesNotExist:
@@ -382,8 +438,8 @@ class PatternDetailsScreen(Screen):
if row_key is not None:
selected_tag_data = self.tagsTable.get_row(row_key)
tagKey = str(selected_tag_data[0])
tagValue = str(selected_tag_data[1])
tagKey = removeRichColor(selected_tag_data[0])
tagValue = removeRichColor(selected_tag_data[1])
return tagKey, tagValue
@@ -403,6 +459,8 @@ class PatternDetailsScreen(Screen):
patternDescriptor = {}
patternDescriptor['show_id'] = self.__showDescriptor.getId()
patternDescriptor['pattern'] = self.getPatternFromInput()
patternDescriptor['quality'] = self.getQualityFromInput()
patternDescriptor['notes'] = self.getNotesFromInput()
if self.__pattern is not None:
@@ -413,7 +471,11 @@ class PatternDetailsScreen(Screen):
self.app.pop_screen()
else:
patternId = self.__pc.addPattern(patternDescriptor)
patternId = self.__pc.savePatternSchema(
patternDescriptor,
trackDescriptors=self.__draftTracks,
mediaTags=self.__draftTags,
)
if patternId:
self.dismiss(patternDescriptor)
else:
@@ -425,32 +487,51 @@ class PatternDetailsScreen(Screen):
self.app.pop_screen()
# Save pattern when just created before adding streams
if self.__pattern is not None:
numTracks = len(self.tracksTable.rows)
numTracks = len(self.getCurrentTrackDescriptors())
if event.button.id == "button_add_track":
self.app.push_screen(TrackDetailsScreen(patternId = self.__pattern.getId(), index = numTracks), self.handle_add_track)
self.app.push_screen(
TrackDetailsScreen(
patternId=self.__pattern.getId() if self.__pattern is not None else None,
patternLabel=self.getPatternFromInput(),
siblingTrackDescriptors=self.getCurrentTrackDescriptors(),
index=numTracks,
),
self.handle_add_track,
)
selectedTrack = self.getSelectedTrackDescriptor()
if selectedTrack is not None:
if event.button.id == "button_edit_track":
self.app.push_screen(TrackDetailsScreen(trackDescriptor = selectedTrack), self.handle_edit_track)
self.app.push_screen(
TrackDetailsScreen(
trackDescriptor=selectedTrack,
patternId=self.__pattern.getId() if self.__pattern is not None else None,
patternLabel=self.getPatternFromInput(),
siblingTrackDescriptors=self.getCurrentTrackDescriptors(),
),
self.handle_edit_track,
)
if event.button.id == "button_delete_track":
self.app.push_screen(TrackDeleteScreen(trackDescriptor = selectedTrack), self.handle_delete_track)
self.app.push_screen(
TrackDeleteScreen(trackDescriptor = selectedTrack),
self.handle_delete_track,
)
if event.button.id == "button_add_tag":
if self.__pattern is not None:
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
if event.button.id == "button_edit_tag":
tagKey, tagValue = self.getSelectedTag()
selectedTag = self.getSelectedTag()
if selectedTag is not None:
tagKey, tagValue = selectedTag
self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)
if event.button.id == "button_delete_tag":
tagKey, tagValue = self.getSelectedTag()
selectedTag = self.getSelectedTag()
if selectedTag is not None:
tagKey, tagValue = selectedTag
self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)
@@ -468,6 +549,7 @@ class PatternDetailsScreen(Screen):
if event.button.id == "button_track_up":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
if selectedTrackDescriptor is not None:
selectedTrackIndex = selectedTrackDescriptor.getIndex()
if selectedTrackIndex > 0 and selectedTrackIndex < self.tracksTable.row_count:
@@ -478,6 +560,7 @@ class PatternDetailsScreen(Screen):
if event.button.id == "button_track_down":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
if selectedTrackDescriptor is not None:
selectedTrackIndex = selectedTrackDescriptor.getIndex()
if selectedTrackIndex >= 0 and selectedTrackIndex < (self.tracksTable.row_count - 1):
@@ -486,59 +569,88 @@ class PatternDetailsScreen(Screen):
def handle_add_track(self, trackDescriptor : TrackDescriptor):
if trackDescriptor is None:
return
dispoSet = trackDescriptor.getDispositionSet()
trackType = trackDescriptor.getType()
index = trackDescriptor.getIndex()
subIndex = trackDescriptor.getSubIndex()
language = trackDescriptor.getLanguage()
title = trackDescriptor.getTitle()
if self.__pattern is not None:
self.__tc.addTrack(trackDescriptor, patternId=self.__pattern.getId())
else:
self.__draftTracks.append(trackDescriptor)
self.normalizeDraftTracks()
row = (index,
trackType.label(),
subIndex,
" ",
language.label(),
title,
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
'Yes' if TrackDisposition.FORCED in dispoSet else 'No')
self.tracksTable.add_row(*map(str, row))
self.updateTracks()
def handle_edit_track(self, trackDescriptor : TrackDescriptor):
if trackDescriptor is None:
return
try:
if self.__pattern is not None:
if not self.__tc.updateTrack(trackDescriptor.getId(), trackDescriptor):
raise click.ClickException("PatternDetailsScreen.handle_edit_track(): track update failed")
else:
selectedTrack = self.getSelectedTrackDescriptor()
for index, currentTrack in enumerate(self.__draftTracks):
if (selectedTrack is not None
and currentTrack.getIndex() == selectedTrack.getIndex()
and currentTrack.getSubIndex() == selectedTrack.getSubIndex()):
self.__draftTracks[index] = trackDescriptor
break
self.normalizeDraftTracks()
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
self.tracksTable.update_cell(row_key, self.column_key_track_audio_layout, trackDescriptor.getAudioLayout().label())
self.tracksTable.update_cell(row_key, self.column_key_track_language, trackDescriptor.getLanguage().label())
self.tracksTable.update_cell(row_key, self.column_key_track_title, trackDescriptor.getTitle())
self.tracksTable.update_cell(row_key, self.column_key_track_default, 'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
self.tracksTable.update_cell(row_key, self.column_key_track_forced, 'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')
except CellDoesNotExist:
pass
self.updateTracks()
def handle_delete_track(self, trackDescriptor : TrackDescriptor):
if trackDescriptor is None:
return
if self.__pattern is not None:
track = self.__tc.getTrack(trackDescriptor.getPatternId(), trackDescriptor.getIndex())
if track is None:
raise click.ClickException(
f"Track is none: patternId={trackDescriptor.getPatternId()} type={trackDescriptor.getType()} subIndex={trackDescriptor.getSubIndex()}"
)
self.__tc.deleteTrack(track.getId())
else:
self.__draftTracks = [
currentTrack
for currentTrack in self.__draftTracks
if not (
currentTrack.getIndex() == trackDescriptor.getIndex()
and currentTrack.getSubIndex() == trackDescriptor.getSubIndex()
)
]
self.normalizeDraftTracks()
self.updateTracks()
def handle_update_tag(self, tag):
if tag is None:
return
if self.__pattern is None:
raise click.ClickException(f"PatternDetailsScreen.handle_update_tag: pattern not set")
self.__draftTags[str(tag[0])] = str(tag[1])
else:
if self.__tac.updateMediaTag(self.__pattern.getId(), tag[0], tag[1]) is None:
raise click.ClickException("PatternDetailsScreen.handle_update_tag(): tag update failed")
if self.__tac.updateMediaTag(self.__pattern.getId(), tag[0], tag[1]) is not None:
self.updateTags()
def handle_delete_tag(self, tag):
if tag is None:
return
if self.__pattern is None:
raise click.ClickException(f"PatternDetailsScreen.handle_delete_tag: pattern not set")
self.__draftTags.pop(str(tag[0]), None)
self.updateTags()
return
if self.__tac.deleteMediaTagByKey(self.__pattern.getId(), tag[0]):
self.updateTags()
else:
raise click.ClickException('tag delete failed')

View File

@@ -1,39 +1,169 @@
import subprocess, logging
from typing import List
import os
import shlex
import subprocess
from typing import Iterable, List
from .logging_utils import get_ffx_logger
def executeProcess(commandSequence: List[str], directory: str = None, context: dict = None):
COMMAND_TIMED_OUT_RETURN_CODE = 124
COMMAND_NOT_FOUND_RETURN_CODE = 127
MIN_NICENESS = -20
MAX_NICENESS = 19
DISABLED_NICENESS_SENTINEL = 99
DISABLED_CPU_PERCENT_SENTINEL = 0
MIN_CPU_PERCENT = 1
MAX_CPU_PERCENT = 100
def formatCommandSequence(commandSequence: Iterable[str]) -> str:
return shlex.join([str(token) for token in commandSequence])
def normalizeNiceness(niceness) -> int | None:
if niceness is None:
return None
niceness = int(niceness)
if niceness == DISABLED_NICENESS_SENTINEL:
return None
if niceness < MIN_NICENESS or niceness > MAX_NICENESS:
raise ValueError(
f"Niceness must be between {MIN_NICENESS} and {MAX_NICENESS}, "
+ f"or {DISABLED_NICENESS_SENTINEL} to disable."
)
return niceness
def getPresentCpuCount() -> int:
if hasattr(os, 'sched_getaffinity'):
affinity = os.sched_getaffinity(0)
if affinity:
return len(affinity)
cpuCount = os.cpu_count()
return cpuCount if cpuCount and cpuCount > 0 else 1
def normalizeCpuPercent(cpuPercent) -> int | None:
if cpuPercent is None:
return None
cpuPercent = str(cpuPercent).strip()
if cpuPercent.endswith('%'):
percentValue = int(cpuPercent[:-1].strip())
if percentValue == DISABLED_CPU_PERCENT_SENTINEL:
return None
if percentValue < MIN_CPU_PERCENT or percentValue > MAX_CPU_PERCENT:
raise ValueError(
f"CPU percentage must be between {MIN_CPU_PERCENT}% and {MAX_CPU_PERCENT}%, "
+ f"or {DISABLED_CPU_PERCENT_SENTINEL} to disable."
)
return percentValue * getPresentCpuCount()
cpuPercent = int(cpuPercent)
if cpuPercent == DISABLED_CPU_PERCENT_SENTINEL:
return None
if cpuPercent < MIN_CPU_PERCENT:
raise ValueError(
"CPU limit must be a positive absolute value such as 200, "
+ f"a percentage such as 25%, or {DISABLED_CPU_PERCENT_SENTINEL} to disable."
)
return cpuPercent
def getWrappedCommandSequence(commandSequence: List[str], context: dict = None) -> List[str]:
"""
niceness: -20 to +19
cpu_percent: 1 to 99
niceness: -20 to 19, disabled when unset
cpu limit: positive absolute cpulimit value, or a machine-wide percentage
When both limits are configured, cpulimit wraps a nice-adjusted command:
cpulimit -l <cpu> -- nice -n <niceness> <command>
"""
if context is None:
logger = logging.getLogger('FFX')
logger.addHandler(logging.NullHandler())
else:
logger = context['logger']
resourceLimits = (context or {}).get('resource_limits', {})
niceness = normalizeNiceness(resourceLimits.get('niceness'))
cpu_percent = normalizeCpuPercent(
resourceLimits.get('cpu_limit', resourceLimits.get('cpu_percent'))
)
wrappedCommandSequence = [str(token) for token in commandSequence]
if niceness is not None:
wrappedCommandSequence = ['nice', '-n', str(niceness)] + wrappedCommandSequence
if cpu_percent is not None:
wrappedCommandSequence = ['cpulimit', '-l', str(cpu_percent), '--'] + wrappedCommandSequence
return wrappedCommandSequence
niceSequence = []
niceness = (int(context['resource_limits']['niceness'])
if not context is None
and 'resource_limits' in context.keys()
and 'niceness' in context['resource_limits'].keys() else 99)
cpu_percent = (int(context['resource_limits']['cpu_percent'])
if not context is None
and 'resource_limits' in context.keys()
and 'cpu_percent' in context['resource_limits'].keys() else 0)
if niceness >= -20 and niceness <= 19:
niceSequence += ['nice', '-n', str(niceness)]
if cpu_percent >= 1:
niceSequence += ['cpulimit', '-l', str(cpu_percent), '--']
niceCommand = niceSequence + commandSequence
logger.debug(f"executeProcess() command sequence: {' '.join(niceCommand)}")
process = subprocess.Popen(niceCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8', cwd = directory)
output, error = process.communicate()
return output, error, process.returncode
def getProcessTimeoutSeconds(context: dict = None, timeoutSeconds: float = None):
if timeoutSeconds is None:
timeoutSeconds = (context or {}).get('resource_limits', {}).get('timeout_seconds')
if timeoutSeconds is None:
return None
timeoutSeconds = float(timeoutSeconds)
return timeoutSeconds if timeoutSeconds > 0 else None
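A worked example of the wrapping and timeout helpers under an assumed resource_limits block; the core count and all values are illustrative:
# All values below are assumptions for illustration.
context = {'resource_limits': {'niceness': 10, 'cpu_limit': '25%', 'timeout_seconds': 600}}
cmd = getWrappedCommandSequence(['ffmpeg', '-i', 'in.mkv', 'out.mkv'], context=context)
# On an 8-core machine normalizeCpuPercent('25%') yields 25 * 8 = 200, so cmd is
# ['cpulimit', '-l', '200', '--', 'nice', '-n', '10', 'ffmpeg', '-i', 'in.mkv', 'out.mkv'].
print(getProcessTimeoutSeconds(context=context))  # 600.0
# executeProcess() below maps a timeout to return code 124 and a missing binary to 127.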
def executeProcess(
commandSequence: List[str],
directory: str = None,
context: dict = None,
timeoutSeconds: float = None,
):
logger = context['logger'] if context is not None and 'logger' in context else get_ffx_logger()
wrappedCommandSequence = getWrappedCommandSequence(commandSequence, context=context)
timeoutSeconds = getProcessTimeoutSeconds(context=context, timeoutSeconds=timeoutSeconds)
logger.debug(
"executeProcess() cwd=%s timeout=%s command=%s",
directory or '.',
timeoutSeconds if timeoutSeconds is not None else 'none',
formatCommandSequence(wrappedCommandSequence),
)
try:
completed = subprocess.run(
wrappedCommandSequence,
capture_output=True,
text=True,
cwd=directory,
timeout=timeoutSeconds,
check=False,
)
except FileNotFoundError as ex:
error = (
"Command not found while running "
+ f"{formatCommandSequence(wrappedCommandSequence)}: {ex.filename or ex}"
)
logger.error(error)
return '', error, COMMAND_NOT_FOUND_RETURN_CODE
except subprocess.TimeoutExpired as ex:
stdout = ex.stdout or ''
stderr = ex.stderr or ''
error = (
f"Command timed out after {timeoutSeconds} seconds while running "
+ formatCommandSequence(wrappedCommandSequence)
)
if stderr:
error = f"{error}\n{stderr}"
logger.error(error)
return stdout, error, COMMAND_TIMED_OUT_RETURN_CODE
if completed.returncode != 0:
logger.warning(
"executeProcess() rc=%s command=%s",
completed.returncode,
formatCommandSequence(wrappedCommandSequence),
)
return completed.stdout, completed.stderr, completed.returncode

src/ffx/screen_support.py Normal file
View File

@@ -0,0 +1,65 @@
from __future__ import annotations
from dataclasses import dataclass
from .pattern_controller import PatternController
from .show_controller import ShowController
from .shifted_season_controller import ShiftedSeasonController
from .tag_controller import TagController
from .tmdb_controller import TmdbController
from .track_controller import TrackController
@dataclass(frozen=True)
class ScreenBootstrap:
context: dict
configuration_data: dict
signature_tags: dict
remove_global_keys: list
ignore_global_keys: list
remove_track_keys: list
ignore_track_keys: list
def build_screen_bootstrap(context: dict) -> ScreenBootstrap:
configurationData = context['config'].getData()
metadataConfiguration = configurationData.get('metadata', {})
streamMetadataConfiguration = metadataConfiguration.get('streams', {})
return ScreenBootstrap(
context=context,
configuration_data=configurationData,
signature_tags=metadataConfiguration.get('signature', {}),
remove_global_keys=metadataConfiguration.get('remove', []),
ignore_global_keys=metadataConfiguration.get('ignore', []),
remove_track_keys=streamMetadataConfiguration.get('remove', []),
ignore_track_keys=streamMetadataConfiguration.get('ignore', []),
)
def build_screen_controllers(
context: dict,
*,
pattern: bool = False,
show: bool = False,
track: bool = False,
tag: bool = False,
tmdb: bool = False,
shifted_season: bool = False,
) -> dict[str, object]:
controllers = {}
if pattern:
controllers['pattern'] = PatternController(context=context)
if show:
controllers['show'] = ShowController(context=context)
if track:
controllers['track'] = TrackController(context=context)
if tag:
controllers['tag'] = TagController(context=context)
if tmdb:
controllers['tmdb'] = TmdbController()
if shifted_season:
controllers['shifted_season'] = ShiftedSeasonController(context=context)
return controllers

View File

@@ -18,9 +18,16 @@ class ShiftedSeasonController():
self.Session = self.context['database']['session'] # convenience
def checkShiftedSeason(self, showId: int, shiftedSeasonObj: dict, shiftedSeasonId: int = 0):
"""
Check if for a particula season
shiftedSeasonId
"""
try:
s = self.Session()
originalSeason = shiftedSeasonObj['original_season']
firstEpisode = int(shiftedSeasonObj['first_episode'])
lastEpisode = int(shiftedSeasonObj['last_episode'])
@@ -31,11 +38,14 @@ class ShiftedSeasonController():
siblingShiftedSeason: ShiftedSeason
for siblingShiftedSeason in q.all():
siblingOriginalSeason = siblingShiftedSeason.getOriginalSeason()
siblingFirstEpisode = siblingShiftedSeason.getFirstEpisode()
siblingLastEpisode = siblingShiftedSeason.getLastEpisode()
if (lastEpisode >= siblingFirstEpisode
if (originalSeason == siblingOriginalSeason
and lastEpisode >= siblingFirstEpisode
and siblingLastEpisode >= firstEpisode):
return False
return True
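The collision test above is the standard closed-interval overlap check; a quick worked example with made-up episode numbers:
# Ranges [a1, a2] and [b1, b2] overlap exactly when a2 >= b1 and b2 >= a1.
first_episode, last_episode = 13, 24        # candidate shifted season
sibling_first, sibling_last = 1, 12         # existing sibling, same original season
print(last_episode >= sibling_first and sibling_last >= first_episode)  # False: adjacent only
sibling_first, sibling_last = 24, 36
print(last_episode >= sibling_first and sibling_last >= first_episode)  # True: episode 24 shared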
@@ -91,11 +101,9 @@ class ShiftedSeasonController():
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
shiftedSeason = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId)).first()
if q.count():
shiftedSeason = q.first()
if shiftedSeason is not None:
shiftedSeason.original_season = int(shiftedSeasonObj['original_season'])
shiftedSeason.first_episode = int(shiftedSeasonObj['first_episode'])
@@ -131,12 +139,14 @@ class ShiftedSeasonController():
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId),
shiftedSeason = s.query(ShiftedSeason).filter(
ShiftedSeason.show_id == int(showId),
ShiftedSeason.original_season == int(originalSeason),
ShiftedSeason.first_episode == int(firstEpisode),
ShiftedSeason.last_episode == int(lastEpisode))
ShiftedSeason.last_episode == int(lastEpisode),
).first()
return q.first().getId() if q.count() else None
return shiftedSeason.getId() if shiftedSeason is not None else None
except Exception as ex:
raise click.ClickException(f"PatternController.findShiftedSeason(): {repr(ex)}")
@@ -167,9 +177,7 @@ class ShiftedSeasonController():
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
return q.first() if q.count() else None
return s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId)).first()
except Exception as ex:
raise click.ClickException(f"ShiftedSeasonController.getShiftedSeason(): {repr(ex)}")
@@ -184,13 +192,12 @@ class ShiftedSeasonController():
try:
s = self.Session()
q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))
shiftedSeason = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId)).first()
if q.count():
if shiftedSeason is not None:
#DAFUQ: https://stackoverflow.com/a/19245058
# q.delete()
shiftedSeason = q.first()
s.delete(shiftedSeason)
s.commit()

View File

@@ -16,10 +16,9 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show).filter(Show.id == showId)
show = s.query(Show).filter(Show.id == showId).first()
if q.count():
show: Show = q.first()
if show is not None:
return show.getDescriptor(self.context)
except Exception as ex:
@@ -31,9 +30,7 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show).filter(Show.id == showId)
return q.first() if q.count() else None
return s.query(Show).filter(Show.id == showId).first()
except Exception as ex:
raise click.ClickException(f"ShowController.getShow(): {repr(ex)}")
@@ -44,12 +41,7 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show)
if q.count():
return q.all()
else:
return []
return s.query(Show).all()
except Exception as ex:
raise click.ClickException(f"ShowController.getAllShows(): {repr(ex)}")
@@ -61,9 +53,9 @@ class ShowController():
try:
s = self.Session()
q = s.query(Show).filter(Show.id == showDescriptor.getId())
currentShow = s.query(Show).filter(Show.id == showDescriptor.getId()).first()
if not q.count():
if currentShow is None:
show = Show(id = int(showDescriptor.getId()),
name = str(showDescriptor.getName()),
year = int(showDescriptor.getYear()),
@@ -76,9 +68,6 @@ class ShowController():
s.commit()
return True
else:
currentShow = q.first()
changed = False
if currentShow.name != str(showDescriptor.getName()):
currentShow.name = str(showDescriptor.getName())
@@ -113,14 +102,12 @@ class ShowController():
def deleteShow(self, show_id):
try:
s = self.Session()
q = s.query(Show).filter(Show.id == int(show_id))
show = s.query(Show).filter(Show.id == int(show_id)).first()
if q.count():
if show is not None:
#DAFUQ: https://stackoverflow.com/a/19245058
# q.delete()
show = q.first()
s.delete(show)
s.commit()

View File

@@ -1,4 +1,4 @@
import logging
from .logging_utils import get_ffx_logger
class ShowDescriptor():
@@ -32,8 +32,7 @@ class ShowDescriptor():
self.__logger = self.__context['logger']
else:
self.__context = {}
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
if ShowDescriptor.ID_KEY in kwargs.keys():
if type(kwargs[ShowDescriptor.ID_KEY]) is not int:

View File

@@ -5,16 +5,9 @@ from textual.widgets import Header, Footer, Static, Button, DataTable, Input
from textual.containers import Grid
from textual.widgets._data_table import CellDoesNotExist
from ffx.model.pattern import Pattern
from .pattern_details_screen import PatternDetailsScreen
from .pattern_delete_screen import PatternDeleteScreen
from .show_controller import ShowController
from .pattern_controller import PatternController
from .tmdb_controller import TmdbController
from .shifted_season_controller import ShiftedSeasonController
from .show_descriptor import ShowDescriptor
from .shifted_season_details_screen import ShiftedSeasonDetailsScreen
@@ -23,6 +16,7 @@ from .shifted_season_delete_screen import ShiftedSeasonDeleteScreen
from ffx.model.shifted_season import ShiftedSeason
from .helper import filterFilename
from .screen_support import build_screen_bootstrap, build_screen_controllers
# Screen[dict[int, str, int]]
@@ -94,31 +88,24 @@ class ShowDetailsScreen(Screen):
def __init__(self, showId = None):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
bootstrap = build_screen_bootstrap(self.app.getContext())
self.context = bootstrap.context
self.__sc = ShowController(context = self.context)
self.__pc = PatternController(context = self.context)
self.__tc = TmdbController()
self.__ssc = ShiftedSeasonController(context = self.context)
controllers = build_screen_controllers(
self.context,
pattern=True,
show=True,
tmdb=True,
shifted_season=True,
)
self.__sc = controllers['show']
self.__pc = controllers['pattern']
self.__tc = controllers['tmdb']
self.__ssc = controllers['shifted_season']
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
def loadPatterns(self, show_id : int):
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(show_id))
return [{'id': int(p.id), 'pattern': str(p.pattern)} for p in q.all()]
except Exception as ex:
raise click.ClickException(f"ShowDetailsScreen.loadPatterns(): {repr(ex)}")
finally:
s.close()
def updateShiftedSeasons(self):
@@ -166,10 +153,8 @@ class ShowDetailsScreen(Screen):
#raise click.ClickException(f"show_id {showId}")
patternList = self.loadPatterns(showId)
# raise click.ClickException(f"patternList {patternList}")
for pattern in patternList:
row = (pattern['pattern'],)
for pattern in self.__pc.getPatternsForShow(showId):
row = (pattern.getPattern(),)
self.patternTable.add_row(*map(str, row))
self.updateShiftedSeasons()
@@ -400,7 +385,7 @@ class ShowDetailsScreen(Screen):
yield Footer()
def getShowDescriptorFromInput(self):
def getShowDescriptorFromInput(self) -> ShowDescriptor:
kwargs = {}
@@ -444,7 +429,7 @@ class ShowDetailsScreen(Screen):
# Event handler for button press
def on_button_pressed(self, event: Button.Pressed) -> None:
# Check if the button pressed is the one we are interested in
if event.button.id == "save_button":
showDescriptor = self.getShowDescriptorFromInput()

View File

@@ -162,4 +162,7 @@ class ShowsScreen(Screen):
yield self.table
yield Footer()
f = Footer()
f.description = "yolo"
yield f

View File

@@ -67,10 +67,11 @@ class TagController():
try:
s = self.Session()
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId),
MediaTag.key == str(tagKey))
if q.count():
tag = q.first()
tag = s.query(MediaTag).filter(
MediaTag.pattern_id == int(patternId),
MediaTag.key == str(tagKey),
).first()
if tag is not None:
s.delete(tag)
s.commit()
return True
@@ -107,12 +108,8 @@ class TagController():
try:
s = self.Session()
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId))
if q.count():
return {t.key:t.value for t in q.all()}
else:
return {}
tags = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId)).all()
return {t.key:t.value for t in tags}
except Exception as ex:
raise click.ClickException(f"TagController.findAllMediaTags(): {repr(ex)}")
@@ -125,12 +122,8 @@ class TagController():
try:
s = self.Session()
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId))
if q.count():
return {t.key:t.value for t in q.all()}
else:
return {}
tags = s.query(TrackTag).filter(TrackTag.track_id == int(trackId)).all()
return {t.key:t.value for t in tags}
except Exception as ex:
raise click.ClickException(f"TagController.findAllTracks(): {repr(ex)}")
@@ -142,12 +135,7 @@ class TagController():
try:
s = self.Session()
q = s.query(Track).filter(MediaTag.track_id == int(trackId), MediaTag.key == str(trackKey))
if q.count():
return q.first()
else:
return None
return s.query(Track).filter(MediaTag.track_id == int(trackId), MediaTag.key == str(trackKey)).first()
except Exception as ex:
raise click.ClickException(f"TagController.findMediaTag(): {repr(ex)}")
@@ -158,12 +146,10 @@ class TagController():
try:
s = self.Session()
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId), TrackTag.key == str(tagKey))
if q.count():
return q.first()
else:
return None
return s.query(TrackTag).filter(
TrackTag.track_id == int(trackId),
TrackTag.key == str(tagKey),
).first()
except Exception as ex:
raise click.ClickException(f"TagController.findTrackTag(): {repr(ex)}")
@@ -175,11 +161,9 @@ class TagController():
def deleteMediaTag(self, tagId) -> bool:
try:
s = self.Session()
q = s.query(MediaTag).filter(MediaTag.id == int(tagId))
tag = s.query(MediaTag).filter(MediaTag.id == int(tagId)).first()
if q.count():
tag = q.first()
if tag is not None:
s.delete(tag)
@@ -201,11 +185,9 @@ class TagController():
try:
s = self.Session()
q = s.query(TrackTag).filter(TrackTag.id == int(tagId))
tag = s.query(TrackTag).filter(TrackTag.id == int(tagId)).first()
if q.count():
tag = q.first()
if tag is not None:
s.delete(tag)

View File

@@ -1,6 +1,8 @@
import os, requests, time, logging
import os, requests, time
from datetime import datetime
from .logging_utils import get_ffx_logger
class TMDB_REQUEST_EXCEPTION(Exception):
def __init__(self, statusCode, statusMessage):
@@ -27,8 +29,7 @@ class TmdbController():
self.__context = context
if context is None:
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
else:
self.__logger = context['logger']

View File

@@ -5,11 +5,22 @@ class TrackCodec(Enum):
H265 = {'identifier': 'hevc', 'format': 'h265', 'extension': 'h265' ,'label': 'H.265'}
H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
MPEG4 = {'identifier': 'mpeg4', 'format': 'm4v', 'extension': 'm4v' ,'label': 'MPEG-4'}
MPEG2 = {'identifier': 'mpeg2video', 'format': 'mpeg2video', 'extension': 'mpg' ,'label': 'MPEG-2'}
AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
EAC3 = {'identifier': 'eac3', 'format': 'eac3', 'extension': 'eac3' , 'label': 'EAC3'}
DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
MP3 = {'identifier': 'mp3', 'format': 'mp3', 'extension': 'mp3' , 'label': 'MP3'}
SRT = {'identifier': 'subrip', 'format': 'srt', 'extension': 'srt' , 'label': 'SRT'}
ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
TTF = {'identifier': 'ttf', 'format': None, 'extension': 'ttf' , 'label': 'TTF'}
PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
VOBSUB = {'identifier': 'dvd_subtitle', 'format': None, 'extension': 'mkv' , 'label': 'VobSub'}
PNG = {'identifier': 'png', 'format': None, 'extension': 'png' , 'label': 'PNG'}
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
@@ -23,8 +34,8 @@ class TrackCodec(Enum):
return str(self.value['label'])
def format(self):
"""Returns the codec as single letter"""
return str(self.value['format'])
"""Returns the codec """
return self.value['format']
def extension(self):
"""Returns the corresponding extension"""

View File

@@ -19,6 +19,20 @@ class TrackController():
self.context = context
self.Session = self.context['database']['session'] # convenience
self.__configurationData = self.context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
def addTrack(self, trackDescriptor : TrackDescriptor, patternId = None):
@@ -29,7 +43,7 @@ class TrackController():
s = self.Session()
track = Track(pattern_id = patId,
track_type = int(trackDescriptor.getType().index()),
codec_name = str(trackDescriptor.getCodec().label()),
codec_name = str(trackDescriptor.getCodec().identifier()),
index = int(trackDescriptor.getIndex()),
source_index = int(trackDescriptor.getSourceIndex()),
disposition_flags = int(TrackDisposition.toFlags(trackDescriptor.getDispositionSet())),
@@ -40,6 +54,8 @@ class TrackController():
for k,v in trackDescriptor.getTags().items():
# Filter tags that make no sense to preserve
if k not in self.__ignoreTrackKeys and k not in self.__removeTrackKeys:
tag = TrackTag(track_id = track.id,
key = k,
value = v)
@@ -59,11 +75,9 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.id == int(trackId))
track = s.query(Track).filter(Track.id == int(trackId)).first()
if q.count():
track : Track = q.first()
if track is not None:
track.index = int(trackDescriptor.getIndex())
@@ -177,12 +191,10 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.pattern_id == int(patternId), Track.index == int(index))
if q.count():
return q.first()
else:
return None
return s.query(Track).filter(
Track.pattern_id == int(patternId),
Track.index == int(index),
).first()
except Exception as ex:
raise click.ClickException(f"TrackController.getTrack(): {repr(ex)}")
@@ -202,11 +214,9 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.pattern_id == patternId, Track.index == index)
track = s.query(Track).filter(Track.pattern_id == patternId, Track.index == index).first()
if q.count():
track : Track = q.first()
if track is not None:
if state:
track.setDisposition(disposition)
@@ -228,15 +238,21 @@ class TrackController():
try:
s = self.Session()
q = s.query(Track).filter(Track.id == int(trackId))
track = s.query(Track).filter(Track.id == int(trackId)).first()
if q.count():
patternId = int(q.first().pattern_id)
if track is not None:
patternId = int(track.pattern_id)
q_siblings = s.query(Track).filter(Track.pattern_id == patternId).order_by(Track.index)
siblingTracks = q_siblings.all()
if len(siblingTracks) <= 1:
raise click.ClickException(
f"Cannot delete the last track from pattern #{patternId}. Patterns must define at least one track."
)
index = 0
for track in q_siblings.all():
for track in siblingTracks:
if track.id == int(trackId):
s.delete(track)

View File

@@ -6,8 +6,6 @@ from textual.containers import Grid
from ffx.track_descriptor import TrackDescriptor
from .track_controller import TrackController
# Screen[dict[int, str, int]]
class TrackDeleteScreen(Screen):
@@ -52,14 +50,9 @@ class TrackDeleteScreen(Screen):
def __init__(self, trackDescriptor : TrackDescriptor):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
if type(trackDescriptor) is not TrackDescriptor:
raise click.ClickException('TrackDeleteScreen.init(): trackDescriptor is required to be of type TrackDescriptor')
self.__tc = TrackController(context = self.context)
self.__trackDescriptor = trackDescriptor
@@ -116,21 +109,7 @@ class TrackDeleteScreen(Screen):
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button.id == "delete_button":
track = self.__tc.getTrack(self.__trackDescriptor.getPatternId(), self.__trackDescriptor.getIndex())
if track is None:
raise click.ClickException(f"Track is none: patternId={self.__trackDescriptor.getPatternId()} type={self.__trackDescriptor.getType()} subIndex={self.__trackDescriptor.getSubIndex()}")
if track is not None:
if self.__tc.deleteTrack(track.getId()):
self.dismiss(self.__trackDescriptor)
else:
#TODO: notify the user
self.app.pop_screen()
if event.button.id == "cancel_button":
self.app.pop_screen()

View File

@@ -1,4 +1,3 @@
import logging
from typing import Self
from .iso_language import IsoLanguage
@@ -6,8 +5,9 @@ from .track_type import TrackType
from .audio_layout import AudioLayout
from .track_disposition import TrackDisposition
from .track_codec import TrackCodec
from .logging_utils import get_ffx_logger
from .helper import dictDiff, setDiff
# from .helper import dictDiff, setDiff
class TrackDescriptor:
@@ -34,7 +34,6 @@ class TrackDescriptor:
FFPROBE_CODEC_TYPE_KEY = "codec_type"
FFPROBE_CODEC_KEY = "codec_name"
CODEC_PGS = 'hdmv_pgs_subtitle'
def __init__(self, **kwargs):
@@ -47,8 +46,7 @@ class TrackDescriptor:
self.__logger = self.__context['logger']
else:
self.__context = {}
self.__logger = logging.getLogger('FFX')
self.__logger.addHandler(logging.NullHandler())
self.__logger = get_ffx_logger()
if TrackDescriptor.ID_KEY in kwargs.keys():
if type(kwargs[TrackDescriptor.ID_KEY]) is not int:
@@ -321,24 +319,24 @@ class TrackDescriptor:
else:
self.__dispositionSet.discard(disposition)
def compare(self, vsTrackDescriptor: Self):
compareResult = {}
tagsDiffResult = dictDiff(vsTrackDescriptor.getTags(), self.getTags())
if tagsDiffResult:
compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
vsDispositions = vsTrackDescriptor.getDispositionSet()
dispositions = self.getDispositionSet()
dispositionDiffResult = setDiff(vsDispositions, dispositions)
if dispositionDiffResult:
compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
return compareResult
# def compare(self, vsTrackDescriptor: Self):
#
# compareResult = {}
#
# tagsDiffResult = dictKeysDiff(vsTrackDescriptor.getTags(), self.getTags())
#
# if tagsDiffResult:
# compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
#
# vsDispositions = vsTrackDescriptor.getDispositionSet()
# dispositions = self.getDispositionSet()
#
# dispositionDiffResult = setDiff(vsDispositions, dispositions)
#
# if dispositionDiffResult:
# compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
#
# return compareResult
def setExternalSourceFilePath(self, filePath: str):
self.__externalSourceFilePath = str(filePath)
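logging_utils.get_ffx_logger itself is not part of this diff. A plausible minimal implementation, consistent with the constructor code it replaces (shared 'FFX' logger plus a NullHandler), could look like the sketch below; treat it as an assumption, not the actual module:

import logging

def get_ffx_logger() -> logging.Logger:
    # Hypothetical sketch of logging_utils.get_ffx_logger; its real body is
    # not shown in this diff. It centralizes what the old constructors
    # repeated: fetch the shared 'FFX' logger and attach a NullHandler at
    # most once.
    logger = logging.getLogger("FFX")
    if not any(isinstance(h, logging.NullHandler) for h in logger.handlers):
        logger.addHandler(logging.NullHandler())
    return logger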

View File

@@ -3,29 +3,20 @@ import click
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, SelectionList, Select, DataTable, Input
from textual.containers import Grid
from ffx.model.pattern import Pattern
from .track_controller import TrackController
from .pattern_controller import PatternController
from .tag_controller import TagController
from .track_type import TrackType
from .track_codec import TrackCodec
from .iso_language import IsoLanguage
from .track_disposition import TrackDisposition
from .audio_layout import AudioLayout
from .track_descriptor import TrackDescriptor
from .tag_details_screen import TagDetailsScreen
from .tag_delete_screen import TagDeleteScreen
from textual.widgets._data_table import CellDoesNotExist
from .audio_layout import AudioLayout
from .iso_language import IsoLanguage
from .tag_delete_screen import TagDeleteScreen
from .tag_details_screen import TagDetailsScreen
from .track_codec import TrackCodec
from .track_descriptor import TrackDescriptor
from .track_disposition import TrackDisposition
from .track_type import TrackType
from ffx.helper import formatRichColor, removeRichColor
# Screen[dict[int, str, int]]
class TrackDetailsScreen(Screen):
CSS = """
@@ -95,335 +86,383 @@ class TrackDetailsScreen(Screen):
}
"""
def __init__(self, trackDescriptor : TrackDescriptor = None, patternId = None, trackType : TrackType = None, index = None, subIndex = None):
def __init__(
self,
trackDescriptor: TrackDescriptor = None,
patternId=None,
patternLabel: str = "",
siblingTrackDescriptors=None,
trackType: TrackType = None,
index=None,
subIndex=None,
):
super().__init__()
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
self.__tc = TrackController(context = self.context)
self.__pc = PatternController(context = self.context)
self.__tac = TagController(context = self.context)
self.__configurationData = self.context["config"].getData()
metadataConfiguration = (
self.__configurationData["metadata"]
if "metadata" in self.__configurationData.keys()
else {}
)
self.__removeTrackKeys = (
metadataConfiguration["streams"]["remove"]
if "streams" in metadataConfiguration.keys()
and "remove" in metadataConfiguration["streams"].keys()
else []
)
self.__ignoreTrackKeys = (
metadataConfiguration["streams"]["ignore"]
if "streams" in metadataConfiguration.keys()
and "ignore" in metadataConfiguration["streams"].keys()
else []
)
self.__isNew = trackDescriptor is None
self.__trackDescriptor = trackDescriptor
self.__patternId = (
int(patternId)
if patternId is not None
else (
int(trackDescriptor.getPatternId())
if trackDescriptor is not None and trackDescriptor.getPatternId() != -1
else -1
)
)
self.__patternLabel = str(patternLabel)
self.__siblingTrackDescriptors = list(siblingTrackDescriptors or [])
if self.__isNew:
self.__trackType = trackType
self.__trackCodec = TrackCodec.UNKNOWN
self.__audioLayout = AudioLayout.LAYOUT_UNDEFINED
self.__index = index
self.__subIndex = subIndex
self.__trackDescriptor : TrackDescriptor = None
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else {}
self.__draftTrackTags = {}
else:
self.__trackType = trackDescriptor.getType()
self.__trackCodec = trackDescriptor.getCodec()
self.__audioLayout = trackDescriptor.getAudioLayout()
self.__index = trackDescriptor.getIndex()
self.__subIndex = trackDescriptor.getSubIndex()
self.__trackDescriptor : TrackDescriptor = trackDescriptor
self.__pattern : Pattern = self.__pc.getPattern(self.__trackDescriptor.getPatternId())
self.__draftTrackTags = {
key: value
for key, value in trackDescriptor.getTags().items()
if key not in ("language", "title")
}
def _descriptor_refs_same_track(self, descriptor: TrackDescriptor) -> bool:
if self.__trackDescriptor is None:
return False
if descriptor.getId() != -1 and self.__trackDescriptor.getId() != -1:
return descriptor.getId() == self.__trackDescriptor.getId()
return (
descriptor.getPatternId() == self.__trackDescriptor.getPatternId()
and descriptor.getIndex() == self.__trackDescriptor.getIndex()
and descriptor.getSubIndex() == self.__trackDescriptor.getSubIndex()
)
def updateTags(self):
self.trackTagsTable.clear()
trackId = self.__trackDescriptor.getId()
for key, value in self.__draftTrackTags.items():
textColor = None
if key in self.__ignoreTrackKeys:
textColor = "blue"
if key in self.__removeTrackKeys:
textColor = "red"
if trackId != -1:
trackTags = self.__tac.findAllTrackTags(trackId)
for k,v in trackTags.items():
if k != 'language' and k != 'title':
row = (k,v)
row = (formatRichColor(key, textColor), formatRichColor(value, textColor))
self.trackTagsTable.add_row(*map(str, row))
def on_mount(self):
self.query_one("#index_label", Static).update(str(self.__index) if self.__index is not None else '-')
self.query_one("#subindex_label", Static).update(str(self.__subIndex)if self.__subIndex is not None else '-')
if self.__pattern is not None:
self.query_one("#pattern_label", Static).update(self.__pattern.getPattern())
self.query_one("#index_label", Static).update(
str(self.__index) if self.__index is not None else "-"
)
self.query_one("#subindex_label", Static).update(
str(self.__subIndex) if self.__subIndex is not None else "-"
)
self.query_one("#pattern_label", Static).update(self.__patternLabel)
if self.__trackType is not None:
self.query_one("#type_select", Select).value = self.__trackType.label()
if self.__trackType == TrackType.AUDIO:
self.query_one("#audio_layout_select", Select).value = self.__audioLayout.label()
for d in TrackDisposition:
for disposition in TrackDisposition:
dispositionIsSet = (self.__trackDescriptor is not None
and d in self.__trackDescriptor.getDispositionSet())
dispositionIsSet = (
self.__trackDescriptor is not None
and disposition in self.__trackDescriptor.getDispositionSet()
)
dispositionOption = (d.label(), d.index(), dispositionIsSet)
self.query_one("#dispositions_selection_list", SelectionList).add_option(dispositionOption)
dispositionOption = (
disposition.label(),
disposition.index(),
dispositionIsSet,
)
self.query_one("#dispositions_selection_list", SelectionList).add_option(
dispositionOption
)
if self.__trackDescriptor is not None:
self.query_one("#language_select", Select).value = self.__trackDescriptor.getLanguage().label()
self.query_one("#language_select", Select).value = (
self.__trackDescriptor.getLanguage().label()
)
self.query_one("#title_input", Input).value = self.__trackDescriptor.getTitle()
self.updateTags()
def compose(self):
self.trackTagsTable = DataTable(classes="five")
# Define the columns with headers
self.column_key_track_tag_key = self.trackTagsTable.add_column("Key", width=50)
self.column_key_track_tag_value = self.trackTagsTable.add_column("Value", width=100)
self.trackTagsTable.cursor_type = 'row'
self.trackTagsTable.cursor_type = "row"
languages = [l.label() for l in IsoLanguage]
languages = [language.label() for language in IsoLanguage]
yield Header()
with Grid():
# 1
yield Static(f"New stream" if self.__isNew else f"Edit stream", id="toplabel", classes="five")
yield Static(
"New stream" if self.__isNew else "Edit stream",
id="toplabel",
classes="five",
)
# 2
yield Static("for pattern")
yield Static("", id="pattern_label", classes="four")
yield Static("", id="pattern_label", classes="four", markup=False)
# 3
yield Static(" ", classes="five")
# 4
yield Static("Index / Subindex")
yield Static("", id="index_label", classes="two")
yield Static("", id="subindex_label", classes="two")
# 5
yield Static(" ", classes="five")
# 6
yield Static("Type")
yield Select.from_values([t.label() for t in TrackType], classes="four", id="type_select")
yield Select.from_values(
[trackType.label() for trackType in TrackType],
classes="four",
id="type_select",
)
# 7
if self.__trackType == TrackType.AUDIO:
yield Static("Audio Layout")
yield Select.from_values([t.label() for t in AudioLayout], classes="four", id="audio_layout_select")
else:
yield Select.from_values(
[layout.label() for layout in AudioLayout],
classes="four",
id="audio_layout_select",
)
yield Static(" ", classes="five")
# 8
yield Static(" ", classes="five")
# 9
yield Static(" ", classes="five")
# 10
yield Static("Language")
yield Select.from_values(languages, classes="four", id="language_select")
# 11
yield Static(" ", classes="five")
# 12
yield Static("Title")
yield Input(id="title_input", classes="four")
# 13
yield Static(" ", classes="five")
# 14
yield Static(" ", classes="five")
# 15
yield Static("Stream tags")
yield Static(" ")
yield Button("Add", id="button_add_stream_tag")
yield Button("Edit", id="button_edit_stream_tag")
yield Button("Delete", id="button_delete_stream_tag")
# 16
yield self.trackTagsTable
# 17
yield Static(" ", classes="five")
# 18
yield Static("Stream dispositions", classes="five")
# 19
yield SelectionList[int](
classes="five",
id = "dispositions_selection_list"
id="dispositions_selection_list",
)
# 20
yield Static(" ", classes="five")
# 21
yield Static(" ", classes="five")
# 22
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
# 23
yield Static(" ", classes="five")
# 24
yield Static(" ", classes="five", id="messagestatic")
yield Footer(id="footer")
def getTrackDescriptorFromInput(self):
kwargs = {}
kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__pattern.getId())
if self.__trackDescriptor is not None and self.__trackDescriptor.getId() != -1:
kwargs[TrackDescriptor.ID_KEY] = self.__trackDescriptor.getId()
kwargs[TrackDescriptor.INDEX_KEY] = self.__index
kwargs[TrackDescriptor.SUB_INDEX_KEY] = self.__subIndex #!
if self.__patternId != -1:
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__patternId)
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(self.query_one("#type_select", Select).value)
kwargs[TrackDescriptor.INDEX_KEY] = int(self.__index)
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = (
int(self.__trackDescriptor.getSourceIndex())
if self.__trackDescriptor is not None
else int(self.__index)
)
if self.__subIndex is not None and int(self.__subIndex) >= 0:
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(self.__subIndex)
selectedTrackType = TrackType.fromLabel(
self.query_one("#type_select", Select).value
)
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = selectedTrackType
kwargs[TrackDescriptor.CODEC_KEY] = self.__trackCodec
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(self.query_one("#audio_layout_select", Select).value)
trackTags = {}
if selectedTrackType == TrackType.AUDIO:
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(
self.query_one("#audio_layout_select", Select).value
)
else:
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.LAYOUT_UNDEFINED
trackTags = dict(self.__draftTrackTags)
language = self.query_one("#language_select", Select).value
if language:
trackTags['language'] = IsoLanguage.find(language).threeLetter()
trackTags["language"] = IsoLanguage.find(language).threeLetter()
title = self.query_one("#title_input", Input).value
if title:
trackTags['title'] = title
trackTags["title"] = title
tableTags = {row[0]:row[1] for r in self.trackTagsTable.rows if (row := self.trackTagsTable.get_row(r)) and row[0] != 'language' and row[0] != 'title'}
kwargs[TrackDescriptor.TAGS_KEY] = trackTags
kwargs[TrackDescriptor.TAGS_KEY] = trackTags | tableTags
dispositionFlags = sum([2**f for f in self.query_one("#dispositions_selection_list", SelectionList).selected])
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = TrackDisposition.toSet(dispositionFlags)
dispositionFlags = sum(
[2 ** flag for flag in self.query_one("#dispositions_selection_list", SelectionList).selected]
)
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = TrackDisposition.toSet(
dispositionFlags
)
return TrackDescriptor(**kwargs)
def getSelectedTag(self):
try:
# Fetch the currently selected row when 'Enter' is pressed
#selected_row_index = self.table.cursor_row
row_key, col_key = self.trackTagsTable.coordinate_to_cell_key(self.trackTagsTable.cursor_coordinate)
row_key, _ = self.trackTagsTable.coordinate_to_cell_key(
self.trackTagsTable.cursor_coordinate
)
if row_key is not None:
selected_tag_data = self.trackTagsTable.get_row(row_key)
tagKey = str(selected_tag_data[0])
tagValue = str(selected_tag_data[1])
tagKey = removeRichColor(selected_tag_data[0])
tagValue = removeRichColor(selected_tag_data[1])
return tagKey, tagValue
else:
return None
except CellDoesNotExist:
return None
# Event handler for button press
def on_button_pressed(self, event: Button.Pressed) -> None:
# Check if the button pressed is the one we are interested in
if event.button.id == "save_button":
# Check for multiple default/forced disposition flags
if self.__trackType == TrackType.VIDEO:
trackList = self.__tc.findVideoTracks(self.__pattern.getId())
if self.__trackType == TrackType.AUDIO:
trackList = self.__tc.findAudioTracks(self.__pattern.getId())
elif self.__trackType == TrackType.SUBTITLE:
trackList = self.__tc.findSubtitleTracks(self.__pattern.getId())
else:
trackList = []
siblingTrackList = [t for t in trackList if t.getType() == self.__trackType and t.getIndex() != self.__index]
numDefaultTracks = len([t for t in siblingTrackList if TrackDisposition.DEFAULT in t.getDispositionSet()])
numForcedTracks = len([t for t in siblingTrackList if TrackDisposition.FORCED in t.getDispositionSet()])
self.__subIndex = len(trackList)
trackDescriptor = self.getTrackDescriptorFromInput()
if ((TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() and numDefaultTracks)
or (TrackDisposition.FORCED in trackDescriptor.getDispositionSet() and numForcedTracks)):
siblingTrackList = [
descriptor
for descriptor in self.__siblingTrackDescriptors
if not self._descriptor_refs_same_track(descriptor)
]
siblingTrackList = [
descriptor
for descriptor in siblingTrackList
if descriptor.getType() == trackDescriptor.getType()
]
self.query_one("#messagestatic", Static).update("Cannot add another stream with disposition flag 'debug' or 'forced' set")
else:
self.query_one("#messagestatic", Static).update(" ")
numDefaultTracks = len(
[
descriptor
for descriptor in siblingTrackList
if TrackDisposition.DEFAULT in descriptor.getDispositionSet()
]
)
numForcedTracks = len(
[
descriptor
for descriptor in siblingTrackList
if TrackDisposition.FORCED in descriptor.getDispositionSet()
]
)
if self.__isNew:
trackDescriptor.setSubIndex(len(siblingTrackList))
elif self.__subIndex is not None and int(self.__subIndex) >= 0:
trackDescriptor.setSubIndex(int(self.__subIndex))
# Add the track via this screen
self.__tc.addTrack(trackDescriptor)
self.dismiss(trackDescriptor)
if (
TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet()
and numDefaultTracks
) or (
TrackDisposition.FORCED in trackDescriptor.getDispositionSet()
and numForcedTracks
):
self.query_one("#messagestatic", Static).update(
"Cannot add another stream with disposition flag 'default' or 'forced' set"
)
else:
track = self.__tc.getTrack(self.__pattern.getId(), self.__index)
# Update the track via the details screen
if self.__tc.updateTrack(track.getId(), trackDescriptor):
self.query_one("#messagestatic", Static).update(" ")
self.dismiss(trackDescriptor)
else:
self.app.pop_screen()
if event.button.id == "cancel_button":
self.app.pop_screen()
if event.button.id == "button_add_stream_tag":
if not self.__isNew:
self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)
if event.button.id == "button_edit_stream_tag":
tagKey, tagValue = self.getSelectedTag()
self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)
selectedTag = self.getSelectedTag()
if selectedTag is not None:
self.app.push_screen(
TagDetailsScreen(key=selectedTag[0], value=selectedTag[1]),
self.handle_update_tag,
)
if event.button.id == "button_delete_stream_tag":
tagKey, tagValue = self.getSelectedTag()
self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)
selectedTag = self.getSelectedTag()
if selectedTag is not None:
self.app.push_screen(
TagDeleteScreen(key=selectedTag[0], value=selectedTag[1]),
self.handle_delete_tag,
)
def handle_update_tag(self, tag):
trackId = self.__trackDescriptor.getId()
if trackId == -1:
raise click.ClickException(f"TrackDetailsScreen.handle_update_tag: trackId not set (-1) trackDescriptor={self.__trackDescriptor}")
if self.__tac.updateTrackTag(trackId, tag[0], tag[1]) is not None:
if tag is None:
return
self.__draftTrackTags[str(tag[0])] = str(tag[1])
self.updateTags()
def handle_delete_tag(self, trackTag):
trackId = self.__trackDescriptor.getId()
if trackId == -1:
raise click.ClickException(f"TrackDetailsScreen.handle_delete_tag: trackId not set (-1) trackDescriptor={self.__trackDescriptor}")
tag = self.__tac.findTrackTag(trackId, trackTag[0])
if tag is not None:
if self.__tac.deleteTrackTag(tag.id):
if trackTag is None:
return
self.__draftTrackTags.pop(str(trackTag[0]), None)
self.updateTags()
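The screen now stages tag edits in __draftTrackTags and persists them only on save, instead of writing through TagController on every change. A reduced sketch of that edit-buffer pattern, with illustrative names and FFX's controllers omitted:

class TagDraft:
    RESERVED = ("language", "title")  # edited via dedicated widgets instead

    def __init__(self, tags: dict):
        self.tags = {k: v for k, v in tags.items() if k not in self.RESERVED}

    def update(self, key: str, value: str) -> None:
        self.tags[str(key)] = str(value)

    def delete(self, key: str) -> None:
        self.tags.pop(str(key), None)  # deleting a missing key is a no-op

draft = TagDraft({"language": "eng", "BPS": "128000"})
draft.update("ENCODER", "libsvtav1")
draft.delete("BPS")
assert draft.tags == {"ENCODER": "libsvtav1"}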

View File

@@ -5,6 +5,7 @@ class TrackType(Enum):
VIDEO = {'label': 'video', 'index': 1}
AUDIO = {'label': 'audio', 'index': 2}
SUBTITLE = {'label': 'subtitle', 'index': 3}
ATTACHMENT = {'label': 'attachment', 'index': 4}
UNKNOWN = {'label': 'unknown', 'index': 0}
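With ATTACHMENT added, every codec_type ffprobe reports for typical MKV sources maps onto a TrackType member. A small sketch of that mapping via the enum's label() accessor; the fallback mirrors the enum's own UNKNOWN member:

from ffx.track_type import TrackType

def track_type_from_probe(stream: dict) -> TrackType:
    # stream is one entry of ffprobe's "streams" array
    label = stream.get("codec_type", "unknown")
    for track_type in TrackType:
        if track_type.label() == label:
            return track_type
    return TrackType.UNKNOWN

assert track_type_from_probe({"codec_type": "attachment"}) is TrackType.ATTACHMENT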

View File

@@ -4,6 +4,8 @@ class VideoEncoder(Enum):
AV1 = {'label': 'av1', 'index': 1}
VP9 = {'label': 'vp9', 'index': 2}
H264 = {'label': 'h264', 'index': 3}
COPY = {'label': 'copy', 'index': 4}
UNDEFINED = {'label': 'undefined', 'index': 0}
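The new COPY member lets a run request stream passthrough. How the enum translates into ffmpeg arguments is not shown in this diff; the module path and the encoder names below (libsvtav1, libvpx-vp9, libx264) are assumptions for illustration only:

from ffx.video_encoder import VideoEncoder  # module path assumed

ENCODER_TOKENS = {
    VideoEncoder.AV1: ["-c:v", "libsvtav1"],
    VideoEncoder.VP9: ["-c:v", "libvpx-vp9"],
    VideoEncoder.H264: ["-c:v", "libx264"],
    VideoEncoder.COPY: ["-c:v", "copy"],  # pass the video stream through untouched
}

assert ENCODER_TOKENS[VideoEncoder.COPY] == ["-c:v", "copy"]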

tests/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Repo-root tests package for legacy and future test code.

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,138 @@
from __future__ import annotations
from pathlib import Path
import tempfile
import unittest
from tests.support.ffx_bundle import (
PatternTrackSpec,
SourceTrackSpec,
add_show,
build_controller_context,
create_source_fixture,
dispose_controller_context,
expected_output_path,
run_ffx_convert,
)
from ffx.pattern_controller import PatternController
from ffx.track_type import TrackType
try:
import pytest
except ImportError: # pragma: no cover - unittest-only environments
pytest = None
if pytest is not None:
pytestmark = [pytest.mark.integration, pytest.mark.pattern_management]
class PatternManagementCliTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
self.workdir = Path(self.tempdir.name)
self.home_dir = self.workdir / "home"
self.home_dir.mkdir()
self.database_path = self.workdir / "test.db"
def tearDown(self):
self.tempdir.cleanup()
def prepare_duplicate_matching_patterns(self):
context = build_controller_context(self.database_path)
try:
add_show(context, show_id=1)
add_show(context, show_id=2)
controller = PatternController(context)
track_descriptors = [
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO)
]
def to_track_descriptor(spec: PatternTrackSpec):
from ffx.track_descriptor import TrackDescriptor
kwargs = {
TrackDescriptor.INDEX_KEY: spec.index,
TrackDescriptor.SOURCE_INDEX_KEY: spec.source_index,
TrackDescriptor.TRACK_TYPE_KEY: spec.track_type,
TrackDescriptor.TAGS_KEY: dict(spec.tags),
TrackDescriptor.DISPOSITION_SET_KEY: set(spec.dispositions),
}
return TrackDescriptor(**kwargs)
controller.savePatternSchema(
{"show_id": 1, "pattern": r"^dup_(s[0-9]+e[0-9]+)\.mkv$"},
[to_track_descriptor(track_descriptors[0])],
)
controller.savePatternSchema(
{"show_id": 2, "pattern": r"^dup_.*$"},
[to_track_descriptor(track_descriptors[0])],
)
finally:
dispose_controller_context(context)
def test_convert_fails_when_filename_matches_more_than_one_pattern(self):
self.prepare_duplicate_matching_patterns()
source_filename = "dup_s01e01.mkv"
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng"),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-tmdb",
"--no-prompt",
"--no-signature",
str(source_path),
)
self.assertNotEqual(completed.returncode, 0)
error_output = f"{completed.stdout}\n{completed.stderr}"
self.assertIn("matched more than one pattern", error_output)
self.assertFalse(expected_output_path(self.workdir, source_filename).exists())
def test_convert_can_ignore_duplicate_matches_when_no_pattern_is_requested(self):
self.prepare_duplicate_matching_patterns()
source_filename = "dup_s01e01.mkv"
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng"),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-pattern",
"--no-tmdb",
"--no-prompt",
"--no-signature",
str(source_path),
)
self.assertEqual(
0,
completed.returncode,
f"STDOUT:\n{completed.stdout}\nSTDERR:\n{completed.stderr}",
)
self.assertTrue(expected_output_path(self.workdir, source_filename).exists())
if __name__ == "__main__":
unittest.main()
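Why the first convert run must fail: both stored patterns match the same basename. A quick standalone check with the regexes copied from the test:

import re

patterns = [r"^dup_(s[0-9]+e[0-9]+)\.mkv$", r"^dup_.*$"]
matches = [p for p in patterns if re.match(p, "dup_s01e01.mkv")]
assert len(matches) == 2  # ambiguous: ffx cannot pick a pattern automatically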

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,436 @@
from __future__ import annotations
import json
from pathlib import Path
import tempfile
import unittest
from tests.support.ffx_bundle import (
PatternTrackSpec,
SourceTrackSpec,
create_source_fixture,
expected_output_path,
extract_first_subtitle_text,
ffprobe_json,
get_tag,
prepare_pattern_database,
run_ffx_convert,
write_vtt,
)
from ffx.track_type import TrackType
try:
import pytest
except ImportError: # pragma: no cover - unittest-only environments
pytest = None
if pytest is not None:
pytestmark = [pytest.mark.integration, pytest.mark.subtrack_mapping]
class SubtrackMappingBundleTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
self.workdir = Path(self.tempdir.name)
self.home_dir = self.workdir / "home"
self.home_dir.mkdir()
self.database_path = self.workdir / "test.db"
def tearDown(self):
self.tempdir.cleanup()
def write_config(self, data: dict) -> None:
config_dir = self.home_dir / ".local" / "etc"
config_dir.mkdir(parents=True, exist_ok=True)
(config_dir / "ffx.json").write_text(json.dumps(data), encoding="utf-8")
def assertCompleted(self, completed):
if completed.returncode != 0:
self.fail(
"FFX convert failed\n"
f"STDOUT:\n{completed.stdout}\n"
f"STDERR:\n{completed.stderr}"
)
def test_pattern_reorders_and_omits_tracks_preserving_metadata_and_group_order(self):
source_filename = "reorder_s01e01.mkv"
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0", title="Video Zero"),
SourceTrackSpec(
TrackType.SUBTITLE,
identity="subtitle-1",
language="eng",
title="First Subtitle",
subtitle_lines=("first embedded subtitle",),
),
SourceTrackSpec(
TrackType.AUDIO,
identity="audio-2",
language="deu",
title="German Audio",
),
SourceTrackSpec(
TrackType.SUBTITLE,
identity="subtitle-3",
language="fra",
title="Second Subtitle",
subtitle_lines=("second embedded subtitle",),
),
SourceTrackSpec(TrackType.ATTACHMENT, attachment_name="ordered.ttf"),
],
)
prepare_pattern_database(
self.database_path,
r"^reorder_(s[0-9]+e[0-9]+)\.mkv$",
[
PatternTrackSpec(
index=0,
source_index=0,
track_type=TrackType.VIDEO,
tags={"THIS_IS": "video-0", "title": "Video Zero"},
),
PatternTrackSpec(
index=1,
source_index=2,
track_type=TrackType.AUDIO,
tags={"THIS_IS": "audio-2", "language": "deu", "title": "German Audio"},
),
PatternTrackSpec(
index=2,
source_index=1,
track_type=TrackType.SUBTITLE,
tags={"THIS_IS": "subtitle-1", "language": "eng", "title": "First Subtitle"},
),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-tmdb",
"--no-prompt",
"--no-signature",
str(source_path),
)
self.assertCompleted(completed)
output_path = expected_output_path(self.workdir, source_filename)
self.assertTrue(output_path.is_file(), output_path)
streams = ffprobe_json(output_path)["streams"]
self.assertEqual(
[stream["codec_type"] for stream in streams],
["video", "audio", "subtitle", "attachment"],
)
self.assertEqual(
[get_tag(streams[index], "THIS_IS") for index in range(3)],
["video-0", "audio-2", "subtitle-1"],
)
self.assertNotIn(
"subtitle-3",
[get_tag(stream, "THIS_IS") for stream in streams if stream["codec_type"] != "attachment"],
)
self.assertEqual(streams[-1]["codec_name"], "ttf")
extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
self.assertIn("first embedded subtitle", extracted_subtitle)
self.assertNotIn("second embedded subtitle", extracted_subtitle)
def test_cli_rearrange_streams_reorders_tracks_without_database_pattern(self):
source_filename = "cli_s01e01.mkv"
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="First Audio"),
SourceTrackSpec(TrackType.AUDIO, identity="audio-2", language="deu", title="Second Audio"),
SourceTrackSpec(TrackType.SUBTITLE, identity="subtitle-3", language="eng", title="Subtitle"),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-pattern",
"--no-tmdb",
"--no-prompt",
"--no-signature",
"--rearrange-streams",
"0,2,1,3",
str(source_path),
)
self.assertCompleted(completed)
output_path = expected_output_path(self.workdir, source_filename)
streams = ffprobe_json(output_path)["streams"]
self.assertEqual(
[stream["codec_type"] for stream in streams],
["video", "audio", "audio", "subtitle"],
)
self.assertEqual(
[get_tag(stream, "THIS_IS") for stream in streams],
["video-0", "audio-2", "audio-1", "subtitle-3"],
)
def test_no_pattern_stream_remove_list_clears_copied_stream_metadata(self):
source_filename = "remove_tags_s01e01.mkv"
self.write_config(
{
"metadata": {
"streams": {
"remove": ["BPS"],
}
}
}
)
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(
TrackType.VIDEO,
identity="video-0",
extra_tags={"BPS": "remove-me", "KEEP_ME": "video-keep"},
),
SourceTrackSpec(
TrackType.AUDIO,
identity="audio-1",
language="eng",
title="Main Audio",
extra_tags={"BPS": "remove-me", "KEEP_ME": "audio-keep"},
),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-pattern",
"--no-tmdb",
"--no-prompt",
"--no-signature",
str(source_path),
)
self.assertCompleted(completed)
output_path = expected_output_path(self.workdir, source_filename)
streams = ffprobe_json(output_path)["streams"]
self.assertEqual(
[stream["codec_type"] for stream in streams],
["video", "audio"],
)
self.assertEqual(get_tag(streams[0], "THIS_IS"), "video-0")
self.assertEqual(get_tag(streams[0], "KEEP_ME"), "video-keep")
self.assertIsNone(get_tag(streams[0], "BPS"))
self.assertEqual(get_tag(streams[1], "THIS_IS"), "audio-1")
self.assertEqual(get_tag(streams[1], "KEEP_ME"), "audio-keep")
self.assertIsNone(get_tag(streams[1], "BPS"))
def test_pattern_validation_fails_for_nonexistent_source_track_reference(self):
source_filename = "invalid_s01e01.mkv"
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
SourceTrackSpec(TrackType.AUDIO, identity="audio-1"),
SourceTrackSpec(TrackType.SUBTITLE, identity="subtitle-2"),
],
)
prepare_pattern_database(
self.database_path,
r"^invalid_(s[0-9]+e[0-9]+)\.mkv$",
[
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
PatternTrackSpec(index=1, source_index=99, track_type=TrackType.SUBTITLE),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-tmdb",
"--no-prompt",
"--no-signature",
str(source_path),
)
self.assertNotEqual(completed.returncode, 0)
error_output = f"{completed.stdout}\n{completed.stderr}"
self.assertIn("non-existent source track #99", error_output)
self.assertFalse(expected_output_path(self.workdir, source_filename).exists())
def test_external_subtitle_file_replaces_payload_and_overrides_metadata(self):
source_filename = "substitute_s01e01.mkv"
self.write_config(
{
"metadata": {
"streams": {
"remove": ["BPS"],
}
}
}
)
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="Main Audio"),
SourceTrackSpec(
TrackType.SUBTITLE,
identity="embedded-subtitle",
language="eng",
title="Embedded Title",
extra_tags={"BPS": "remove-me", "EXTERNAL_KEEP": "keep-me"},
subtitle_lines=("embedded subtitle payload",),
),
],
)
write_vtt(
self.workdir / "substitute_s01e01_2_deu.vtt",
("external subtitle payload",),
)
prepare_pattern_database(
self.database_path,
r"^substitute_(s[0-9]+e[0-9]+)\.mkv$",
[
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
PatternTrackSpec(index=1, source_index=1, track_type=TrackType.AUDIO),
PatternTrackSpec(index=2, source_index=2, track_type=TrackType.SUBTITLE),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-tmdb",
"--no-prompt",
"--no-signature",
"--subtitle-directory",
str(self.workdir),
"--subtitle-prefix",
"substitute",
str(source_path),
)
self.assertCompleted(completed)
output_path = expected_output_path(self.workdir, source_filename)
streams = ffprobe_json(output_path)["streams"]
subtitle_stream = [stream for stream in streams if stream["codec_type"] == "subtitle"][0]
self.assertEqual(get_tag(subtitle_stream, "language"), "deu")
self.assertEqual(get_tag(subtitle_stream, "title"), "Embedded Title")
self.assertEqual(get_tag(subtitle_stream, "THIS_IS"), "embedded-subtitle")
self.assertEqual(get_tag(subtitle_stream, "EXTERNAL_KEEP"), "keep-me")
self.assertIsNone(get_tag(subtitle_stream, "BPS"))
extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
self.assertIn("external subtitle payload", extracted_subtitle)
self.assertNotIn("embedded subtitle payload", extracted_subtitle)
def test_subtitle_prefix_uses_configured_base_directory_when_directory_is_omitted(self):
source_filename = "substitute_default_s01e01.mkv"
subtitle_prefix = "substitute_default"
subtitles_base_dir = self.home_dir / ".local" / "var" / "sync" / "subtitles"
resolved_subtitle_dir = subtitles_base_dir / subtitle_prefix
resolved_subtitle_dir.mkdir(parents=True, exist_ok=True)
self.write_config(
{
"subtitlesDirectory": "~/.local/var/sync/subtitles",
"metadata": {
"streams": {
"remove": ["BPS"],
}
}
}
)
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
SourceTrackSpec(TrackType.AUDIO, identity="audio-1", language="eng", title="Main Audio"),
SourceTrackSpec(
TrackType.SUBTITLE,
identity="embedded-subtitle",
language="eng",
title="Embedded Title",
extra_tags={"BPS": "remove-me", "EXTERNAL_KEEP": "keep-me"},
subtitle_lines=("embedded subtitle payload",),
),
],
)
write_vtt(
resolved_subtitle_dir / f"{subtitle_prefix}_s01e01_2_deu.vtt",
("external subtitle payload",),
)
prepare_pattern_database(
self.database_path,
r"^substitute_default_(s[0-9]+e[0-9]+)\.mkv$",
[
PatternTrackSpec(index=0, source_index=0, track_type=TrackType.VIDEO),
PatternTrackSpec(index=1, source_index=1, track_type=TrackType.AUDIO),
PatternTrackSpec(index=2, source_index=2, track_type=TrackType.SUBTITLE),
],
)
completed = run_ffx_convert(
self.workdir,
self.home_dir,
self.database_path,
"--video-encoder",
"copy",
"--no-tmdb",
"--no-prompt",
"--no-signature",
"--subtitle-prefix",
subtitle_prefix,
str(source_path),
)
self.assertCompleted(completed)
output_path = expected_output_path(self.workdir, source_filename)
streams = ffprobe_json(output_path)["streams"]
subtitle_stream = [stream for stream in streams if stream["codec_type"] == "subtitle"][0]
self.assertEqual(get_tag(subtitle_stream, "language"), "deu")
self.assertEqual(get_tag(subtitle_stream, "title"), "Embedded Title")
self.assertEqual(get_tag(subtitle_stream, "THIS_IS"), "embedded-subtitle")
self.assertEqual(get_tag(subtitle_stream, "EXTERNAL_KEEP"), "keep-me")
self.assertIsNone(get_tag(subtitle_stream, "BPS"))
extracted_subtitle = extract_first_subtitle_text(self.workdir, output_path)
self.assertIn("external subtitle payload", extracted_subtitle)
self.assertNotIn("embedded subtitle payload", extracted_subtitle)
if __name__ == "__main__":
unittest.main()
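The --rearrange-streams value reads as the output order of source stream indices. A sketch of that permutation; rendering it as ffmpeg -map tokens is an assumption about the implementation, which this diff does not show:

def rearrange_map_tokens(order: str, input_index: int = 0) -> list:
    tokens = []
    for source_index in (int(part) for part in order.split(",")):
        tokens += ["-map", f"{input_index}:{source_index}"]
    return tokens

assert rearrange_map_tokens("0,2,1,3") == [
    "-map", "0:0", "-map", "0:2", "-map", "0:1", "-map", "0:3",
]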

View File

@@ -0,0 +1,106 @@
from __future__ import annotations
import json
import os
from pathlib import Path
import subprocess
import sys
import tempfile
import unittest
from tests.support.ffx_bundle import SourceTrackSpec, create_source_fixture
from ffx.track_type import TrackType
try:
import pytest
except ImportError: # pragma: no cover - unittest-only environments
pytest = None
if pytest is not None:
pytestmark = [pytest.mark.integration]
SRC_ROOT = Path(__file__).resolve().parents[2] / "src"
def run_ffx_unmux(workdir: Path, home_dir: Path, database_path: Path, *args: str) -> subprocess.CompletedProcess[str]:
env = os.environ.copy()
env["HOME"] = str(home_dir)
existing_pythonpath = env.get("PYTHONPATH", "")
env["PYTHONPATH"] = str(SRC_ROOT) if not existing_pythonpath else f"{SRC_ROOT}{os.pathsep}{existing_pythonpath}"
command = [
sys.executable,
"-m",
"ffx",
"--database-file",
str(database_path),
"unmux",
*args,
]
return subprocess.run(command, cwd=workdir, env=env, capture_output=True, text=True)
class UnmuxCliTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
self.workdir = Path(self.tempdir.name)
self.home_dir = self.workdir / "home"
self.home_dir.mkdir()
self.database_path = self.workdir / "test.db"
def tearDown(self):
self.tempdir.cleanup()
def write_config(self, data: dict) -> None:
config_dir = self.home_dir / ".local" / "etc"
config_dir.mkdir(parents=True, exist_ok=True)
(config_dir / "ffx.json").write_text(json.dumps(data), encoding="utf-8")
def assertCompleted(self, completed):
if completed.returncode != 0:
self.fail(
"FFX unmux failed\n"
f"STDOUT:\n{completed.stdout}\n"
f"STDERR:\n{completed.stderr}"
)
def test_subtitles_only_without_output_directory_uses_configured_base_plus_label(self):
self.write_config(
{
"subtitlesDirectory": "~/.local/var/sync/subtitles",
}
)
source_filename = "unmux_s01e01.mkv"
source_path = create_source_fixture(
self.workdir,
source_filename,
[
SourceTrackSpec(TrackType.VIDEO, identity="video-0"),
SourceTrackSpec(
TrackType.SUBTITLE,
identity="subtitle-1",
language="eng",
subtitle_lines=("subtitle payload",),
),
],
)
completed = run_ffx_unmux(
self.workdir,
self.home_dir,
self.database_path,
"--subtitles-only",
"--label",
"dball",
str(source_path),
)
self.assertCompleted(completed)
expected_directory = self.home_dir / ".local" / "var" / "sync" / "subtitles" / "dball"
self.assertTrue(expected_directory.is_dir(), expected_directory)
if __name__ == "__main__":
unittest.main()
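The directory the test asserts is simply the configured base, tilde-expanded, joined with the --label value. As a sketch:

from pathlib import Path

def resolve_subtitle_dir(configured_base: str, label: str) -> Path:
    return Path(configured_base).expanduser() / label

assert resolve_subtitle_dir("/home/user/.local/var/sync/subtitles", "dball") \
    == Path("/home/user/.local/var/sync/subtitles/dball")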

tests/legacy/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Legacy custom FFX test harness modules.

View File

@@ -24,8 +24,9 @@ class BasenameCombinator():
@staticmethod
def getClassReference(identifier):
importlib.import_module(f"ffx.test.basename_combinator_{ identifier }")
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.basename_combinator_{ identifier }"]):
module_name = f"tests.legacy.basename_combinator_{ identifier }"
importlib.import_module(module_name)
for name, obj in inspect.getmembers(sys.modules[module_name]):
#HINT: Exclude the BasenameCombinator base class itself, which the module's import appears to pull in
if inspect.isclass(obj) and name != 'BasenameCombinator' and name.startswith('BasenameCombinator'):
return obj
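The same import-and-scan lookup recurs in every combinator and scenario below; a generic sketch of the pattern, with the module path and class-name prefix as parameters instead of the hard-coded strings:

import importlib
import inspect

def load_subclass(module_name: str, base_name: str):
    # Import the module, then return the first class whose name extends the
    # base name (e.g. BasenameCombinator0), skipping the base class itself,
    # which the module's own imports pull in.
    module = importlib.import_module(module_name)
    for name, obj in inspect.getmembers(module, inspect.isclass):
        if name != base_name and name.startswith(base_name):
            return obj
    return None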

View File

@@ -24,8 +24,9 @@ class DispositionCombinator2():
@staticmethod
def getClassReference(identifier):
importlib.import_module(f"ffx.test.disposition_combinator_2_{ identifier }")
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.disposition_combinator_2_{ identifier }"]):
module_name = f"tests.legacy.disposition_combinator_2_{ identifier }"
importlib.import_module(module_name)
for name, obj in inspect.getmembers(sys.modules[module_name]):
#HINT: Exclude the DispositionCombinator2 base class itself, which the module's import appears to pull in
if inspect.isclass(obj) and name != 'DispositionCombinator2' and name.startswith('DispositionCombinator2'):
return obj

View File

@@ -23,8 +23,9 @@ class DispositionCombinator3():
@staticmethod
def getClassReference(identifier):
importlib.import_module(f"ffx.test.disposition_combinator_3_{ identifier }")
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.disposition_combinator_3_{ identifier }"]):
module_name = f"tests.legacy.disposition_combinator_3_{ identifier }"
importlib.import_module(module_name)
for name, obj in inspect.getmembers(sys.modules[module_name]):
#HINT: Exclude the DispositionCombinator3 base class itself, which the module's import appears to pull in
if inspect.isclass(obj) and name != 'DispositionCombinator3' and name.startswith('DispositionCombinator3'):
return obj

View File

@@ -1,11 +1,9 @@
import os, math, tempfile, click
from ffx.ffx_controller import FfxController
from ffx.process import executeProcess
from ffx.media_descriptor import MediaDescriptor
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.track_type import TrackType
from ffx.helper import dictCache
@@ -149,7 +147,6 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,
# subtitleFilePath = createVttFile(SHORT_SUBTITLE_SEQUENCE)
# commandTokens = FfxController.COMMAND_TOKENS
commandTokens = ['ffmpeg', '-y']
generatorCache = []
@@ -164,7 +161,8 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,
subIndexCounter = {}
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
trackType = trackDescriptor.getType()
@@ -231,15 +229,14 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,
f"{mediaTagKey}={mediaTagValue}"]
subIndexCounter[trackType] += 1
#TODO: Optimize; too many runs
ffxContext = {'config': ConfigurationController(), 'logger': logger}
fc = FfxController(ffxContext, mediaDescriptor)
mdcs = MediaDescriptorChangeSet(ffxContext, mediaDescriptor)
commandTokens += (generatorTokens
+ importTokens
+ mappingTokens
+ metadataTokens
+ fc.generateDispositionTokens())
+ mdcs.generateDispositionTokens())
commandTokens += ['-t', str(length)]
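For reference, ffmpeg disposition tokens take the form -disposition:<stream> <flags>. A sketch of the kind of output generateDispositionTokens() presumably produces; this diff only moves the call from FfxController to MediaDescriptorChangeSet and does not show the token layout:

def disposition_tokens(stream_specifier: str, flags: list) -> list:
    # "0" clears all dispositions; multiple flags are joined with "+".
    value = "+".join(flags) if flags else "0"
    return [f"-disposition:{stream_specifier}", value]

assert disposition_tokens("a:0", ["default"]) == ["-disposition:a:0", "default"]
assert disposition_tokens("s:1", []) == ["-disposition:s:1", "0"]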

View File

@@ -25,8 +25,9 @@ class LabelCombinator():
@staticmethod
def getClassReference(identifier):
importlib.import_module(f"ffx.test.{LabelCombinator.PREFIX}{ identifier }")
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.{LabelCombinator.PREFIX}{ identifier }"]):
module_name = f"tests.legacy.{LabelCombinator.PREFIX}{ identifier }"
importlib.import_module(module_name)
for name, obj in inspect.getmembers(sys.modules[module_name]):
#HINT: Exclude the LabelCombinator base class itself, which the module's import appears to pull in
if inspect.isclass(obj) and name != 'LabelCombinator' and name.startswith('LabelCombinator'):
return obj

View File

@@ -22,8 +22,9 @@ class MediaCombinator():
@staticmethod
def getClassReference(identifier):
importlib.import_module(f"ffx.test.media_combinator_{ identifier }")
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.media_combinator_{ identifier }"]):
module_name = f"tests.legacy.media_combinator_{ identifier }"
importlib.import_module(module_name)
for name, obj in inspect.getmembers(sys.modules[module_name]):
#HINT: Exclude the MediaCombinator base class itself, which the module's import appears to pull in
if inspect.isclass(obj) and name != 'MediaCombinator' and name.startswith('MediaCombinator'):
return obj

View File

@@ -22,8 +22,9 @@ class MediaTagCombinator():
@staticmethod
def getClassReference(identifier):
importlib.import_module(f"ffx.test.media_tag_combinator_{ identifier }")
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.media_tag_combinator_{ identifier }"]):
module_name = f"tests.legacy.media_tag_combinator_{ identifier }"
importlib.import_module(module_name)
for name, obj in inspect.getmembers(sys.modules[module_name]):
#HINT: Exclude the MediaTagCombinator base class itself, which the module's import appears to pull in
if inspect.isclass(obj) and name != 'MediaTagCombinator' and name.startswith('MediaTagCombinator'):
return obj

View File

@@ -4,7 +4,7 @@ from ffx.show_controller import ShowController
from ffx.pattern_controller import PatternController
from ffx.media_controller import MediaController
from ffx.test.helper import createEmptyDirectory
from .helper import createEmptyDirectory
from ffx.database import databaseContext
class Scenario():
@@ -90,11 +90,7 @@ class Scenario():
def __init__(self, context = None):
self._context = context
self._testDirectory = createEmptyDirectory()
self._ffxExecutablePath = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(__file__))),
'ffx.py')
self._ffxModuleName = 'ffx'
self._logger = context['logger']
self._reportLogger = context['report_logger']
@@ -146,8 +142,9 @@ class Scenario():
@staticmethod
def getClassReference(identifier):
importlib.import_module(f"ffx.test.scenario_{ identifier }")
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.scenario_{ identifier }"]):
module_name = f"tests.legacy.scenario_{ identifier }"
importlib.import_module(module_name)
for name, obj in inspect.getmembers(sys.modules[module_name]):
#HINT: Exclude the Scenario base class itself, which the module's import appears to pull in
if inspect.isclass(obj) and name != 'Scenario' and name.startswith('Scenario'):
return obj
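The scenarios now launch ffx as a module via the interpreter instead of resolving a script path three directories up from the test file; PYTHONPATH (or an installed package) is assumed to make the module importable. The minimal invocation:

import subprocess
import sys

# Equivalent of the old [sys.executable, self._ffxExecutablePath] prefix:
command = [sys.executable, "-m", "ffx", "--help"]  # "--help" is illustrative
completed = subprocess.run(command, capture_output=True, text=True)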

View File

@@ -2,7 +2,7 @@ import os, sys, click, glob
from .scenario import Scenario
from ffx.test.helper import createMediaTestFile
from .helper import createMediaTestFile
from ffx.process import executeProcess
from ffx.file_properties import FileProperties
@@ -13,9 +13,9 @@ from ffx.track_descriptor import TrackDescriptor
from ffx.track_type import TrackType
from ffx.track_disposition import TrackDisposition
from ffx.test.media_combinator_0 import MediaCombinator0
from .media_combinator_0 import MediaCombinator0
from ffx.test.basename_combinator import BasenameCombinator
from .basename_combinator import BasenameCombinator
class Scenario1(Scenario):
@@ -92,8 +92,7 @@ class Scenario1(Scenario):
# Phase 2: Run ffx
commandSequence = [sys.executable,
self._ffxExecutablePath]
commandSequence = [sys.executable, '-m', self._ffxModuleName]
if self._context['verbosity']:
commandSequence += ['--verbose',

View File

@@ -2,7 +2,7 @@ import os, sys, click
from .scenario import Scenario
from ffx.test.helper import createMediaTestFile
from .helper import createMediaTestFile
from ffx.process import executeProcess
from ffx.file_properties import FileProperties
@@ -13,7 +13,7 @@ from ffx.track_descriptor import TrackDescriptor
from ffx.track_type import TrackType
from ffx.track_disposition import TrackDisposition
from ffx.test.media_combinator import MediaCombinator
from .media_combinator import MediaCombinator
class Scenario2(Scenario):
@@ -77,8 +77,7 @@ class Scenario2(Scenario):
# Phase 2: Run ffx
commandSequence = [sys.executable,
self._ffxExecutablePath]
commandSequence = [sys.executable, '-m', self._ffxModuleName]
if self._context['verbosity']:
commandSequence += ['--verbose',
@@ -122,7 +121,8 @@ class Scenario2(Scenario):
resultFileProperties = FileProperties(testContext, resultFile)
resultMediaDescriptor = resultFileProperties.getMediaDescriptor()
resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
# resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
resultMediaTracks = resultMediaDescriptor.getTrackDescriptors()
for assertIndex in range(len(assertSelectorList)):

View File

@@ -2,11 +2,11 @@ import os, sys, click
from .scenario import Scenario
from ffx.test.helper import createMediaTestFile
from .helper import createMediaTestFile
from ffx.process import executeProcess
from ffx.database import databaseContext
from ffx.test.helper import createEmptyDirectory
from .helper import createEmptyDirectory
from ffx.helper import getEpisodeFileBasename
from ffx.file_properties import FileProperties
@@ -17,8 +17,8 @@ from ffx.track_descriptor import TrackDescriptor
from ffx.track_type import TrackType
from ffx.track_disposition import TrackDisposition
from ffx.test.media_combinator import MediaCombinator
from ffx.test.indicator_combinator import IndicatorCombinator
from .media_combinator import MediaCombinator
from .indicator_combinator import IndicatorCombinator
from ffx.show_descriptor import ShowDescriptor
@@ -163,8 +163,7 @@ class Scenario4(Scenario):
# Phase 3: Run ffx
commandSequence = [sys.executable,
self._ffxExecutablePath]
commandSequence = [sys.executable, '-m', self._ffxModuleName]
if self._context['verbosity']:
commandSequence += ['--verbose',
@@ -223,7 +222,8 @@ class Scenario4(Scenario):
self._logger.debug(f"{variantLabel}: Result file properties: {rfp.getFilename()} season={rfp.getSeason()} episode={rfp.getEpisode()}")
rmd = rfp.getMediaDescriptor()
rmt = rmd.getAllTrackDescriptors()
# rmt = rmd.getAllTrackDescriptors()
rmt = rmd.getTrackDescriptors()
for l in rmd.getConfiguration(label = 'resultMediaDescriptor'):
self._logger.debug(l)

Some files were not shown because too many files have changed in this diff.