111 Commits

Author SHA1 Message Date
Javanaut
f288d445e4 Adds requirements, streamlines CLI helper procedures 2026-04-09 00:59:37 +02:00
Javanaut
d9db6da191 tf 2026-01-31 17:30:35 +01:00
Javanaut
5443881ea1 tf 2026-01-31 17:12:27 +01:00
Javanaut
8946b57456 fix attachment descriptor handling 2026-01-31 12:06:24 +01:00
Javanaut
686239491b Adapt for .ssa subtitles with attached fonts 2026-01-31 10:00:14 +01:00
Javanaut
126ba4487c fixes TextArea 2025-11-07 15:54:14 +01:00
Javanaut
447cda19ef ff 2025-11-07 15:49:52 +01:00
Javanaut
f1ba913a98 ff 2025-11-07 15:45:47 +01:00
Javanaut
59336aafb7 dd 2025-11-07 15:43:31 +01:00
Javanaut
fd5ad3ed56 Removes build artifacts from branch 2025-11-07 15:38:07 +01:00
Javanaut
2d03a3bb10 ff 2025-11-07 15:17:13 +01:00
Javanaut
4dc02d52a2 Adds notes field for patterns 2025-11-07 15:14:55 +01:00
Javanaut
ed0cea9c26 Adapts Q Message 2025-11-07 14:11:50 +01:00
Javanaut
15bfbdbe88 Adds setting quality according to pattern default 2025-11-06 14:08:00 +01:00
Javanaut
c354ba09ba Adds pattern quality UI field 2025-11-05 21:24:53 +01:00
2eeea08be0 Merge branch 'dev' of gitea.maveno.de:Javanaut/ffx into dev 2025-10-08 11:01:15 +02:00
fbfc8ea965 rfc niceness/cpulimit 2025-10-08 10:59:04 +02:00
Javanaut
6ec5db2ea2 ff 2025-10-07 10:10:27 +02:00
Javanaut
8feced6f1c Lang/Codec changes 2025-10-07 10:10:09 +02:00
Javanaut
285649c30a Fix chinese iso code 2025-09-09 08:26:27 +02:00
Javanaut
558da817f1 Fügt hinzu Ländercodes Bokmal und Filipino 2025-09-02 23:46:56 +02:00
Javanaut
2a84327f69 Fügt hinzu Ländercodes Filipinisch und Bokmal 2025-09-02 23:38:07 +02:00
535b11dca5 fix pattern markup 2025-03-27 08:04:41 +01:00
8edc715795 typo 2025-03-04 22:58:14 +01:00
cd203703e8 adding languages 2025-03-04 22:56:39 +01:00
8f2367b71e ff 2025-02-17 00:11:38 +01:00
101c7605d2 deint 2025-02-16 23:54:32 +01:00
a5b58e34e4 ff 2025-02-11 20:19:55 +01:00
a32e86550c ff 2025-02-11 19:55:12 +01:00
5de3778ae5 ff 2025-02-11 19:52:05 +01:00
81aab0657e ff 2025-02-11 19:48:38 +01:00
8514a0c152 ff 2025-02-02 17:01:22 +01:00
c846147c64 ff 2025-02-02 17:00:33 +01:00
e52297b2ba ff 2025-02-02 16:58:24 +01:00
655833f13e ff 2025-02-02 16:36:36 +01:00
03dd02ed87 ff 2025-02-02 16:25:19 +01:00
b6ee197536 ff 2025-02-02 16:14:16 +01:00
d8374ae9f2 ff 2025-02-02 16:12:11 +01:00
f262eaa120 ff 2025-02-02 16:10:59 +01:00
d940a6e92a ff 2025-02-02 16:03:09 +01:00
e1395aeca0 tf auto_crop 2025-02-02 16:00:57 +01:00
48841c5750 ff 2025-02-02 13:35:11 +01:00
d558bbf6bd ff 2025-02-02 13:34:06 +01:00
b05d989581 ff 2025-02-02 13:29:50 +01:00
bc8af53525 ff 2025-02-02 13:29:08 +01:00
6bd1587947 ff 2025-02-02 13:27:19 +01:00
7d6531b40e ff 2025-02-02 13:26:28 +01:00
ab435a4c76 ff 2025-02-02 13:24:06 +01:00
0a88e366b1 ff 2025-02-02 13:22:30 +01:00
1c80cd7d7d ff 2025-02-02 13:19:21 +01:00
a45c180aaa ff 2025-02-02 13:17:28 +01:00
0b204ff19c ff 2025-02-02 13:09:59 +01:00
d7ec5f7620 ff 2025-02-02 13:07:35 +01:00
3f64304374 ff 2025-02-02 13:05:28 +01:00
b459272149 ff 2025-02-02 13:02:08 +01:00
4b05fc194b ff 2025-02-02 12:53:55 +01:00
9d088819ab ff 2025-02-02 12:50:56 +01:00
e20f7a1f67 ff 2025-02-02 12:48:07 +01:00
9d683dfa84 tf h264/mkv 2025-02-02 12:46:45 +01:00
867756c661 ff 2025-02-02 12:22:34 +01:00
f81a6edb07 ff 2025-02-02 12:21:38 +01:00
ec4bce473c ff 2025-02-02 12:20:25 +01:00
bf882b741f ff 2025-02-02 12:19:36 +01:00
a4e25b5ec8 ff 2025-02-02 12:16:38 +01:00
ff6bacb0d5 ff 2025-02-02 12:11:26 +01:00
f32b7a06c0 ff 2025-02-02 12:10:56 +01:00
7ceed58e7b add cropdetect stub 2025-02-02 12:10:04 +01:00
153f401dd3 ff 2025-01-17 18:27:29 +01:00
7f1f34fb9f multiplicity iso languages 2025-01-17 18:25:25 +01:00
21fe7cb1eb ff 2025-01-14 22:10:20 +01:00
9e63184524 tf import dubtitles for movies 2025-01-14 22:02:19 +01:00
3742221189 5.0 channel layout 2025-01-14 21:30:04 +01:00
478ac15ab8 ff 2025-01-14 08:06:14 +01:00
ef0a01bc9b ff 2025-01-14 08:04:55 +01:00
802c11be44 ff 2025-01-14 08:00:41 +01:00
4cbb135772 ff 2025-01-14 08:00:14 +01:00
3d52442471 ff 2025-01-14 07:56:35 +01:00
81640192ab ff 2025-01-14 00:45:33 +01:00
81d760aabe ff 2025-01-14 00:44:10 +01:00
c0eff679f7 ff 2025-01-14 00:41:28 +01:00
07097058d7 add mpeg format 2025-01-14 00:30:57 +01:00
cd7a338541 ff 2024-12-31 11:12:11 +01:00
be652f8efb copy only mode 2024-12-31 10:56:17 +01:00
dd51b14d49 ff 2024-12-27 09:10:08 +01:00
a471808392 add mp3 codec 2024-12-27 09:08:59 +01:00
b3da8ce738 add mpeg-4 format 2024-12-18 22:00:57 +01:00
fe0c078c3f ff 2024-12-15 17:16:57 +01:00
962522b974 ff 2024-12-15 17:13:39 +01:00
24367ea08a ff 2024-12-15 17:13:13 +01:00
f0eebd0bea ff 2024-12-15 17:12:45 +01:00
c8e21b9260 modipy ansible role for pypi packaging 2024-12-15 17:02:03 +01:00
cdc1664779 adapt for manjaro 2024-12-15 16:53:36 +01:00
Maveno
2849eda05a perm filter out png thumbnails 2024-11-29 07:08:59 +01:00
Maveno
cfb2df8d66 pf png tracks 2024-11-27 23:40:22 +01:00
Maveno
12c8ad3782 add codec eac3 png 2024-11-25 08:13:02 +01:00
Maveno
74a39a8f9a #433 Descriptor Pattern Checks 2024-11-24 14:01:55 +01:00
Maveno
5eacb0d0cb #411 Input/Output Pfade 2024-11-24 13:21:46 +01:00
Maveno
e8c0c3d646 fix unchanged tracks for external files 2024-11-24 12:56:31 +01:00
Maveno
6b2671a1f5 ff 2024-11-23 18:08:24 +01:00
Maveno
2d8622506e Rework Descriptor Diff 2024-11-23 13:26:44 +01:00
Maveno
86cc7dfc6f nighl 2024-11-20 22:12:40 +01:00
Maveno
d84bee74c4 ff 2024-11-20 19:01:23 +01:00
Maveno
488caa7a08 ff 2024-11-19 07:56:37 +01:00
Maveno
62877dfed6 ff 2024-11-19 07:54:39 +01:00
Maveno
87ff94e204 add codec srt 2024-11-19 07:54:06 +01:00
Maveno
0c78ed7cf7 ff 2024-11-18 21:15:29 +01:00
Maveno
4db9bfd103 ff 2024-11-18 20:57:04 +01:00
Maveno
db7700a6b9 fix #409: Doppelter Show-Eintrag bei ffx inspect 2024-11-18 20:53:15 +01:00
Maveno
222234f978 hf fix tmdb filename filters 2024-11-18 18:37:31 +01:00
Maveno
3672474ff5 ff 2024-11-18 07:53:07 +01:00
Maveno
5ff0fc3fad hf Episodenteil Substitutionen 2024-11-18 07:51:23 +01:00
46 changed files with 3300 additions and 705 deletions

11
.gitignore vendored
View File

@@ -2,9 +2,14 @@ __pycache__
junk/
.vscode
.ipynb_checkpoints/
ansible/inventory/hawaii.yml
ansible/inventory/peppermint.yml
tools/ansible/inventory/hawaii.yml
tools/ansible/inventory/peppermint.yml
tools/ansible/inventory/cappuccino.yml
tools/ansible/inventory/group_vars/all.yml
ffx_test_report.log
bin/conversiontest.py
*.egg-info/
build/
dist/
*.egg-info/
.codex

376
AGENTS.md Normal file
View File

@@ -0,0 +1,376 @@
# AGENTS.md
This file is the entry point for agent guidance in this repository.
It is intentionally generic and reusable across projects. Keep this file focused on non-project-specific constraints, working style, and the structure used to link more detailed guidance.
# Purpose
- Provide a small default rule set for agents working in this repository.
- Keep the base guidance modular and easy to extend.
- Separate reusable agent behavior from project-specific requirements.
# Comment Syntax
- A segment wrapped in `<!--` and `-->` is a comment and must be ignored by agents.
- Use HTML comments for optional guidance that should stay inactive until enabled.
- To enable an optional segment, remove the surrounding `<!--` and `-->` markers.
# Core Principles
- Prefer the simplest solution that satisfies the current goal.
- Keep guidance lightweight: only add detail when it meaningfully improves outcomes.
- Reuse modular guideline files instead of expanding this file indefinitely.
- Treat project-specific documents as the source of truth for project behavior.
- When guidance conflicts, use the most specific applicable document.
# Rule Terms
- A `rule` is the general term for any constraint, requirement, definition, or similar guidance item.
- A `rule set` addresses all rules inside one file that share the same rule set ID.
- Any rule inside a rule set shall use an ID following the schema `RULESET-0001`, `RULESET-0002`, and so on.
- Rules without a rule set ID are also valid, but they are not addressable by rule ID.
# Scope Of This File
This file should contain:
- Generic agent behavior and constraints.
- Rules that are reusable across multiple projects.
- Links to optional guideline modules.
- Links to project-specific requirements.
- Commented optional templates for released-product documentation and agent-output locations.
This file should not contain:
- Project business requirements.
- Project architecture decisions.
- Stack-specific implementation details unless they are universally applicable.
- Task-specific runbooks that belong in dedicated modules.
# Default Agent Behavior
- Read the relevant context before making changes.
- Prefer small, understandable edits over broad refactors.
- Preserve existing patterns unless there is a clear reason to change them.
- Document assumptions when context is missing.
- Ignore HTML comment segments.
- If a more specific enabled guideline exists for the current task, follow it.
# Guideline Structure
Use the following structure for reusable guidance files and project-specific documentation as needed:
```text
/
|-- AGENTS.md
|-- guidance/
| |-- stacks/
| |-- conventions/
| `-- workflows/
|-- prompts/
`-- requirements/
Optional files and directories
|-- SCRATCHPAD.md
|-- docs/
| |-- readme.md
| |-- installation.md
| `-- history.md
|-- process/
| |-- log.md
| `-- coding-handbook.md
```
# Optional Reusable Modules
Add files under `guidance/` only when they are needed.
# Optional Scratchpad
- `SCRATCHPAD.md` is an optional repo-root scratchpad for temporary
information aimed at the next iteration.
- Developers may create or delete `SCRATCHPAD.md` at any time.
- Developers may refer to `SCRATCHPAD.md` as `scratchpad` when giving agents a
source or target for information.
- Agents may read, update, create, or remove the scratchpad when the task
explicitly calls for it.
- Treat the scratchpad as low-formality working context rather than canonical
project truth.
- Use the scratchpad for short-lived notes, open questions, sketches, and
temporary decisions that should be resolved away.
- Move durable outcomes into `requirements/`, `guidance/`, code, tests, or
another long-lived location.
- If `SCRATCHPAD.md` is absent, agents should continue normally.
# Optional Rule Sets
- Optional rule sets may be stored in `guidance/optional/` or in `guidance/{section}/optional/`.
- Optional rule sets are inactive by default and shall only be applied when a prompt explicitly requests them, for example by phrases such as `Apply rules for lean interface iteration in the following steps.` or `Apply LII rules.`
- An optional rule set may be requested by its descriptive name, by its rule set ID, or by another equally clear explicit reference.
- Agents shall never infer or auto-enable optional rule sets from general intent alone.
- If an optional rule or rule set cannot be identified and addressed clearly, agents shall stop and ask before proceeding.
# Prepared Orders
- An `order` is a prepared prompt for one isolated operation rather than a general workflow or standing rule set.
- Orders shall be stored under `prompts/`.
- Order files shall use the naming schema `ORDER-0001-<slug>.md`, `ORDER-0002-<slug>.md`, and so on.
- The canonical order identifier is the `ORDER-0001` style prefix. The trailing slug is descriptive only.
- Recommended internal order file structure is: prompt ID, prompt name, purpose, trigger examples, scope, operation, and expected output.
- Orders shall only be executed when they are explicitly requested by a prompt such as `Execute ORDER-0007.` or `Execute ORDER 7.`
- Agents may accept an unambiguous short numeric reference such as `ORDER 7` as an alias for `ORDER-0007`.
- If an order cannot be identified uniquely and clearly, agents shall stop and ask before proceeding.
# Toolstack Guides
Location:
```text
guidance/stacks/
```
Examples:
- `guidance/stacks/python.md`
- `guidance/stacks/typescript.md`
- `guidance/stacks/docker.md`
- `guidance/stacks/terraform.md`
Use for:
- Language or framework expectations.
- Tooling and environment conventions.
- Build, test, and runtime guidance tied to a specific stack.
# Coding Conventions
Location:
```text
guidance/conventions/
```
Examples:
- `guidance/conventions/naming.md`
- `guidance/conventions/testing.md`
- `guidance/conventions/review.md`
Use for:
- Naming and structure conventions.
- Testing expectations.
- Code review and quality rules.
# Recurring Workflows
Location:
```text
guidance/workflows/
```
Examples:
- `guidance/workflows/feature-delivery.md`
- `guidance/workflows/bugfix.md`
- `guidance/workflows/release.md`
- `guidance/workflows/incident-response.md`
Use for:
- Repeatable task flows.
- Checklists for common delivery work.
- Operational or maintenance procedures.
<!-- Enable this optional section by removing the outer HTML comment markers from this segment
when you want agents to create, update, and consult released-product
documentation in `docs/`.
# Released Product Documentation
Released-product documentation should live outside the generic sections above.
Recommended location:
```text
docs/
```
Examples:
- `docs/readme.md`
- `docs/installation.md`
- `docs/history.md`
Agent rules for docs output:
- Keep content compact but comprehensive.
- Write for end users, operators, or other consumers of the released product.
- Prefer shipped behavior, supported workflows, and stable terminology over
internal implementation detail.
- Keep documentation synchronized with released behavior.
- Update release history when user-visible changes are shipped.
Recommended topics:
- Product overview and intended use.
- Installation, configuration, and upgrade guidance.
- Usage patterns, operational instructions, and support boundaries.
- Compatibility notes, migration notes, and release history.
- Troubleshooting and common pitfalls when relevant. -->
<!-- Enable this optional section by removing the outer HTML comment markers from this
segment when you want agents to produce and consult workflow output in `process/`.
# Agent Output In `process/`
The `process/` directory is primarily for agent output created during
delivery, maintenance, and review work.
Recommended location:
```text
process/
```
Agent rules for process output:
- Use `process/` for agent-produced artifacts rather than released-product
documentation.
- Keep entries concise, traceable, and tied to resulting changes.
- Treat `process/` as workflow output, not as the primary source of product
truth.
- Prefer summaries and rationale over raw transcript dumps unless a workflow
explicitly requires full prompt history.
# Agent Change Log
Location:
```text
process/log.md
```
Use for:
- Capturing prompts given to agents.
- Recording concise explanations of the resulting changes made by agents.
- Preserving task-by-task rationale, decisions, and implementation notes.
# Coding Handbook
Location:
```text
process/coding-handbook.md
```
Use for:
- A tutorial-style handbook that explains the programming components used in
the project.
- Compact but comprehensive technical onboarding material for future
contributors.
- Written explanations that connect code structure, concepts, and
implementation patterns. -->
# Project-Specific Requirements
Project-specific material should live outside the generic sections above.
Recommended location:
```text
requirements/
```
Examples:
- `requirements/project.md`
- `requirements/architecture.md`
- `requirements/decisions.md`
- `requirements/domain.md`
Use for:
- Product and business requirements.
- Project goals and constraints.
- Architecture and design decisions.
- Domain knowledge that is specific to this repository.
# Agent-Level Variables
When present, `requirements/identifiers.yml` is an optional project-specific
input that defines agent-level variables for use inside `requirements/` and
`guidance/`.
Variable schema:
- Use `@{VARIABLE_NAME}` for agent-level variables.
- Prefer uppercase snake case names such as `@{PROJECT_ID}` or `@{VENDOR_ID}`.
- Do not treat `${...}` as an agent-level variable form; that syntax may appear
in Bash or other code and should not be interpreted as agent metadata.
Scope:
- The effective scope of `requirements/identifiers.yml` is limited to
`requirements/` and `guidance/`.
- Definitions from `requirements/identifiers.yml` must not leak into product code.
Defaults:
- Default `@{VENDOR_ID}` is `osgw`.
- Default `@{PROJECT_ID}` is the current repository directory name.
Resolution rules:
- Treat `requirements/identifiers.yml` as optional; when it is absent, agents
may still resolve the defaults defined above.
- If a variable is used in `requirements/` or `guidance/` and it is not
defined in `requirements/identifiers.yml` and does not have a default in this
file, agents may stop and report the undefined variable.
- Prefer updating duplicated identifier values in `requirements/` and
`guidance/` to use the variable schema when that improves consistency.
# Precedence
Some precedence levels may be absent because optional levels can remain inside
HTML comments. The smaller numeric index wins.
Apply guidance in this order:
1. Direct user or task instructions.
2. Project-specific documents in `requirements/`.
<!-- 3. Released-product documentation in `docs/` when shipped behavior or
user-facing expectations are relevant. -->
4. Relevant modular guides in `guidance/stacks/`, `guidance/conventions/`, or `guidance/workflows/`.
<!-- 5. Agent output in `process/` when prior prompts, rationale, or
implementation notes are relevant. -->
6. This `AGENTS.md`.
# Maintenance
- Keep this file short and stable.
- Move detail into dedicated modules when a section becomes too specific or too long.
- Add new guideline files only when they solve a recurring need.
- Remove outdated references when the repository structure changes.
# Current Status
This repository defines the base `AGENTS.md` structure plus project-specific
requirements and modular guidance.
Future project work can add:
- Reusable modules under `guidance/`
- Project-specific documentation under `requirements/`
- Optional temporary iteration context in `SCRATCHPAD.md`
- Optional released-product documentation under `docs/` by uncommenting its segment
- Optional agent output under `process/` by uncommenting its segment
- Cross-references from this file once those documents exist

62
SCRATCHPAD.md Normal file
View File

@@ -0,0 +1,62 @@
<!--
# Scratchpad
Temporary information holder for the next iteration. Developers may create or
delete this file at any time. Anything durable should move into code, tests, or
canonical docs, then this file should disappear.
## Goal
Use this section for the current slice of work. It should explain what the
scratchpad is helping us move forward right now.
## Settled
Use this for decisions that are stable enough to guide the next steps, but are
still temporary enough to live in the scratchpad for now.
## Focused Snapshot
Use an extra section like this only when one slice needs its own compact
summary. This is useful when a specific API, boundary, or model was recently
recreated and should be captured clearly.
## Open
Use this for unresolved questions, design choices, and risks that still need a
decision.
## Sketches
Use this for rough candidate structures, names, or shapes. Keep it explicit
that these are sketches, not committed architecture.
## Gaps Right Now
Use this for concrete missing pieces in the current repo state. This section
should describe what is absent or incomplete, not broad future ambitions.
## Next
Use this for the immediate sequence of work. It should be short, ordered, and
biased toward the next deliverable rather than a long roadmap.
## Delete When
Use this to define when the scratchpad should disappear. That keeps it clearly
temporary and helps prevent it from turning into shadow documentation.
## Suggested Style
- Prefer short bullets over long prose.
- Keep facts, questions, and rough sketches in separate sections.
- Add custom sections only when they help the next iteration move faster.
- Move durable outcomes out of the scratchpad once they stop being temporary.
-->

View File

@@ -0,0 +1,28 @@
# Lean Interface Iteration
Rule set name: `lean-interface-iteration`
Rule set ID: `LII`
Status: optional, prompt-activated only
Trigger examples:
- `Apply the lean-interface-iteration rules.`
- `Apply LII rules.`
LII-0001: Apply this rule set only when it is explicitly requested in the prompt.
LII-0002: The target of work under this rule set is the iterated product state for the addressed iteration only.
LII-0003: Optimize the addressed interface toward the leanest and least complex model that still satisfies the iteration order.
LII-0004: Backward compatibility, legacy aliases, and compatibility shims are not required unless the prompt explicitly asks to preserve them.
LII-0005: Prefer one authoritative interface over multiple overlapping parameters, flags, or naming variants.
LII-0006: Remove or avoid transitional interface layers when they are not required by the addressed iteration order.
LII-0007: Update affected tests, guidance, requirements, and documentation so they describe the simplified interface model rather than a mixed legacy-and-new model.
LII-0008: Never change behavior, interfaces, or surrounding areas that are not addressed by the current iteration order.

View File

@@ -0,0 +1,56 @@
# Preparation Script Design
Rule set name: `preparation-script-design`
Rule set ID: `PSD`
Status: optional, prompt-activated only
Trigger examples:
- `Apply the preparation-script-design rules.`
- `Apply PSD rules.`
PSD-0001: Apply this rule set only when it is explicitly requested in the prompt.
PSD-0002: Use this rule set for scripts whose purpose is to prepare, verify, or expose a local development or automation environment rather than to perform product runtime behavior.
PSD-0003: Keep a preparation script focused on environment readiness, dependency installation, local helper exposure, and clear verification output; do not mix unrelated product logic into the script.
PSD-0004: Design the script to be idempotent so repeated runs converge on the same prepared state without unnecessary reinstallation or destructive side effects.
PSD-0005: Provide a verification-only mode such as `--check` that reports readiness without installing, modifying, or creating dependencies.
PSD-0006: Separate component checks from installation steps so the script can report what is missing before or after attempted remediation.
PSD-0007: Group required capabilities into clear purpose-oriented sections such as support toolchains, local package bundles, generated environment helpers, or other relevant readiness areas instead of presenting one undifferentiated dependency list.
PSD-0008: Prefer explicit per-component check helpers over opaque one-shot checks so failures remain traceable and easy to extend.
PSD-0009: Generate or update environment helper files only when they provide a stable, reusable way to expose repo-local or workspace-local tools, paths, or environment variables.
PSD-0010: Generated environment helper files shall be safe to source multiple times and should avoid duplicating path entries or clobbering unrelated user environment state.
PSD-0011: When a preparation flow seeds optional user-owned files such as config templates, do so non-destructively by creating them only when absent unless the prompt explicitly requests overwrite behavior.
PSD-0012: Report status in a concise scan-friendly line format of the shape `[status] Label: detail`, where the label names the checked component and the detail string stays short and specific.
PSD-0013: Prefer a small canonical status vocabulary in those report lines, with `ok` for satisfied checks, `warn` for non-blocking gaps, and a failure status such as `failed` for blocking or unsuccessful states.
PSD-0014: When a preparation script uses terminal colors in its status output, apply a consistent severity mapping so `ok` is green, `warn` is yellow, and all other status levels are red.
PSD-0015: In bracketed status markers such as `[ok]` or `[warn]`, keep the square brackets uncolored and apply the severity color only to the inner status text.
PSD-0016: Colorized status output shall degrade safely in non-terminal or non-color contexts so the script remains readable and automation-friendly without ANSI support.
PSD-0017: End with an explicit readiness conclusion that distinguishes between successful preparation, incomplete prerequisites, and failed installation attempts.
PSD-0018: Installation logic should use the narrowest supported platform-specific package-manager actions necessary for the declared scope and should fail clearly when no supported installation path is available.
PSD-0019: Treat repo-local helper tooling and local package installation boundaries explicitly rather than assuming global installs, especially when the prepared environment is intended to be reproducible.
PSD-0020: Keep the script suitable for both interactive local developer use and non-interactive automation checks by avoiding prompts during normal execution unless the prompt explicitly requires interactivity.
PSD-0021: When a script depends on generated helper files or adjacent validation helpers, update those supporting files only as needed to keep the preparation flow coherent and usable.
PSD-0022: Verify shell syntax after changes and, when feasible, run a dry readiness check so the resulting preparation flow is validated rather than only written.

View File

@@ -0,0 +1,97 @@
# Architecture
## Architecture Goals
- Keep the tool small, local, and easy to reason about.
- Separate media inspection, stored normalization rules, and conversion execution clearly enough that users can inspect and adjust behavior.
- Favor explicit local state and deterministic rule application over opaque automation.
- Make external runtime dependencies and platform assumptions visible.
## System Context
- Primary actors:
- Local operator running the CLI.
- Local operator using the Textual TUI to inspect files and maintain rules.
- External systems:
- `ffprobe` for media introspection.
- `ffmpeg` for conversion and extraction.
- TMDB API for optional show and episode metadata.
- Local filesystem for source media, generated outputs, subtitles, logs, config, and database files.
- Data entering the system:
- Media container and stream metadata from source files.
- Regex patterns and per-show normalization rules entered in the TUI.
- Optional config values from `~/.local/etc/ffx.json`.
- Optional TMDB identifiers and CLI overrides.
- Optional external subtitle files.
- Data leaving the system:
- Normalized output media files.
- Extracted stream files from unmux operations.
- SQLite rows representing shows, patterns, tracks, tags, shifted seasons, and properties.
- Local log output and console messages.
## High-Level Building Blocks
- Frontend, CLI, API, or worker:
- A Click-based CLI in [`src/ffx/ffx.py`](/home/osgw/.local/src/codex/ffx/src/ffx/ffx.py).
- A Textual terminal UI rooted in [`src/ffx/ffx_app.py`](/home/osgw/.local/src/codex/ffx/src/ffx/ffx_app.py) with screens for shows, patterns, file inspection, tracks, tags, and shifted seasons.
- Core business logic:
- Descriptor objects model media files, shows, and tracks.
- Controllers encapsulate CRUD operations and workflow orchestration for shows, patterns, tags, tracks, season shifts, configuration, and conversion.
- `MediaDescriptorChangeSet` computes differences between a file and its stored target schema to drive metadata and disposition updates.
- Storage:
- SQLite via SQLAlchemy ORM, with schema rooted in shows, patterns, tracks, media tags, track tags, shifted seasons, and generic properties.
- A configuration JSON file supplies optional path, metadata-filtering, and filename-template settings.
- Integration adapters:
- Process execution wrapper for `ffmpeg`, `ffprobe`, `nice`, and `cpulimit`.
- HTTP adapter for TMDB via `requests`.
## Data And Interface Notes
- Key entities or records:
- `Show`: canonical TV show metadata plus digit-formatting rules for generated filenames.
- `Pattern`: regex rule tying filenames to one show and one target media schema.
- `Track` and `TrackTag`: persisted target stream layout, codec, dispositions, audio layout, and stream-level tags.
- `MediaTag`: persisted container-level metadata for a pattern.
- `ShiftedSeason`: mapping from source numbering ranges to adjusted season and episode numbers.
- `Property`: internal key-value storage currently used for database versioning.
- External interfaces:
- CLI commands for conversion, inspection, extraction, and crop detection.
- TUI workflows for rule authoring and rule maintenance.
- Environment variable `TMDB_API_KEY` for TMDB access.
- Config keys `databasePath`, `logDirectory`, and `outputFilenameTemplate`, plus optional metadata-filter rules.
- Validation rules:
- Only supported media-file extensions are accepted for conversion.
- Stored database version must match the runtime-required version.
- A normalized descriptor may have at most one default and one forced stream per relevant track type.
- Stored target tracks must refer to valid source tracks of matching types.
- Shifted-season ranges are intended not to overlap for the same show and season.
- TMDB lookups require a show ID and season and episode numbers.
- Error-handling approach:
- User-facing operational failures are raised as `click.ClickException` or warnings.
- Ambiguous default and forced stream states trigger prompts unless `--no-prompt` is set, in which case the command fails fast.
- External-process failures and invalid media are surfaced through logs and command errors rather than retries, except for TMDB rate-limit retries.
## Deployment And Operations
- Runtime environment:
- Local Python environment with the package installed and `ffmpeg`, `ffprobe`, `nice`, and `cpulimit` available on `PATH`.
- Deployment shape:
- Single-process command execution on demand; no daemon, queue, or network service of its own.
- Secrets and configuration handling:
- TMDB secret is read from `TMDB_API_KEY`.
- User config is read from `~/.local/etc/ffx.json`.
- Database path may also be overridden per command via `--database-file`.
- Logging and monitoring approach:
- File and console logging configured per invocation.
- Default log file path is `~/.local/var/log/ffx.log`.
- No dedicated monitoring integration is present.
## Open Technical Questions
- Question: Should Linux-specific assumptions such as `/dev/null`, `nice`, `cpulimit`, and `~/.local` remain part of the supported-platform contract?
- Risk: Portability and operational behavior are underspecified for non-Linux environments.
- Next decision needed: Either document Linux-like systems as the official support boundary or refactor the process and path handling for broader portability.
- Question: Should placeholder TUI surfaces such as settings and help become part of the required product surface or stay explicitly out of scope?
- Risk: The UI appears broader than the actually finished feature set.
- Next decision needed: Either remove or complete placeholder screens and update requirements accordingly.

101
requirements/project.md Normal file
View File

@@ -0,0 +1,101 @@
## Purpose And Scope
- Project name: FFX
- User problem: TV episode files from mixed sources arrive with inconsistent codecs, stream metadata, subtitle layouts, season and episode numbering, and output filenames, which makes them awkward to archive and use in media-player applications.
- Target users: Individual operators curating a local TV media library on a workstation, especially users willing to define normalization rules per show.
- Success outcome: A user can inspect source files, define reusable show and pattern rules, and produce output files whose streams, metadata, and filenames follow a predictable schema for web playback and library import.
- Out of scope:
- Multi-user or hosted service workflows.
- General movie-library management.
- Distributed transcoding or remote job orchestration.
- Broad media-server administration beyond file preparation.
## Required Product
- Deliverable type: Installable Python command-line application with a Textual terminal UI for inspection and rule editing.
- Core capabilities:
- Maintain an SQLite-backed database of shows, filename-matching patterns, per-pattern stream layouts and metadata tags, and optional season-shift rules.
- Inspect existing media files through `ffprobe` and compare discovered stream metadata with stored normalization rules.
- Convert media files through `ffmpeg` into a normalized output layout, including video recoding, audio transcoding to Opus, metadata cleanup and rewrite, and controlled disposition flags.
- Build output filenames from detected or configured show, season, and episode information, optionally enriched from TMDB and a configurable Jinja-style filename template.
- Support auxiliary file operations such as subtitle import, unmuxing, crop detection, and rename-only runs.
- Supported environments:
- Local execution on a Python-capable workstation.
- Best-supported on Linux-like systems because the implementation assumes `~/.local`, `/dev/null`, `nice`, and `cpulimit`.
- Requires `ffmpeg`, `ffprobe`, `nice`, and `cpulimit` on `PATH`.
- Operational owner: The local user running the tool and maintaining its config, database, and external tooling.
## Suggested User Stories
- As a library maintainer, I want to define show-specific matching rules once so that future source files can be normalized automatically.
- As an operator, I want to inspect a file before conversion so that I can compare its actual streams and tags against the stored target schema.
- As a user preparing web-playback files, I want to recode video and audio with a small set of predictable options so that results are compatible and consistently named.
- As a user dealing with nonstandard releases, I want CLI overrides for language, title, stream order, default and forced tracks, and season and episode data so that one-off fixes do not require database edits first.
- As a user importing anime or other shifted numbering schemes, I want season and episode offsets per show so that generated filenames align with TMDB and media-library expectations.
## Functional Requirements
- The system shall provide a CLI entrypoint named `ffx` with commands for `convert`, `inspect`, `shows`, `unmux`, `cropdetect`, `version`, and `help`.
- The system shall persist reusable normalization rules in SQLite for:
- shows and show formatting digits,
- regex-based filename patterns,
- per-pattern media tags,
- per-pattern stream definitions,
- shifted-season mappings,
- internal database version properties.
- The system shall inspect source media using `ffprobe` and derive a structured description of container metadata and streams.
- The system shall optionally open a Textual UI to browse shows, inspect files, and create, edit, or delete shows, patterns, stream definitions, tags, and shifted-season rules.
- The system shall match filenames against stored regex patterns to decide whether an input file should inherit a target stream and metadata schema.
- The system shall convert supported input files (`mkv`, `mp4`, `avi`, `flv`, `webm`) with `ffmpeg`, supporting at least:
- VP9, AV1, and H.264 video encoding,
- Opus audio encoding with bitrate selection based on channel layout,
- metadata and disposition rewriting,
- optional crop detection and crop application,
- optional deinterlacing and denoising,
- optional subtitle import from external files,
- rename-only copy mode.
- The system shall support optional TMDB lookups to resolve show names, years, and episode titles when a show ID, season, and episode are available.
- The system shall generate output filenames from show metadata, season and episode indices, and episode names using the configured filename template.
- The system shall allow CLI overrides for stream languages, stream titles, default and forced tracks, stream order, TMDB show and episode data, output directory, label prefix, and processing resource limits.
- The system shall support extracting streams into separate files via `unmux` and reporting suggested crop parameters via `cropdetect`.
- The system shall handle invalid input and system failures gracefully by logging warnings or raising `click` errors for missing files, invalid media, missing TMDB credentials, incompatible database versions, and ambiguous track dispositions when prompting is disabled.
## Quality Requirements
- The system should stay understandable as a small local tool: controllers, descriptors, models, and screens should remain separate enough for contributors to trace a workflow end to end.
- The system should produce predictable output for the same database rules, CLI overrides, and source files.
- The system should preserve a lightweight operational footprint: local SQLite state, local log file, no mandatory background services.
- The system should be testable through the existing combinatorial CLI-oriented test harness and through isolated logic in descriptors and controllers.
- The system should expose enough logging to diagnose failed probes, failed conversions, and rule mismatches without requiring a debugger.
## Constraints And Assumptions
- Technology constraints:
- Python package built with setuptools.
- Primary libraries: `click`, `textual`, `sqlalchemy`, `jinja2`, `requests`.
- Conversion and inspection rely on external executables rather than pure-Python media libraries.
- Hosting or infrastructure constraints:
- Intended for local execution, not server deployment.
- Stores default state in `~/.local/etc/ffx.json`, `~/.local/var/ffx/ffx.db`, and `~/.local/var/log/ffx.log`.
- Timeline constraints:
- The current implemented scope reflects a compact alpha release stream up to version `0.2.3`.
- Team capacity assumptions:
- Maintained as a small codebase where simple patterns and direct controller logic are preferred over framework-heavy abstractions.
- Third-party dependencies:
- `ffmpeg`, `ffprobe`, `nice`, and `cpulimit`.
- TMDB API access through `TMDB_API_KEY` for metadata enrichment.
## Acceptance Scope
- First release boundary:
- Local installation through `pip`.
- Working SQLite-backed rule storage.
- Functional CLI conversion and inspection workflows.
- Textual CRUD flows for shows, patterns, tags, tracks, and shifted seasons.
- TMDB-assisted filename generation, subtitle import, season shifting, database versioning, and configurable output filename templating.
- Excluded follow-up ideas:
- Completing placeholder screens such as settings and help.
- Hardening platform portability beyond Linux-like systems.
- Broader media types, richer release packaging, and production-grade background processing.
- Demonstration scenario:
- Inspect a TV episode file, define or update the matching show and pattern in the TUI, then run `ffx convert` so the result uses the stored stream schema, optional TMDB episode naming, and a normalized output filename.

View File

@@ -9,6 +9,7 @@ class AudioLayout(Enum):
LAYOUT_7_1 = {"label": "7.1", "index": 4} #TODO: Does this exist?
LAYOUT_6CH = {"label": "6ch", "index": 5}
LAYOUT_5_0 = {"label": "5.0(side)", "index": 6}
LAYOUT_UNDEFINED = {"label": "undefined", "index": 0}
@@ -29,6 +30,15 @@ class AudioLayout(Enum):
except:
return AudioLayout.LAYOUT_UNDEFINED
# @staticmethod
# def fromIndex(index : int):
# try:
# target_index = int(index)
# except (TypeError, ValueError):
# return AudioLayout.LAYOUT_UNDEFINED
# return next((a for a in AudioLayout if a.value['index'] == target_index),
# AudioLayout.LAYOUT_UNDEFINED)
@staticmethod
def fromIndex(index : int):
try:

View File

@@ -9,7 +9,7 @@ DEFAULT_AC3_BANDWIDTH = "256"
DEFAULT_DTS_BANDWIDTH = "320"
DEFAULT_7_1_BANDWIDTH = "384"
DEFAULT_CROP_START = 60
DEFAULT_CROP_LENGTH = 180
DEFAULT_cut_start = 60
DEFAULT_cut_length = 180
DEFAULT_OUTPUT_FILENAME_TEMPLATE = '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'

View File

@@ -1,6 +1,6 @@
#! /usr/bin/python3
import os, click, time, logging
import os, click, time, logging, shutil, subprocess
from ffx.configuration_controller import ConfigurationController
@@ -22,7 +22,7 @@ from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.process import executeProcess
from ffx.helper import filterFilename
from ffx.helper import filterFilename, substituteTmdbFilename
from ffx.helper import getEpisodeFileBasename
from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAULT_DTS_BANDWIDTH, DEFAULT_7_1_BANDWIDTH
@@ -30,7 +30,9 @@ from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAU
from ffx.filter.quality_filter import QualityFilter
from ffx.filter.preset_filter import PresetFilter
from ffx.filter.crop_filter import CropFilter
from ffx.filter.nlmeans_filter import NlmeansFilter
from ffx.filter.deinterlace_filter import DeinterlaceFilter
from ffx.constants import VERSION
@@ -47,6 +49,11 @@ def ffx(ctx, database_file, verbose, dry_run):
ctx.obj = {}
if ctx.invoked_subcommand in ('setup_dependencies', 'upgrade'):
ctx.obj['dry_run'] = dry_run
ctx.obj['verbosity'] = verbose
return
ctx.obj['config'] = ConfigurationController()
ctx.obj['database'] = databaseContext(databasePath=database_file
@@ -95,6 +102,82 @@ def help():
click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
def getRepoRootPath():
    """Return the repository root: three directory levels above this file's absolute path."""
    parent = os.path.dirname
    return parent(parent(parent(os.path.abspath(__file__))))
def getPrepareScriptPath():
    """Return the absolute path of the preparation shell script in the repo's tools/ directory."""
    repoRoot = getRepoRootPath()
    return os.path.join(repoRoot, 'tools', 'prepare.sh')
def getBundleVenvDirectory():
    """Return the bundled-virtualenv directory under the user's home (~/.local/share/ffx.venv)."""
    homeDirectory = os.path.expanduser('~')
    return os.path.join(homeDirectory, '.local', 'share', 'ffx.venv')
def getBundlePipPath():
    """Return the path of the pip executable inside the bundled virtualenv."""
    venvDirectory = getBundleVenvDirectory()
    return os.path.join(venvDirectory, 'bin', 'pip')
def getBundleRepoPath():
    # The bundle repository is currently the same checkout this module lives in;
    # kept as a separate accessor so the bundle location can diverge later.
    return getRepoRootPath()
@ffx.command(name='setup_dependencies')
@click.pass_context
@click.option('--check', is_flag=True, default=False, help='Only verify dependency readiness')
@click.argument('prepare_args', nargs=-1, type=click.UNPROCESSED)
def setup_dependencies(ctx, check, prepare_args):
    """Run the bundled preparation script to install or verify external dependencies.

    Extra PREPARE_ARGS are forwarded to the script unmodified; --check adds the
    script's verification flag. In dry-run mode the command line is only echoed.
    Exits with the script's return code.
    """
    scriptPath = getPrepareScriptPath()
    if not os.path.isfile(scriptPath):
        raise click.ClickException(f"Preparation script not found at {scriptPath}")
    command = ['bash', scriptPath]
    if check:
        command.append('--check')
    command.extend(prepare_args)
    if ctx.obj.get('dry_run', False):
        click.echo(' '.join(command))
        return
    # Propagate the script's exit status to the caller.
    result = subprocess.run(command)
    ctx.exit(result.returncode)
@ffx.command(name='upgrade')
@click.pass_context
@click.argument('branch', required=False, default='main')
def upgrade(ctx, branch):
    """Update the bundled checkout to BRANCH and reinstall it into the bundle venv.

    Runs git checkout/pull inside the bundle repository, then an editable pip
    install with the bundle venv's pip. Stops at the first failing step and
    exits with that step's return code. In dry-run mode the commands are only
    echoed.
    """
    repoPath = getBundleRepoPath()
    pipPath = getBundlePipPath()
    if not os.path.isdir(repoPath):
        raise click.ClickException(f"Bundle repository not found at {repoPath}")
    if not os.path.isfile(pipPath):
        raise click.ClickException(f"Bundle pip not found at {pipPath}")
    steps = [
        ['git', 'checkout', branch],
        ['git', 'pull'],
        [pipPath, 'install', '--editable', '.'],
    ]
    if ctx.obj.get('dry_run', False):
        for step in steps:
            click.echo(f"(cd {repoPath} && {' '.join(step)})")
        return
    for step in steps:
        result = subprocess.run(step, cwd=repoPath)
        if result.returncode != 0:
            ctx.exit(result.returncode)
@ffx.command()
@click.pass_context
@click.argument('filename', nargs=1)
@@ -181,11 +264,12 @@ def unmux(ctx,
else:
ctx.obj['logger'].info(f"\nUnmuxing file {fp.getFilename()}\n")
for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
# for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in sourceMediaDescriptor.getTrackDescriptors():
if trackDescriptor.getType() == TrackType.SUBTITLE or not subtitles_only:
# SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
# SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
targetPrefix = f"{targetLabel}{targetIndicator}_{trackDescriptor.getIndex()}_{trackDescriptor.getLanguage().threeLetter()}"
td: TrackDisposition
@@ -211,6 +295,38 @@ def unmux(ctx,
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
def cropdetect(ctx,
               paths,
               nice,
               cpu):
    """Report suggested crop parameters for each existing file in PATHS.

    Probes every readable file and echoes the detected crop parameters;
    files that fail probing are skipped with a warning instead of aborting
    the whole batch.
    """
    existingSourcePaths = [p for p in paths if os.path.isfile(p)]
    # BUGFIX: the previous debug message said "Unmuxing" — a copy-paste
    # leftover from the unmux command.
    ctx.obj['logger'].debug(f"\nDetecting crop parameters for {len(existingSourcePaths)} files")
    # Resource limits are read by the process helpers when spawning ffmpeg.
    ctx.obj['resource_limits'] = {}
    ctx.obj['resource_limits']['niceness'] = nice
    ctx.obj['resource_limits']['cpu_percent'] = cpu
    for sourcePath in existingSourcePaths:
        try:
            fp = FileProperties(ctx.obj, sourcePath)
            cropParams = fp.findCropParams()
            click.echo(cropParams)
        except Exception as ex:
            ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
@ffx.command()
@click.pass_context
@@ -272,9 +388,9 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9 or av1)", show_default=True)
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9, av1 or h264)", show_default=True)
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9 encoder")
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9/H264 encoder")
@click.option('-p', '--preset', type=str, default="", help=f"Quality preset to be used with AV1 encoder")
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
@@ -296,10 +412,13 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output streams order. Use format comma separated integers')
@click.option("--crop", is_flag=False, flag_value="default", default="none")
@click.option("--crop", is_flag=False, flag_value="auto", default="none")
@click.option("--cut", is_flag=False, flag_value="default", default="none")
@click.option("--output-directory", type=str, default='')
@click.option("--deinterlace", is_flag=False, flag_value="default", default="none")
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
@click.option("--denoise-use-hw", is_flag=True, default=False)
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs more details.')
@@ -324,6 +443,8 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
@click.option('--rename-only', is_flag=True, default=False, help='Only renaming, no recoding')
def convert(ctx,
paths,
label,
@@ -350,8 +471,12 @@ def convert(ctx,
rearrange_streams,
crop,
cut,
output_directory,
deinterlace,
denoise,
denoise_use_hw,
denoise_strength,
@@ -372,7 +497,8 @@ def convert(ctx,
keep_mkvmerge_metadata,
nice,
cpu):
cpu,
rename_only):
"""Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin
Files found under PATHS will be converted according to parameters.
@@ -381,13 +507,14 @@ def convert(ctx,
or if the filename has not changed."""
startTime = time.perf_counter()
context = ctx.obj
context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)
targetFormat = FfxController.DEFAULT_FILE_FORMAT
targetExtension = FfxController.DEFAULT_FILE_EXTENSION
#HINT: quick and dirty override for h264, todo improve
targetFormat = '' if context['video_encoder'] == VideoEncoder.H264 else FfxController.DEFAULT_FILE_FORMAT
targetExtension = 'mkv' if context['video_encoder'] == VideoEncoder.H264 else FfxController.DEFAULT_FILE_EXTENSION
context['use_tmdb'] = not no_tmdb
context['use_pattern'] = not no_pattern
@@ -476,14 +603,6 @@ def convert(ctx,
ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")
qualityTokens = quality.split(',')
q_list = [q for q in qualityTokens if q.isnumeric()]
ctx.obj['logger'].debug(f"Qualities: {q_list}")
presetTokens = preset.split(',')
p_list = [p for p in presetTokens if p.isnumeric()]
ctx.obj['logger'].debug(f"Presets: {p_list}")
context['bitrates'] = {}
context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
@@ -494,26 +613,35 @@ def convert(ctx,
ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")
# Process crop parameters
context['perform_crop'] = (crop != 'none')
if context['perform_crop']:
cTokens = crop.split(',')
if cTokens and len(cTokens) == 2:
context['crop_start'] = int(cTokens[0])
context['crop_length'] = int(cTokens[1])
ctx.obj['logger'].debug(f"Crop start={context['crop_start']} length={context['crop_length']}")
#->
# Process cut parameters
context['perform_cut'] = (cut != 'none')
if context['perform_cut']:
cutTokens = cut.split(',')
if cutTokens and len(cutTokens) == 2:
context['cut_start'] = int(cutTokens[0])
context['cut_length'] = int(cutTokens[1])
ctx.obj['logger'].debug(f"Cut start={context['cut_start']} length={context['cut_length']}")
tc = TmdbController() if context['use_tmdb'] else None
qualityKwargs = {QualityFilter.QUALITY_KEY: quality}
qualityKwargs = {QualityFilter.QUALITY_KEY: str(quality)}
qf = QualityFilter(**qualityKwargs)
if context['video_encoder'] == VideoEncoder.AV1 and preset:
presetKwargs = {PresetFilter.PRESET_KEY: preset}
PresetFilter(**presetKwargs)
cf = None
# if crop != 'none':
if crop == 'auto':
cropKwargs = {}
cf = CropFilter(**cropKwargs)
denoiseKwargs = {}
if denoise_strength:
denoiseKwargs[NlmeansFilter.STRENGTH_KEY] = denoise_strength
@@ -528,6 +656,9 @@ def convert(ctx,
if denoise != 'none' or denoiseKwargs:
NlmeansFilter(**denoiseKwargs)
if deinterlace != 'none':
DeinterlaceFilter()
chainYield = list(qf.getChainYield())
ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(chainYield)} jobs")
@@ -548,8 +679,19 @@ def convert(ctx,
targetSuffices = {}
mediaFileProperties = FileProperties(context, sourcePath)
mediaFileProperties = FileProperties(context, sourceFilename)
# if not cf is None:
#
cropArguments = {} if cf is None else mediaFileProperties.findCropArguments()
#
# ctx.obj['logger'].info(f"\nSetting crop arguments: ouput width: {cropArguments[CropFilter.OUTPUT_WIDTH_KEY]} "
# + f"height: {cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]} "
# + f"offset x: {cropArguments[CropFilter.OFFSET_X_KEY]} "
# + f"y: {cropArguments[CropFilter.OFFSET_Y_KEY]}")
#
# cf.setArguments(**cropArguments)
ssc = ShiftedSeasonController(context)
@@ -572,6 +714,16 @@ def convert(ctx,
sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()
if ([smd for smd in sourceMediaDescriptor.getSubtitleTracks()
if smd.getCodec() == TrackCodec.ASS]
and [amd for amd in sourceMediaDescriptor.getAttachmentTracks()
if amd.getCodec() == TrackCodec.TTF]):
targetFormat = ''
targetExtension = 'mkv'
#HINT: This is None if the filename did not match anything in database
currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None
@@ -599,18 +751,43 @@ def convert(ctx,
checkUniqueDispositions(context, targetMediaDescriptor)
currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)
# Check if source and target track descriptors match
sourceTrackDescriptorList = sourceMediaDescriptor.getTrackDescriptors()
targetTrackDescriptorList = targetMediaDescriptor.getTrackDescriptors()
for ttd in targetTrackDescriptorList:
tti = ttd.getIndex()
ttsi = ttd.getSourceIndex()
stList = [st for st in sourceTrackDescriptorList if st.getIndex() == ttsi]
std = stList[0] if stList else None
if std is None:
raise click.ClickException(f"Target track #{tti} refering to non-existent source track #{ttsi}")
ttType = ttd.getType()
stType = std.getType()
if ttType != stType:
raise click.ClickException(f"Target track #{tti} type ({ttType.label()}) not matching source track #{ttsi} type ({stType.label()})")
if context['import_subtitles']:
targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
context['subtitle_prefix'],
showSeason,
showEpisode)
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
# ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")
if cliOverrides:
targetMediaDescriptor.applyOverrides(cliOverrides)
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
# ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")
ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")
@@ -653,9 +830,9 @@ def convert(ctx,
ctx.obj['logger'].debug(f"tmdbEpisodeResult={tmdbEpisodeResult}")
if tmdbEpisodeResult:
filteredEpisodeName = filterFilename(tmdbEpisodeResult['name'])
substitutedEpisodeName = filterFilename(substituteTmdbFilename(tmdbEpisodeResult['name']))
sourceFileBasename = getEpisodeFileBasename(showFilenamePrefix,
filteredEpisodeName,
substitutedEpisodeName,
shiftedShowSeason,
shiftedShowEpisode,
indexSeasonDigits,
@@ -678,12 +855,8 @@ def convert(ctx,
for chainIteration in chainYield:
ctx.obj['logger'].debug(f"\nchain iteration: {chainIteration}\n")
# if len(q_list) > 1:
# targetSuffices['q'] = f"q{q}"
chainVariant = '-'.join([fy['variant'] for fy in chainIteration])
ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} variant={chainVariant}")
@@ -692,10 +865,10 @@ def convert(ctx,
ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")
# targetFileBasename = mediaFileProperties.assembleTargetFileBasename(label,
# q if len(q_list) > 1 else -1,
#
targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
# targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
targetFileBasename = (label or sourceFileBasename) if context['use_tmdb'] else sourceFileBasename
targetFilenameTokens = [targetFileBasename]
@@ -703,34 +876,31 @@ def convert(ctx,
if 'se' in targetSuffices.keys():
targetFilenameTokens += [targetSuffices['se']]
# if 'q' in targetSuffices.keys():
# targetFilenameTokens += [targetSuffices['q']]
for filterYield in chainIteration:
# filterIdentifier = filterYield['identifier']
# filterParameters = filterYield['parameters']
# filterSuffices = filterYield['suffices']
targetFilenameTokens += filterYield['suffices']
#TODO #387
# targetFilename = ((f"{sourceFileBasename}_q{q}" if len(q_list) > 1 else sourceFileBasename)
# if context['use_tmdb'] else targetFileBasename)
targetFilename = f"{'_'.join(targetFilenameTokens)}.{sourceFilenameExtension if rename_only else targetExtension}"
targetFilename = f"{'_'.join(targetFilenameTokens)}.{targetExtension}"
if sourceFilename == targetFilename:
targetFilename = f"out_{targetFilename}"
targetPath = os.path.join(output_directory if output_directory else sourceDirectory, targetFilename)
#TODO: target extension anpassen
targetPath = os.path.join(output_directory, targetFilename) if output_directory else targetFilename
ctx.obj['logger'].info(f"Creating file {targetFilename}")
fc.runJob(sourcePath,
targetPath,
targetFormat,
context['video_encoder'],
chainIteration)
#TODO: click.confirm('Warning! This file is not compliant to the defined source schema! Do you want to continue?', abort=True)
if rename_only:
shutil.copyfile(sourcePath, targetPath)
else:
fc.runJob(sourcePath,
targetPath,
targetFormat,
chainIteration,
cropArguments,
currentPattern)
endTime = time.perf_counter()
ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")

View File

@@ -1,18 +1,22 @@
import os, click
from logging import Logger
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.audio_layout import AudioLayout
from ffx.track_type import TrackType
from ffx.track_codec import TrackCodec
from ffx.video_encoder import VideoEncoder
from ffx.process import executeProcess
from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec
from ffx.constants import DEFAULT_CROP_START, DEFAULT_CROP_LENGTH
from ffx.constants import DEFAULT_cut_start, DEFAULT_cut_length
from ffx.filter.quality_filter import QualityFilter
from ffx.filter.preset_filter import PresetFilter
from ffx.filter.crop_filter import CropFilter
from ffx.model.pattern import Pattern
class FfxController():
@@ -31,8 +35,7 @@ class FfxController():
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
#!
SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
# SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
def __init__(self,
context : dict,
@@ -40,12 +43,15 @@ class FfxController():
sourceMediaDescriptor : MediaDescriptor = None):
self.__context = context
self.__sourceMediaDescriptor = sourceMediaDescriptor
self.__targetMediaDescriptor = targetMediaDescriptor
self.__sourceMediaDescriptor = sourceMediaDescriptor
self.__configurationData = self.__context['config'].getData()
self.__mdcs = MediaDescriptorChangeSet(context,
targetMediaDescriptor,
sourceMediaDescriptor)
self.__logger = context['logger']
self.__logger: Logger = context['logger']
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
@@ -55,6 +61,14 @@ class FfxController():
'-pix_fmt', 'yuv420p10le']
# -c:v libx264 -preset slow -crf 17
def generateH264Tokens(self, quality, subIndex : int = 0):
    """Build the ffmpeg arguments encoding video stream subIndex with libx264.

    Uses the 'slow' preset together with the given CRF quality value.
    """
    codecSelector = f"-c:v:{int(subIndex)}"
    return [codecSelector, 'libx264', '-preset', 'slow', '-crf', str(quality)]
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
@@ -82,21 +96,28 @@ class FfxController():
'-auto-alt-ref', '1',
'-lag-in-frames', '25']
def generateVideoCopyTokens(self, subIndex):
    """Build the ffmpeg arguments that stream-copy video stream subIndex unchanged."""
    return [f"-c:v:{int(subIndex)}", 'copy']
def generateCropTokens(self):
    """Build the ffmpeg -ss/-t arguments limiting output to a sample window.

    Reads 'cut_start' and 'cut_length' (seconds) from the context when both
    keys are present, otherwise falls back to the module-level defaults.
    """
    # BUGFIX: removed the stale 'crop_start'/'crop_length' branch left over
    # from the rename to 'cut_*' — its assignments were always overwritten
    # by the following if/else, and the else branch contained duplicated
    # default assignments.
    context = self.__context
    if 'cut_start' in context and 'cut_length' in context:
        cutStart = int(context['cut_start'])
        cutLength = int(context['cut_length'])
    else:
        cutStart = DEFAULT_cut_start
        cutLength = DEFAULT_cut_length
    return ['-ss', str(cutStart), '-t', str(cutLength)]
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
outputFilePath = f"{filePathBase}{'.'+str(ext) if ext else ''}"
self.__logger.debug(f"FfxController.generateOutputTokens(): base='{filePathBase}' format='{format}' ext='{ext}'")
outputFilePath = f"{filePathBase}{('.'+str(ext)) if ext else ''}"
if format:
return ['-f', format, outputFilePath]
else:
@@ -108,7 +129,8 @@ class FfxController():
audioTokens = []
targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
# targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
targetAudioTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO)
trackSubIndex = 0
for trackDescriptor in targetAudioTrackDescriptors:
@@ -144,115 +166,74 @@ class FfxController():
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
f"-b:a:{trackSubIndex}",
self.__context['bitrates']['ac3']]
# -ac 5 ?
if trackAudioLayout == AudioLayout.LAYOUT_5_0:
audioTokens += [f"-c:a:{trackSubIndex}",
'libopus',
f"-filter:a:{trackSubIndex}",
'channelmap=channel_layout=5.0',
f"-b:a:{trackSubIndex}",
self.__context['bitrates']['ac3']]
trackSubIndex += 1
return audioTokens
# -disposition:s:0 default -disposition:s:1 0
def generateDispositionTokens(self):
    """Build ffmpeg -disposition arguments for every eligible target track.

    A track's emitted disposition set is the union of its own flags and the
    matching source track's flags minus DEFAULT; an empty union is written
    as '0' to clear the track's dispositions.
    """
    targetDescriptors = self.__targetMediaDescriptor.getAllTrackDescriptors()
    if self.__sourceMediaDescriptor is None:
        sourceDescriptors = []
    else:
        sourceDescriptors = self.__sourceMediaDescriptor.getAllTrackDescriptors()
    tokens = []
    for descriptor in targetDescriptors:
        # PGS subtitle tracks without an external file source get no disposition flags.
        if not descriptor.getExternalSourceFilePath() and descriptor.getCodec() == TrackCodec.PGS:
            continue
        streamSpecifier = f"-disposition:{descriptor.getType().indicator()}:{descriptor.getSubIndex()}"
        if sourceDescriptors:
            sourceFlags = sourceDescriptors[descriptor.getSourceIndex()].getDispositionSet()
        else:
            sourceFlags = set()
        # TODO: discard everything that is already present in the target descriptor (?)
        # NOTE(review): discard() mutates the set returned by getDispositionSet();
        # this assumes the getter hands out a set that is safe to modify — confirm.
        sourceFlags.discard(TrackDisposition.DEFAULT)
        combinedFlags = descriptor.getDispositionSet() | sourceFlags
        if combinedFlags:
            tokens += [streamSpecifier, '+'.join([flag.label() for flag in combinedFlags])]
        else:
            tokens += [streamSpecifier, '0']
    return tokens
def generateMetadataTokens(self):
    """Builds the ffmpeg ``-metadata`` arguments for global and per-track tags.

    Global tags come from the target media descriptor, filtered by the
    ``metadata.remove`` configuration list and merged with the configured
    ``metadata.signature`` tags unless the ``no_signature`` context flag is
    set. Track tags are filtered by ``metadata.streams.remove``. Keys to be
    removed are emitted with an empty value so ffmpeg clears them in the
    output file.

    Returns:
        list: Flat token list, e.g. ``['-metadata:g', 'title=...', ...]``.
    """
    metadataTokens = []
    metadataConfiguration = self.__configurationData.get('metadata', {})
    signatureTags = metadataConfiguration.get('signature', {})
    removeGlobalKeys = metadataConfiguration.get('remove', [])
    removeTrackKeys = metadataConfiguration.get('streams', {}).get('remove', [])
    mediaTags = {k: v for k, v in self.__targetMediaDescriptor.getTags().items()
                 if k not in removeGlobalKeys}
    # Signature tags are appended unless explicitly disabled via context.
    if not self.__context.get('no_signature'):
        outputMediaTags = mediaTags | signatureTags
    else:
        outputMediaTags = mediaTags
    for tagKey, tagValue in outputMediaTags.items():
        metadataTokens += ['-metadata:g',
                           f"{tagKey}={tagValue}"]
    for removeKey in removeGlobalKeys:
        metadataTokens += ['-metadata:g',
                           f"{removeKey}="]
    #HINT: With current ffmpeg version track metadata tags are not passed to the outfile
    for td in self.__targetMediaDescriptor.getAllTrackDescriptors():
        typeIndicator = td.getType().indicator()
        subIndex = td.getSubIndex()
        for tagKey, tagValue in td.getTags().items():
            if tagKey not in removeTrackKeys:
                metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
                                   f"{tagKey}={tagValue}"]
        for removeKey in removeTrackKeys:
            metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
                               f"{removeKey}="]
    return metadataTokens
def runJob(self,
sourcePath,
targetPath,
targetFormat: str = '',
videoEncoder: VideoEncoder = VideoEncoder.VP9,
chainIteration: list = []):
chainIteration: list = [],
cropArguments: dict = {},
currentPattern: Pattern = None):
# quality: int = DEFAULT_QUALITY,
# preset: int = DEFAULT_AV1_PRESET):
videoEncoder: VideoEncoder = self.__context.get('video_encoder', VideoEncoder.VP9)
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
quality = qualityFilters[0]['parameters']['quality'] if qualityFilters else QualityFilter.DEFAULT_QUALITY
cropFilters = [fy for fy in chainIteration if fy['identifier'] == 'crop']
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
deinterlaceFilters = [fy for fy in chainIteration if fy['identifier'] == 'bwdif']
if qualityFilters and (quality := qualityFilters[0]['parameters']['quality']):
self.__logger.info(f"Setting quality {quality} from command line parameter")
elif (quality := currentPattern.quality):
self.__logger.info(f"Setting quality {quality} from pattern default")
else:
quality = (QualityFilter.DEFAULT_H264_QUALITY
if (videoEncoder == VideoEncoder.H264)
else QualityFilter.DEFAULT_VP9_QUALITY)
self.__logger.info(f"Setting quality {quality} from default")
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
denoiseTokens = denoiseFilters[0]['tokens'] if denoiseFilters else []
filterParamTokens = []
if cropArguments:
cropParams = (f"crop="
+ f"{cropArguments[CropFilter.OUTPUT_WIDTH_KEY]}"
+ f":{cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]}"
+ f":{cropArguments[CropFilter.OFFSET_X_KEY]}"
+ f":{cropArguments[CropFilter.OFFSET_Y_KEY]}")
filterParamTokens.append(cropParams)
filterParamTokens.extend(denoiseFilters[0]['tokens'] if denoiseFilters else [])
filterParamTokens.extend(deinterlaceFilters[0]['tokens'] if deinterlaceFilters else [])
deinterlaceFilters
filterTokens = ['-vf', ', '.join(filterParamTokens)] if filterParamTokens else []
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
@@ -261,19 +242,22 @@ class FfxController():
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens()
+ self.generateDispositionTokens())
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence += self.generateMetadataTokens()
commandSequence += denoiseTokens
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += filterTokens
commandSequence += (self.generateAudioEncodingTokens()
+ self.generateAV1Tokens(int(quality), int(preset))
+ self.generateAudioEncodingTokens())
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence += self.generateAV1Tokens(int(quality), int(preset))
if self.__context['perform_crop']:
commandSequence += FfxController.generateCropTokens()
commandSequence += self.generateAudioEncodingTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
@@ -284,6 +268,37 @@ class FfxController():
executeProcess(commandSequence, context = self.__context)
if videoEncoder == VideoEncoder.H264:
commandSequence = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence += self.__mdcs.generateMetadataTokens()
commandSequence += filterTokens
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence += self.generateH264Tokens(int(quality))
commandSequence += self.generateAudioEncodingTokens()
if self.__context['perform_cut']:
commandSequence += self.generateCropTokens()
commandSequence += self.generateOutputTokens(targetPath,
targetFormat)
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
if not self.__context['dry_run']:
executeProcess(commandSequence, context = self.__context)
if videoEncoder == VideoEncoder.VP9:
commandSequence1 = (commandTokens
@@ -294,11 +309,14 @@ class FfxController():
# the required bitrate for the second run is determined and recorded
# TODO: Results seems to be slightly better with first pass omitted,
# Confirm or find better filter settings for 2-pass
# commandSequence1 += self.__context['denoiser'].generateDenoiseTokens()
# commandSequence1 += self.__context['denoiser'].generatefilterTokens()
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
if self.__context['perform_crop']:
if self.__context['perform_cut']:
commandSequence1 += self.generateCropTokens()
commandSequence1 += FfxController.NULL_TOKENS
@@ -313,16 +331,21 @@ class FfxController():
commandSequence2 = (commandTokens
+ self.__targetMediaDescriptor.getImportFileTokens()
+ self.__targetMediaDescriptor.getInputMappingTokens()
+ self.generateDispositionTokens())
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
+ self.__mdcs.generateDispositionTokens())
# Optional tokens
commandSequence2 += self.generateMetadataTokens()
commandSequence2 += denoiseTokens
commandSequence2 += self.__mdcs.generateMetadataTokens()
commandSequence2 += filterTokens
commandSequence2 += self.generateVP9Pass2Tokens(int(quality)) + self.generateAudioEncodingTokens()
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec != TrackCodec.PNG:
commandSequence2 += self.generateVP9Pass2Tokens(int(quality))
if self.__context['perform_crop']:
commandSequence2 += self.generateAudioEncodingTokens()
if self.__context['perform_cut']:
commandSequence2 += self.generateCropTokens()
commandSequence2 += self.generateOutputTokens(targetPath,

View File

@@ -3,6 +3,8 @@ import os, re, json
from .media_descriptor import MediaDescriptor
from .pattern_controller import PatternController
from ffx.filter.crop_filter import CropFilter
from .process import executeProcess
from ffx.model.pattern import Pattern
@@ -16,6 +18,8 @@ class FileProperties():
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
CROPDETECT_PATTERN = 'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'
DEFAULT_INDEX_DIGITS = 3
def __init__(self, context, sourcePath):
@@ -174,6 +178,49 @@ class FileProperties():
return json.loads(ffprobeOutput)['streams']
def findCropArguments(self):
    """Detects black borders via ffmpeg's cropdetect filter.

    Runs ``ffmpeg -vf cropdetect`` over a 180 second window starting at
    60 seconds, collects every ``crop=w:h:x:y`` suggestion printed to
    stderr and picks the one that occurs most often.

    Returns:
        dict: CropFilter argument keys mapped to the detected width,
        height and offsets (as strings), or an empty dict when nothing
        was detected.
    """
    # ffmpeg -i <input.file> -vf cropdetect -f null -
    ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffmpeg", "-i",
                                                              self.__sourcePath,
                                                              "-vf", "cropdetect",
                                                              "-ss", "60",
                                                              "-t", "180",
                                                              "-f", "null", "-"
                                                              ])
    # cropdetect reports its suggestions on stderr, one per analyzed frame.
    errorLines = ffprobeError.split('\n')
    crops = {}
    for el in errorLines:
        cropdetect_match = re.search(FileProperties.CROPDETECT_PATTERN, el)
        if cropdetect_match is not None:
            cropParam = str(cropdetect_match.group(0))
            crops[cropParam] = crops.get(cropParam, 0) + 1
    if crops:
        # Pick the most frequent suggestion. The previous
        # ``sorted(crops, reverse=True)`` ordered the crop strings
        # lexicographically and ignored the occurrence counts entirely.
        cropString = max(crops, key=crops.get)
        cropTokens = cropString.split('=')
        cropValueTokens = cropTokens[1]
        cropValues = cropValueTokens.split(':')
        return {
            CropFilter.OUTPUT_WIDTH_KEY: cropValues[0],
            CropFilter.OUTPUT_HEIGHT_KEY: cropValues[1],
            CropFilter.OFFSET_X_KEY: cropValues[2],
            CropFilter.OFFSET_Y_KEY: cropValues[3]
        }
    else:
        return {}
def getMediaDescriptor(self):
return MediaDescriptor.fromFfprobe(self.context, self.getFormatData(), self.getStreamData())

View File

@@ -0,0 +1,51 @@
import itertools
from .filter import Filter
class CropFilter(Filter):
    """Represents an ffmpeg crop operation (output size plus x/y offset)."""

    IDENTIFIER = 'crop'
    OUTPUT_WIDTH_KEY = 'output_width'
    OUTPUT_HEIGHT_KEY = 'output_height'
    OFFSET_X_KEY = 'x_offset'
    OFFSET_Y_KEY = 'y_offset'

    def __init__(self, **kwargs):
        """Initializes the crop geometry; unspecified values default to 0."""
        self.__width = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY, 0))
        self.__height = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY, 0))
        self.__xOffset = int(kwargs.get(CropFilter.OFFSET_X_KEY, 0))
        self.__yOffset = int(kwargs.get(CropFilter.OFFSET_Y_KEY, 0))
        super().__init__(self)

    def setArguments(self, **kwargs):
        """Replaces the crop geometry; every key must be present here."""
        self.__width = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY))
        self.__height = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY))
        self.__xOffset = int(kwargs.get(CropFilter.OFFSET_X_KEY))
        self.__yOffset = int(kwargs.get(CropFilter.OFFSET_Y_KEY))

    def getPayload(self):
        """Returns the filter description consumed by the chain iteration."""
        geometry = (self.__width, self.__height, self.__xOffset, self.__yOffset)
        return {'identifier': CropFilter.IDENTIFIER,
                'parameters': {
                    CropFilter.OUTPUT_WIDTH_KEY: self.__width,
                    CropFilter.OUTPUT_HEIGHT_KEY: self.__height,
                    CropFilter.OFFSET_X_KEY: self.__xOffset,
                    CropFilter.OFFSET_Y_KEY: self.__yOffset
                },
                'suffices': [],
                'variant': "C%d-%d-%d-%d" % geometry,
                'tokens': ["crop=%d:%d:%d:%d" % geometry]}

    def getYield(self):
        """Yields the single payload variant of this filter."""
        yield self.getPayload()

View File

@@ -0,0 +1,140 @@
import itertools
from .filter import Filter
class DeinterlaceFilter(Filter):
    """Deinterlacing filter based on ffmpeg's bwdif.

    Unlike the parameterized filters this one has no tunable arguments:
    it always emits ``bwdif=mode=1`` and therefore yields exactly one
    variant. The previous revision carried ~100 lines of commented-out
    parameter handling copy-pasted from NlmeansFilter; that dead code has
    been removed.
    """

    IDENTIFIER = 'bwdif'

    def __init__(self, **kwargs):
        """Accepts (and ignores) keyword arguments for interface symmetry
        with the other filter classes."""
        super().__init__(self)

    def getPayload(self):
        """Returns the single, parameterless filter description."""
        return {'identifier': DeinterlaceFilter.IDENTIFIER,
                'parameters': {},
                'suffices': [],
                'variant': "DEINT",
                'tokens': ['bwdif=mode=1']}

    def getYield(self):
        """Yields the one and only payload variant."""
        yield self.getPayload()

View File

@@ -144,11 +144,11 @@ class NlmeansFilter(Filter):
'suffices': suffices,
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
'tokens': ['-vf', f"{filterName}=s={strength}"
+ f":p={patchSize}"
+ f":pc={chromaPatchSize}"
+ f":r={researchWindow}"
+ f":rc={chromaResearchWindow}"]}
'tokens': [f"{filterName}=s={strength}"
+ f":p={patchSize}"
+ f":pc={chromaPatchSize}"
+ f":r={researchWindow}"
+ f":rc={chromaResearchWindow}"]}
return payload

View File

@@ -1,18 +1,24 @@
import itertools
import click
from .filter import Filter
from ffx.video_encoder import VideoEncoder
class QualityFilter(Filter):
IDENTIFIER = 'quality'
DEFAULT_QUALITY = 32
DEFAULT_VP9_QUALITY = 32
DEFAULT_H264_QUALITY = 17
QUALITY_KEY = 'quality'
def __init__(self, **kwargs):
context = click.get_current_context().obj
self.__qualitiesList = []
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
if qualities:
@@ -26,7 +32,9 @@ class QualityFilter(Filter):
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
self.__qualitiesList.append(qualityValue)
else:
self.__qualitiesList = [QualityFilter.DEFAULT_QUALITY]
self.__qualitiesList = [None]
super().__init__(self)
@@ -51,4 +59,4 @@ class QualityFilter(Filter):
def getYield(self):
for q in self.__qualitiesList:
yield self.getPayload(q)
yield self.getPayload(q)

View File

@@ -1,4 +1,4 @@
import logging
import re, logging
from jinja2 import Environment, Undefined
from .constants import DEFAULT_OUTPUT_FILENAME_TEMPLATE
@@ -15,8 +15,41 @@ DIFF_REMOVED_KEY = 'removed'
DIFF_CHANGED_KEY = 'changed'
DIFF_UNCHANGED_KEY = 'unchanged'
RICH_COLOR_PATTERN = '\[[a-z_]+\](.+)\[\/[a-z_]+\]'
def dictDiff(a : dict, b : dict):
def dictDiff(a : dict, b : dict, ignoreKeys: list = [], removeKeys: list = []):
"""
ignoreKeys: Ignored keys are filtered from calculating diff at all
removeKeys: Override diff calculation to remove keys certainly
"""
a_filtered = {k:v for k,v in a.items() if not k in ignoreKeys}
b_filtered = {k:v for k,v in b.items() if not k in ignoreKeys and k not in removeKeys}
a_only = {k:v for k,v in a_filtered.items() if not k in b_filtered.keys()}
b_only = {k:v for k,v in b_filtered.items() if not k in a_filtered.keys()}
a_b = set(a_filtered.keys()) & set(b_filtered.keys())
changed = {k:b_filtered[k] for k in a_b if a_filtered[k] != b_filtered[k]}
unchanged = {k:b_filtered[k] for k in a_b if a_filtered[k] == b_filtered[k]}
diffResult = {}
if a_only:
diffResult[DIFF_REMOVED_KEY] = a_only
diffResult[DIFF_UNCHANGED_KEY] = unchanged
if b_only:
diffResult[DIFF_ADDED_KEY] = b_only
if changed:
diffResult[DIFF_CHANGED_KEY] = changed
return diffResult
def dictKeysDiff(a : dict, b : dict):
a_keys = set(a.keys())
b_keys = set(b.keys())
@@ -40,9 +73,10 @@ def dictDiff(a : dict, b : dict):
return diffResult
def dictCache(element: dict, cache: list = []):
for index in range(len(cache)):
diff = dictDiff(cache[index], element)
diff = dictKeysDiff(cache[index], element)
if not diff:
return index, cache
cache.append(element)
@@ -53,11 +87,13 @@ def setDiff(a : set, b : set) -> set:
a_only = a - b
b_only = b - a
a_and_b = a & b
diffResult = {}
if a_only:
diffResult[DIFF_REMOVED_KEY] = a_only
diffResult[DIFF_UNCHANGED_KEY] = a_and_b
if b_only:
diffResult[DIFF_ADDED_KEY] = b_only
@@ -78,17 +114,40 @@ def filterFilename(fileName: str) -> str:
"""This filter replaces charactes from TMDB responses with characters
less problemating when using in filenames or removes them"""
# This appears in TMDB episode names
fileName = str(fileName).replace(' (*)', '')
fileName = str(fileName).replace('(*)', '')
fileName = str(fileName).replace('/', '-')
fileName = str(fileName).replace(':', ';')
fileName = str(fileName).replace('*', '')
fileName = str(fileName).replace("'", '')
fileName = str(fileName).replace("?", '#')
fileName = str(fileName).replace('', '')
fileName = str(fileName).replace('', '')
return fileName.strip()
def substituteTmdbFilename(fileName: str) -> str:
    """If chaining this method with filterFilename use this one first as the latter will destroy some patterns"""
    name = str(fileName)
    # Strip the filler-episode marker used in TMDB episode names.
    for marker in (' (*)', '(*)'):
        name = name.replace(marker, '')
    # A trailing "(N)" marks the index of a multi-episode file.
    singlePart = re.search("\\(([0-9]+)\\)$", name)
    if singlePart:
        name = name.replace(str(singlePart.group(0)),
                            f"Teil {singlePart.group(1)}")
    # A trailing "(N-M)" or "(N/M)" marks a first-to-last episode range.
    rangePart = re.search("\\(([0-9]+)[-\\/]([0-9]+)\\)$", name)
    if rangePart:
        name = name.replace(str(rangePart.group(0)),
                            f"Teil {rangePart.group(1)}-{rangePart.group(2)}")
    return name
def getEpisodeFileBasename(showName,
episodeName,
@@ -164,3 +223,17 @@ def getEpisodeFileBasename(showName,
# return ''.join(filenameTokens)
def formatRichColor(text: str, color: str = None):
    """Wraps text in rich color markup; returns it unchanged when no color is given."""
    return text if color is None else f"[{color}]{text}[/{color}]"
def removeRichColor(text: str):
    """Strips rich color markup, returning the inner text (or the input unchanged)."""
    markupMatch = re.search(RICH_COLOR_PATTERN, text)
    return text if markupMatch is None else str(markupMatch.group(1))

View File

@@ -3,77 +3,83 @@ import difflib
class IsoLanguage(Enum):
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": "afr"}
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": "alb"}
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": "ara"}
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": "arm"}
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": "aze"}
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": "baq"}
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": "bel"}
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": "bul"}
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": "cat"}
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": "chi"}
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": "hrv"}
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": "cze"}
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": "dan"}
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": "dut"}
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": "eng"}
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": "est"}
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": "fin"}
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": "fre"}
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": "geo"}
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": "ger"}
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": "gre"}
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": "heb"}
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": "hin"}
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": "hun"}
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": "ice"}
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": "ind"}
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": "gle"}
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": "ita"}
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": "jpn"}
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": "kaz"}
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": "kor"}
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": "lat"}
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": "lav"}
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": "lit"}
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": "mac"}
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": "may"}
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": "mlt"}
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": "nor"}
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": "per"}
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": "pol"}
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": "por"}
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": "rum"}
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": "rus"}
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": "sme"}
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": "smo"}
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": "sag"}
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": "san"}
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": "srd"}
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": "srp"}
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": "sna"}
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": "snd"}
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": "sin"}
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": "slk"}
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": "slv"}
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": "som"}
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": "sot"}
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": "spa"}
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": "sun"}
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": "swa"}
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": "ssw"}
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": "swe"}
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": "tgl"}
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": "tam"}
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": "tha"}
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": "tur"}
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": "ukr"}
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": "urd"}
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": "vie"}
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": "wel"}
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": ["afr"]}
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": ["alb"]}
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": ["ara"]}
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": ["arm"]}
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": ["aze"]}
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": ["baq"]}
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": ["bel"]}
BOKMAL = {"name": "Bokmål", "iso639_1": "nb", "iso639_2": ["nob"]} # Norwegian Bokmål
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": ["bul"]}
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": ["cat"]}
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": ["zho", "chi"]}
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": ["hrv"]}
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": ["cze"]}
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": ["dan"]}
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": ["nld", "dut"]}
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": ["eng"]}
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": ["est"]}
FILIPINO = {"name": "Filipino", "iso639_1": "tl", "iso639_2": ["fil"]} # Tagalog
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": ["fin"]}
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": ["fra", "fre"]}
GALICIAN = {"name": "Galician", "iso639_1": "gl", "iso639_2": ["glg"]}
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": ["geo"]}
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": ["deu", "ger"]}
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": ["gre"]}
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": ["heb"]}
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": ["hin"]}
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": ["hun"]}
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": ["ice"]}
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": ["ind"]}
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": ["gle"]}
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": ["ita"]}
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": ["jpn"]}
KANNADA = {"name": "Kannada", "iso639_1": "kn", "iso639_2": ["kan"]}
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": ["kaz"]}
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": ["kor"]}
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": ["lat"]}
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": ["lav"]}
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": ["lit"]}
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": ["mac"]}
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": ["may"]}
MALAYALAM = {"name": "Malayalam", "iso639_1": "ml", "iso639_2": ["mal"]}
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": ["mlt"]}
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": ["nor"]}
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": ["per"]}
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": ["pol"]}
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": ["por"]}
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": ["rum"]}
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": ["rus"]}
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": ["sme"]}
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": ["smo"]}
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": ["sag"]}
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": ["san"]}
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": ["srd"]}
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": ["srp"]}
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": ["sna"]}
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": ["snd"]}
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": ["sin"]}
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": ["slk"]}
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": ["slv"]}
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": ["som"]}
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": ["sot"]}
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": ["spa"]}
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": ["sun"]}
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": ["swa"]}
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": ["ssw"]}
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": ["swe"]}
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": ["tgl"]}
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": ["tam"]}
TELUGU = {"name": "Telugu", "iso639_1": "te", "iso639_2": ["tel"]}
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": ["tha"]}
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": ["tur"]}
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": ["ukr"]}
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": ["urd"]}
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2":[ "vie"]}
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": ["wel"]}
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": "und"}
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": ["und"]}
@staticmethod
@@ -89,7 +95,7 @@ class IsoLanguage(Enum):
@staticmethod
def findThreeLetter(theeLetter : str):
foundLangs = [l for l in IsoLanguage if l.value['iso639_2'] == str(theeLetter)]
foundLangs = [l for l in IsoLanguage if str(theeLetter) in l.value['iso639_2']]
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
@@ -100,7 +106,6 @@ class IsoLanguage(Enum):
return str(self.value['iso639_1'])
def threeLetter(self):
return str(self.value['iso639_2'])
return str(self.value['iso639_2'][0])

View File

@@ -32,7 +32,8 @@ class MediaController():
for mediaTagKey, mediaTagValue in mediaDescriptor.getTags():
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
self.__tc.addTrack(trackDescriptor, patternId = pid)
s.commit()

View File

@@ -10,8 +10,6 @@ from ffx.track_codec import TrackCodec
from ffx.track_descriptor import TrackDescriptor
from ffx.helper import dictDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
class MediaDescriptor:
"""This class represents the structural content of a media file including streams and metadata"""
@@ -22,6 +20,7 @@ class MediaDescriptor:
TRACKS_KEY = "tracks"
TRACK_DESCRIPTOR_LIST_KEY = "track_descriptors"
ATTACHMENT_DESCRIPTOR_LIST_KEY = "attachment_descriptors"
CLEAR_TAGS_FLAG_KEY = "clear_tags"
FFPROBE_DISPOSITION_KEY = "disposition"
@@ -31,7 +30,9 @@ class MediaDescriptor:
#407 remove as well
EXCLUDED_MEDIA_TAGS = ["creation_time"]
SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
STREAM_LANGUAGE_DISPOSITIONS_MATCH = '([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
SUBTITLE_FILE_EXTENSION = 'vtt'
def __init__(self, **kwargs):
@@ -69,9 +70,9 @@ class MediaDescriptor:
raise TypeError(
f"TrackDesciptor.__init__(): All elements of argument list {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} are required to be of type TrackDescriptor"
)
self.__trackDescriptors = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
self.__trackDescriptors: List[TrackDescriptor] = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
else:
self.__trackDescriptors = []
self.__trackDescriptors: List[TrackDescriptor] = []
def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
@@ -107,14 +108,16 @@ class MediaDescriptor:
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
for t in self.getAllTrackDescriptors():
# for t in self.getAllTrackDescriptors():
for t in self.getTrackDescriptors():
if t.getType() == trackType:
t.setDispositionFlag(
TrackDisposition.DEFAULT, t.getSubIndex() == int(subIndex)
)
def setForcedSubTrack(self, trackType: TrackType, subIndex: int):
for t in self.getAllTrackDescriptors():
# for t in self.getAllTrackDescriptors():
for t in self.getTrackDescriptors():
if t.getType() == trackType:
t.setDispositionFlag(
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
@@ -190,7 +193,8 @@ class MediaDescriptor:
def applySourceIndices(self, sourceMediaDescriptor: Self):
sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
# sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors()
numTrackDescriptors = len(self.__trackDescriptors)
if len(sourceTrackDescriptors) != numTrackDescriptors:
@@ -285,9 +289,9 @@ class MediaDescriptor:
tdList[trackIndex].setIndex(trackIndex)
def getAllTrackDescriptors(self):
"""Returns all track descriptors sorted by type: video, audio then subtitles"""
return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
# def getAllTrackDescriptors(self):
# """Returns all track descriptors sorted by type: video, audio then subtitles"""
# return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
def getTrackDescriptors(self,
@@ -317,82 +321,16 @@ class MediaDescriptor:
if s.getType() == TrackType.SUBTITLE
]
def compare(self, vsMediaDescriptor: Self):
if not isinstance(vsMediaDescriptor, self.__class__):
self.__logger.error(f"MediaDescriptor.compare(): Argument is required to be of type {self.__class__}")
raise click.Abort()
vsTags = vsMediaDescriptor.getTags()
tags = self.getTags()
# HINT: Some tags differ per file, for example creation_time, so these are removed before diff
for emt in MediaDescriptor.EXCLUDED_MEDIA_TAGS:
if emt in tags.keys():
del tags[emt]
if emt in vsTags.keys():
del vsTags[emt]
tagsDiff = dictDiff(vsTags, tags)
compareResult = {}
if tagsDiff:
compareResult[MediaDescriptor.TAGS_KEY] = tagsDiff
# Target track configuration (from DB)
# tracks = self.getAllTrackDescriptors()
tracks = self.getAllTrackDescriptors() # filtern
numTracks = len(tracks)
# Current track configuration (of file)
vsTracks = vsMediaDescriptor.getAllTrackDescriptors()
numVsTracks = len(vsTracks)
maxNumOfTracks = max(numVsTracks, numTracks)
trackCompareResult = {}
for tp in range(maxNumOfTracks):
#!
vsTrackIndex = tracks[tp].getSourceIndex()
# Will trigger if tracks are missing in file
if tp > (numVsTracks - 1):
if DIFF_ADDED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_ADDED_KEY] = set()
trackCompareResult[DIFF_ADDED_KEY].add(tracks[tp].getIndex())
continue
# Will trigger if tracks are missing in DB definition
# New tracks will be added per update via this way
if tp > (numTracks - 1):
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_REMOVED_KEY] = {}
trackCompareResult[DIFF_REMOVED_KEY][
vsTracks[vsTrackIndex].getIndex()
] = vsTracks[vsTrackIndex]
continue
# assumption is made here that the track order will not change for all files of a sequence
trackDiff = tracks[tp].compare(vsTracks[vsTrackIndex])
if trackDiff:
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_CHANGED_KEY] = {}
trackCompareResult[DIFF_CHANGED_KEY][
vsTracks[vsTrackIndex].getIndex()
] = trackDiff
if trackCompareResult:
compareResult[MediaDescriptor.TRACKS_KEY] = trackCompareResult
return compareResult
def getAttachmentTracks(self) -> List[TrackDescriptor]:
return [
s
for s in self.__trackDescriptors
if s.getType() == TrackType.ATTACHMENT
]
def getImportFileTokens(self, use_sub_index: bool = True):
"""Generate ffmpeg import options for external stream files"""
importFileTokens = []
@@ -415,76 +353,103 @@ class MediaDescriptor:
return importFileTokens
def getInputMappingTokens(self, use_sub_index: bool = True, only_video: bool = False):
def getInputMappingTokens(self,
use_sub_index: bool = True,
only_video: bool = False,
sourceMediaDescriptor: Self = None):
"""Tracks must be reordered for source index order"""
inputMappingTokens = []
sortedTrackDescriptors = sorted(self.__trackDescriptors, key=lambda d: d.getIndex())
# raise click.ClickException(' '.join([f"\nindex={td.getIndex()} subIndex={td.getSubIndex()} srcIndex={td.getSourceIndex()} type={td.getType().label()}" for td in self.__trackDescriptors]))
filePointer = 1
for trackIndex in range(len(self.__trackDescriptors)):
for trackIndex in range(len(sortedTrackDescriptors)):
td = self.__trackDescriptors[trackIndex]
td: TrackDescriptor = sortedTrackDescriptors[trackIndex]
stdi = self.__trackDescriptors[td.getSourceIndex()].getIndex()
stdsi = self.__trackDescriptors[td.getSourceIndex()].getSubIndex()
#HINT: Attached thumbnails are not supported by .webm container format
if td.getCodec() != TrackCodec.PNG:
# sti = self.__trackDescriptors[trackIndex].getSourceIndex()
# sotd = sourceOrderTrackDescriptors[sti]
stdi = sortedTrackDescriptors[td.getSourceIndex()].getIndex()
stdsi = sortedTrackDescriptors[td.getSourceIndex()].getSubIndex()
trackType = td.getType()
if (trackType == TrackType.VIDEO or not only_video):
trackType = td.getType()
trackCodec = td.getCodec()
importedFilePath = td.getExternalSourceFilePath()
if (trackType != TrackType.ATTACHMENT
and (trackType == TrackType.VIDEO or not only_video)):
if use_sub_index:
if importedFilePath:
importedFilePath = td.getExternalSourceFilePath()
inputMappingTokens += [
"-map",
f"{filePointer}:{trackType.indicator()}:0",
]
filePointer += 1
if use_sub_index:
else:
if importedFilePath:
if td.getCodec() != TrackCodec.PGS:
inputMappingTokens += [
"-map",
f"0:{trackType.indicator()}:{stdsi}",
f"{filePointer}:{trackType.indicator()}:0",
]
filePointer += 1
else:
if td.getCodec() != TrackCodec.PGS:
inputMappingTokens += ["-map", f"0:{stdi}"]
else:
if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
inputMappingTokens += [
"-map",
f"0:{trackType.indicator()}:{stdsi}",
]
else:
if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
inputMappingTokens += ["-map", f"0:{stdi}"]
if sourceMediaDescriptor:
fontDescriptors = [ftd for ftd in sourceMediaDescriptor.getAttachmentTracks()
if ftd.getCodec() == TrackCodec.TTF]
else:
fontDescriptors = [ftd for ftd in self.__trackDescriptors
if ftd.getType() == TrackType.ATTACHMENT
and ftd.getCodec() == TrackCodec.TTF]
for ad in sorted(fontDescriptors, key=lambda d: d.getIndex()):
inputMappingTokens += ["-map", f"0:{ad.getIndex()}"]
return inputMappingTokens
def searchSubtitleFiles(self, searchDirectory, prefix):
sesl_match = re.compile(MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_MATCH)
sesld_match = re.compile(f"{prefix}_{MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
sld_match = re.compile(f"{prefix}_{MediaDescriptor.STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
subtitleFileDescriptors = []
for subtitleFilename in os.listdir(searchDirectory):
if subtitleFilename.startswith(prefix) and subtitleFilename.endswith(
"." + MediaDescriptor.SUBTITLE_FILE_EXTENSION
):
sesl_result = sesl_match.search(subtitleFilename)
if sesl_result is not None:
sesld_result = sesld_match.search(subtitleFilename)
sld_result = None if not sesld_result is None else sld_match.search(subtitleFilename)
if not sesld_result is None:
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
if os.path.isfile(subtitleFilePath):
subtitleFileDescriptor = {}
subtitleFileDescriptor["path"] = subtitleFilePath
subtitleFileDescriptor["season"] = int(sesl_result.group(1))
subtitleFileDescriptor["episode"] = int(sesl_result.group(2))
subtitleFileDescriptor["index"] = int(sesl_result.group(3))
subtitleFileDescriptor["language"] = sesl_result.group(4)
subtitleFileDescriptor["season"] = int(sesld_result.group(1))
subtitleFileDescriptor["episode"] = int(sesld_result.group(2))
subtitleFileDescriptor["index"] = int(sesld_result.group(3))
subtitleFileDescriptor["language"] = sesld_result.group(4)
dispSet = set()
dispCaptGroups = sesl_result.groups()
dispCaptGroups = sesld_result.groups()
numCaptGroups = len(dispCaptGroups)
if numCaptGroups > 4:
for groupIndex in range(numCaptGroups - 4):
@@ -495,6 +460,29 @@ class MediaDescriptor:
subtitleFileDescriptors.append(subtitleFileDescriptor)
if not sld_result is None:
subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
if os.path.isfile(subtitleFilePath):
subtitleFileDescriptor = {}
subtitleFileDescriptor["path"] = subtitleFilePath
subtitleFileDescriptor["index"] = int(sld_result.group(1))
subtitleFileDescriptor["language"] = sld_result.group(2)
dispSet = set()
dispCaptGroups = sld_result.groups()
numCaptGroups = len(dispCaptGroups)
if numCaptGroups > 2:
for groupIndex in range(numCaptGroups - 2):
disp = TrackDisposition.fromIndicator(dispCaptGroups[groupIndex + 2])
if disp is not None:
dispSet.add(disp)
subtitleFileDescriptor["disposition_set"] = dispSet
subtitleFileDescriptors.append(subtitleFileDescriptor)
self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")
return subtitleFileDescriptors
@@ -518,7 +506,8 @@ class MediaDescriptor:
[
d
for d in availableFileSubtitleDescriptors
if d["season"] == int(season) and d["episode"] == int(episode)
if ((season == -1 and episode == -1)
or (d["season"] == int(season) and d["episode"] == int(episode)))
],
key=lambda d: d["index"],
)
@@ -541,7 +530,8 @@ class MediaDescriptor:
def getConfiguration(self, label: str = ''):
yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
for td in self.getAllTrackDescriptors():
# for td in self.getAllTrackDescriptors():
for td in self.getTrackDescriptors():
yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
+ '|'.join([d.indicator() for d in td.getDispositionSet()])
+ ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))

View File

@@ -0,0 +1,302 @@
import click
from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.helper import dictDiff, setDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
from ffx.track_codec import TrackCodec
from ffx.track_disposition import TrackDisposition
class MediaDescriptorChangeSet():
TAGS_KEY = "tags"
TRACKS_KEY = "tracks"
DISPOSITION_SET_KEY = "disposition_set"
TRACK_DESCRIPTOR_KEY = "track_descriptor"
def __init__(self, context,
targetMediaDescriptor: MediaDescriptor = None,
sourceMediaDescriptor: MediaDescriptor = None):
self.__context = context
self.__logger = context['logger']
self.__configurationData = self.__context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
self.__targetTrackDescriptors = targetMediaDescriptor.getTrackDescriptors() if targetMediaDescriptor is not None else []
self.__sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors() if sourceMediaDescriptor is not None else []
targetMediaTags = targetMediaDescriptor.getTags() if targetMediaDescriptor is not None else {}
sourceMediaTags = sourceMediaDescriptor.getTags() if sourceMediaDescriptor is not None else {}
self.__changeSetObj = {}
#if targetMediaDescriptor is not None:
#!!#
tagsDiff = dictDiff(sourceMediaTags,
targetMediaTags,
ignoreKeys=self.__ignoreGlobalKeys,
removeKeys=self.__removeGlobalKeys)
if tagsDiff:
self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiff
self.__numTargetTracks = len(self.__targetTrackDescriptors)
# Current track configuration (of file)
self.__numSourceTracks = len(self.__sourceTrackDescriptors)
maxNumOfTracks = max(self.__numSourceTracks, self.__numTargetTracks)
trackCompareResult = {}
for trackIndex in range(maxNumOfTracks):
correspondingSourceTrackDescriptors = [st for st in self.__sourceTrackDescriptors if st.getIndex() == trackIndex]
correspondingTargetTrackDescriptors = [tt for tt in self.__targetTrackDescriptors if tt.getIndex() == trackIndex]
# Track present in target but not in source
if (not correspondingSourceTrackDescriptors
and correspondingTargetTrackDescriptors):
if DIFF_ADDED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_ADDED_KEY] = {}
trackCompareResult[DIFF_ADDED_KEY][trackIndex] = correspondingTargetTrackDescriptors[0]
continue
# Track present in target but not in source
if (correspondingSourceTrackDescriptors
and not correspondingTargetTrackDescriptors):
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_REMOVED_KEY] = {}
trackCompareResult[DIFF_REMOVED_KEY][trackIndex] = correspondingSourceTrackDescriptors[0]
continue
if (correspondingSourceTrackDescriptors
and correspondingTargetTrackDescriptors):
# if correspondingTargetTrackDescriptors[0].getIndex() == 3:
# raise click.ClickException(f"{correspondingSourceTrackDescriptors[0].getDispositionSet()} {correspondingTargetTrackDescriptors[0].getDispositionSet()}")
trackDiff = self.compareTracks(correspondingTargetTrackDescriptors[0],
correspondingSourceTrackDescriptors[0])
if trackDiff:
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
trackCompareResult[DIFF_CHANGED_KEY] = {}
trackCompareResult[DIFF_CHANGED_KEY][trackIndex] = trackDiff
if trackCompareResult:
self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY] = trackCompareResult
def compareTracks(self,
targetTrackDescriptor: TrackDescriptor = None,
sourceTrackDescriptor: TrackDescriptor = None):
sourceTrackTags = sourceTrackDescriptor.getTags() if sourceTrackDescriptor is not None else {}
targetTrackTags = targetTrackDescriptor.getTags() if targetTrackDescriptor is not None else {}
trackCompareResult = {}
tagsDiffResult = dictDiff(sourceTrackTags,
targetTrackTags,
ignoreKeys=self.__ignoreTrackKeys,
removeKeys=self.__removeTrackKeys)
if tagsDiffResult:
trackCompareResult[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiffResult
sourceDispositionSet = sourceTrackDescriptor.getDispositionSet() if sourceTrackDescriptor is not None else set()
targetDispositionSet = targetTrackDescriptor.getDispositionSet() if targetTrackDescriptor is not None else set()
# if targetTrackDescriptor.getIndex() == 3:
# raise click.ClickException(f"{sourceDispositionSet} {targetDispositionSet}")
dispositionDiffResult = setDiff(sourceDispositionSet, targetDispositionSet)
if dispositionDiffResult:
trackCompareResult[MediaDescriptorChangeSet.DISPOSITION_SET_KEY] = dispositionDiffResult
return trackCompareResult
def generateDispositionTokens(self):
"""
#Example: -disposition:s:0 default -disposition:s:1 0
"""
dispositionTokens = []
# if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
#
# if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
# trackDescriptor: TrackDescriptor
# for trackDescriptor in addedTracks.values():
#
# dispositionSet = trackDescriptor.getDispositionSet()
#
# if dispositionSet:
# dispositionTokens += [f"-disposition:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
# '+'.join([d.label() for d in dispositionSet])]
#
# if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
# trackDiffObj: dict
#
#
# for trackIndex, trackDiffObj in changedTracks.items():
#
# if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
#
# dispositionDiffObj: dict = trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY]
#
# addedDispositions = dispositionDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in dispositionDiffObj.keys() else set()
# removedDispositions = dispositionDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in dispositionDiffObj.keys() else set()
# unchangedDispositions = dispositionDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in dispositionDiffObj.keys() else set()
#
# targetDispositions = addedDispositions | unchangedDispositions
#
# trackDescriptor = self.__targetTrackDescriptors[trackIndex]
# streamIndicator = trackDescriptor.getType().indicator()
# subIndex = trackDescriptor.getSubIndex()
#
# if targetDispositions:
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
# # if not targetDispositions and removedDispositions:
# else:
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
for ttd in self.__targetTrackDescriptors:
targetDispositions = ttd.getDispositionSet()
streamIndicator = ttd.getType().indicator()
subIndex = ttd.getSubIndex()
if targetDispositions:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
# if not targetDispositions and removedDispositions:
else:
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
return dispositionTokens
def generateMetadataTokens(self):
metadataTokens = []
if MediaDescriptorChangeSet.TAGS_KEY in self.__changeSetObj.keys():
addedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
removedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
if DIFF_REMOVED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
changedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
outputMediaTags = addedMediaTags | changedMediaTags
if (not 'no_signature' in self.__context.keys()
or not self.__context['no_signature']):
outputMediaTags = outputMediaTags | self.__signatureTags
# outputMediaTags = {k:v for k,v in outputMediaTags.items() if k not in self.__removeGlobalKeys}
for tagKey, tagValue in outputMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for tagKey, tagValue in changedMediaTags.items():
metadataTokens += [f"-metadata:g",
f"{tagKey}={tagValue}"]
for removeKey in removedMediaTags.keys():
metadataTokens += [f"-metadata:g",
f"{removeKey}="]
if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
trackDescriptor: TrackDescriptor
for trackDescriptor in addedTracks.values():
for tagKey, tagValue in trackDescriptor.getTags().items():
if not tagKey in self.__removeTrackKeys:
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
trackDiffObj: dict
for trackIndex, trackDiffObj in changedTracks.items():
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
tagsDiffObj = trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY]
addedTrackTags = tagsDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in tagsDiffObj.keys() else {}
changedTrackTags = tagsDiffObj[DIFF_CHANGED_KEY] if DIFF_CHANGED_KEY in tagsDiffObj.keys() else {}
unchangedTrackTags = tagsDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in tagsDiffObj.keys() else {}
removedTrackTags = tagsDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in tagsDiffObj.keys() else {}
outputTrackTags = addedTrackTags | changedTrackTags
trackDescriptor = self.__targetTrackDescriptors[trackIndex]
for tagKey, tagValue in outputTrackTags.items():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
for removeKey in removedTrackTags.keys():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{removeKey}="]
#HINT: In case of loading a track from an external file
# no tags from source are present for the track so
# the unchanged tracks are passed to the output file as well
if trackDescriptor.getExternalSourceFilePath():
for tagKey, tagValue in unchangedTrackTags.items():
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
+ f":{trackDescriptor.getSubIndex()}",
f"{tagKey}={tagValue}"]
return metadataTokens
def getChangeSetObj(self):
return self.__changeSetObj

View File

@@ -15,6 +15,7 @@ from .show_details_screen import ShowDetailsScreen
from .pattern_details_screen import PatternDetailsScreen
from ffx.track_type import TrackType
from ffx.track_codec import TrackCodec
from ffx.model.track import Track
from ffx.track_disposition import TrackDisposition
@@ -26,7 +27,9 @@ from textual.widgets._data_table import CellDoesNotExist
from ffx.media_descriptor import MediaDescriptor
from ffx.file_properties import FileProperties
from ffx.helper import DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
from ffx.helper import formatRichColor, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
# Screen[dict[int, str, int]]
@@ -36,8 +39,8 @@ class MediaDetailsScreen(Screen):
Grid {
grid-size: 5 8;
grid-rows: 8 2 2 2 8 2 2 8;
grid-columns: 25 25 120 10 75;
grid-rows: 8 2 2 2 2 8 2 2 8;
grid-columns: 15 25 90 10 105;
height: 100%;
width: 100%;
padding: 1;
@@ -90,6 +93,10 @@ class MediaDetailsScreen(Screen):
border: solid green;
}
.purple {
tint: purple 40%;
}
.yellow {
tint: yellow 40%;
}
@@ -105,6 +112,19 @@ class MediaDetailsScreen(Screen):
"""
TRACKS_TABLE_INDEX_COLUMN_LABEL = "Index"
TRACKS_TABLE_TYPE_COLUMN_LABEL = "Type"
TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL = "SubIndex"
TRACKS_TABLE_CODEC_COLUMN_LABEL = "Codec"
TRACKS_TABLE_LAYOUT_COLUMN_LABEL = "Layout"
TRACKS_TABLE_LANGUAGE_COLUMN_LABEL = "Language"
TRACKS_TABLE_TITLE_COLUMN_LABEL = "Title"
TRACKS_TABLE_DEFAULT_COLUMN_LABEL = "Default"
TRACKS_TABLE_FORCED_COLUMN_LABEL = "Forced"
DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL = 'Differences (file->db/output)'
BINDINGS = [
("n", "new_pattern", "New Pattern"),
("u", "update_pattern", "Update Pattern"),
@@ -118,6 +138,22 @@ class MediaDetailsScreen(Screen):
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
self.__configurationData = self.context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
self.__pc = PatternController(context = self.context)
self.__sc = ShowController(context = self.context)
self.__tc = TrackController(context = self.context)
@@ -137,7 +173,25 @@ class MediaDetailsScreen(Screen):
self.loadProperties()
def getRowIndexFromShowId(self, showId : int) -> int:
def removeShow(self, showId : int = -1):
"""Remove show entry from DataTable.
Removes the <New show> entry if showId is not set"""
for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
rowData = self.showsTable.get_row(rowKey)
try:
if (showId == -1 and rowData[0] == ' '
or showId == int(rowData[0])):
self.showsTable.remove_row(rowKey)
return
except:
continue
def getRowIndexFromShowId(self, showId : int = -1) -> int:
"""Find the index of the row where the value in the specified column matches the target_value."""
for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
@@ -145,7 +199,8 @@ class MediaDetailsScreen(Screen):
rowData = self.showsTable.get_row(rowKey)
try:
if showId == int(rowData[0]):
if ((showId == -1 and rowData[0] == ' ')
or showId == int(rowData[0])):
return int(self.showsTable.get_row_index(rowKey))
except:
continue
@@ -156,7 +211,7 @@ class MediaDetailsScreen(Screen):
def loadProperties(self):
self.__mediaFileProperties = FileProperties(self.context, self.__mediaFilename)
self.__currentMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
self.__sourceMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
#HINT: This is None if the filename did not match anything in database
self.__currentPattern = self.__mediaFileProperties.getPattern()
@@ -167,9 +222,13 @@ class MediaDetailsScreen(Screen):
# Enumerating differences between media descriptors
# from file (=current) vs from stored in database (=target)
try:
self.__mediaDifferences = self.__targetMediaDescriptor.compare(self.__currentMediaDescriptor) if self.__currentPattern is not None else {}
mdcs = MediaDescriptorChangeSet(self.context,
self.__targetMediaDescriptor,
self.__sourceMediaDescriptor)
self.__mediaChangeSetObj = mdcs.getChangeSetObj()
except ValueError:
self.__mediaDifferences = {}
self.__mediaChangeSetObj = {}
def updateDifferences(self):
@@ -178,74 +237,88 @@ class MediaDetailsScreen(Screen):
self.differencesTable.clear()
if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
currentTags = self.__currentMediaDescriptor.getTags()
targetTags = self.__targetMediaDescriptor.getTags()
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
row = (f"added media tag: key='{addedTagKey}' value='{targetTags[addedTagKey]}'",)
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].items():
if tagKey not in self.__ignoreGlobalKeys:
row = (f"add media tag: key='{tagKey}' value='{tagValue}'",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].items():
if tagKey not in self.__ignoreGlobalKeys and tagKey not in self.__removeGlobalKeys:
row = (f"remove media tag: key='{tagKey}' value='{tagValue}'",)
self.differencesTable.add_row(*map(str, row))
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].items():
if tagKey not in self.__ignoreGlobalKeys:
row = (f"change media tag: key='{tagKey}' value='{tagValue}'",)
self.differencesTable.add_row(*map(str, row))
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
trackDescriptor: TrackDescriptor
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
row = (f"add {trackDescriptor.getType().label()} track: index={trackDescriptor.getIndex()} lang={trackDescriptor.getLanguage().threeLetter()}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
row = (f"removed media tag: key='{removedTagKey}' value='{currentTags[removedTagKey]}'",)
self.differencesTable.add_row(*map(str, row))
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
row = (f"changed media tag: key='{changedTagKey}' value='{currentTags[changedTagKey]}'->'{targetTags[changedTagKey]}'",)
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
row = (f"remove stream #{trackIndex}",)
self.differencesTable.add_row(*map(str, row))
if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
currentTracks = self.__currentMediaDescriptor.getAllTrackDescriptors() # 0,1,2,3
targetTracks = self.__targetMediaDescriptor.getAllTrackDescriptors() # 0 <- from DB
changedTracks: dict = self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
targetTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors()
#raise click.ClickException(f"add track {self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]}")
for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
addedTrack : Track = targetTracks[addedTrackIndex]
row = (f"added {addedTrack.getType().label()} track: index={addedTrackIndex} lang={addedTrack.getLanguage().threeLetter()}",)
self.differencesTable.add_row(*map(str, row))
trackDiffObj: dict
for trackIndex, trackDiffObj in changedTracks.items():
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
for removedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY]:
row = (f"removed track: index={removedTrackIndex}",)
self.differencesTable.add_row(*map(str, row))
ttd: TrackDescriptor = targetTrackDescriptors[trackIndex]
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
for changedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].keys():
changedTrack : Track = targetTracks[changedTrackIndex]
changedTrackDiff : dict = self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY][changedTrackIndex]
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
if MediaDescriptor.TAGS_KEY in changedTrackDiff.keys():
removedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
for tagKey, tagValue in removedTags.items():
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove key={tagKey} value={tagValue}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_ADDED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
for addedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
addedTagValue = changedTrack.getTags()[addedTagKey]
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added key={addedTagKey} value={addedTagValue}",)
self.differencesTable.add_row(*map(str, row))
addedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
for tagKey, tagValue in addedTags.items():
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add key={tagKey} value={tagValue}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
for removedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed key={removedTagKey}",)
self.differencesTable.add_row(*map(str, row))
changedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
if DIFF_CHANGED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
for tagKey, tagValue in changedTags.items():
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) change key={tagKey} value={tagValue}",)
self.differencesTable.add_row(*map(str, row))
if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
if DIFF_ADDED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
for addedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]:
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added disposition={addedDisposition.label()}",)
self.differencesTable.add_row(*map(str, row))
addedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
for ad in addedDispositions:
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add disposition={ad.label()}",)
self.differencesTable.add_row(*map(str, row))
if DIFF_REMOVED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
for removedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]:
row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed disposition={removedDisposition.label()}",)
self.differencesTable.add_row(*map(str, row))
removedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
for rd in removedDispositions:
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove disposition={rd.label()}",)
self.differencesTable.add_row(*map(str, row))
def on_mount(self):
@@ -258,8 +331,15 @@ class MediaDetailsScreen(Screen):
row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
self.showsTable.add_row(*map(str, row))
for mediaTagKey, mediaTagValue in self.__currentMediaDescriptor.getTags().items():
row = (mediaTagKey, mediaTagValue) # Convert each element to a string before adding
for mediaTagKey, mediaTagValue in self.__sourceMediaDescriptor.getTags().items():
textColor = None
if mediaTagKey in self.__ignoreGlobalKeys:
textColor = 'blue'
if mediaTagKey in self.__removeGlobalKeys:
textColor = 'red'
row = (formatRichColor(mediaTagKey, textColor), formatRichColor(mediaTagValue, textColor)) # Convert each element to a string before adding
self.mediaTagsTable.add_row(*map(str, row))
self.updateTracks()
@@ -293,7 +373,8 @@ class MediaDetailsScreen(Screen):
self.tracksTable.clear()
trackDescriptorList = self.__currentMediaDescriptor.getAllTrackDescriptors()
# trackDescriptorList = self.__sourceMediaDescriptor.getAllTrackDescriptors()
trackDescriptorList = self.__sourceMediaDescriptor.getTrackDescriptors()
typeCounter = {}
@@ -328,7 +409,7 @@ class MediaDetailsScreen(Screen):
# Define the columns with headers
self.column_key_show_id = self.showsTable.add_column("ID", width=10)
self.column_key_show_name = self.showsTable.add_column("Name", width=50)
self.column_key_show_name = self.showsTable.add_column("Name", width=80)
self.column_key_show_year = self.showsTable.add_column("Year", width=10)
self.showsTable.cursor_type = 'row'
@@ -337,8 +418,8 @@ class MediaDetailsScreen(Screen):
self.mediaTagsTable = DataTable(classes="two")
# Define the columns with headers
self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=50)
self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=100)
self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=30)
self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=70)
self.mediaTagsTable.cursor_type = 'row'
@@ -346,15 +427,15 @@ class MediaDetailsScreen(Screen):
self.tracksTable = DataTable(classes="two")
# Define the columns with headers
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
self.column_key_track_layout = self.tracksTable.add_column("Layout", width=10)
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
self.column_key_track_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_INDEX_COLUMN_LABEL, width=5)
self.column_key_track_type = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TYPE_COLUMN_LABEL, width=10)
self.column_key_track_sub_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL, width=8)
self.column_key_track_codec = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_CODEC_COLUMN_LABEL, width=10)
self.column_key_track_layout = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LAYOUT_COLUMN_LABEL, width=10)
self.column_key_track_language = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LANGUAGE_COLUMN_LABEL, width=15)
self.column_key_track_title = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TITLE_COLUMN_LABEL, width=48)
self.column_key_track_default = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_DEFAULT_COLUMN_LABEL, width=8)
self.column_key_track_forced = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_FORCED_COLUMN_LABEL, width=8)
self.tracksTable.cursor_type = 'row'
@@ -363,7 +444,7 @@ class MediaDetailsScreen(Screen):
self.differencesTable = DataTable(id='differences-table') # classes="triple"
# Define the columns with headers
self.column_key_differences = self.differencesTable.add_column("Differences (file->db)", width=70)
self.column_key_differences = self.differencesTable.add_column(MediaDetailsScreen.DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL, width=100)
self.differencesTable.cursor_type = 'row'
@@ -376,35 +457,38 @@ class MediaDetailsScreen(Screen):
yield self.showsTable
yield Static(" ")
yield self.differencesTable
# 2
yield Static(" ", classes="four")
# 3
yield Static(" ")
yield Button("Substitute", id="pattern_button")
yield Static(" ", classes="two")
# 3
# 4
yield Static("Pattern")
yield Input(type="text", id='pattern_input', classes="two")
yield Static(" ")
# 4
# 5
yield Static(" ", classes="four")
# 5
# 6
yield Static("Media Tags")
yield self.mediaTagsTable
yield Static(" ")
# 6
# 7
yield Static(" ", classes="four")
# 7
# 8
yield Static(" ")
yield Button("Set Default", id="select_default_button")
yield Button("Set Forced", id="select_forced_button")
yield Static(" ")
# 8
# 9
yield Static("Streams")
yield self.tracksTable
yield Static(" ")
@@ -412,15 +496,15 @@ class MediaDetailsScreen(Screen):
yield Footer()
def getPatternDescriptorFromInput(self):
"""Returns show id and pattern from corresponding inputs"""
patternDescriptor = {}
def getPatternObjFromInput(self):
"""Returns show id and pattern as obj from corresponding inputs"""
patternObj = {}
try:
patternDescriptor['show_id'] = self.getSelectedShowDescriptor().getId()
patternDescriptor['pattern'] = str(self.query_one("#pattern_input", Input).value)
patternObj['show_id'] = self.getSelectedShowDescriptor().getId()
patternObj['pattern'] = str(self.query_one("#pattern_input", Input).value)
except:
pass
return patternDescriptor
return {}
return patternObj
def on_button_pressed(self, event: Button.Pressed) -> None:
@@ -437,12 +521,12 @@ class MediaDetailsScreen(Screen):
if event.button.id == "select_default_button":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
self.__currentMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.__sourceMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.updateTracks()
if event.button.id == "select_forced_button":
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
self.__currentMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.__sourceMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
self.updateTracks()
@@ -462,7 +546,7 @@ class MediaDetailsScreen(Screen):
kwargs[TrackDescriptor.INDEX_KEY] = int(selected_track_data[0])
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(selected_track_data[1])
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(selected_track_data[2])
kwargs[TrackDescriptor.CODEC_NAME_KEY] = int(selected_track_data[3])
kwargs[TrackDescriptor.CODEC_KEY] = TrackCodec.fromLabel(selected_track_data[3])
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(selected_track_data[4])
return TrackDescriptor(**kwargs)
@@ -473,11 +557,10 @@ class MediaDetailsScreen(Screen):
return None
def getSelectedShowDescriptor(self):
def getSelectedShowDescriptor(self) -> ShowDescriptor:
try:
# Fetch the currently selected row when 'Enter' is pressed
#selected_row_index = self.table.cursor_row
row_key, col_key = self.showsTable.coordinate_to_cell_key(self.showsTable.cursor_coordinate)
if row_key is not None:
@@ -500,34 +583,44 @@ class MediaDetailsScreen(Screen):
def handle_new_pattern(self, showDescriptor: ShowDescriptor):
""""""
show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
self.showsTable.add_row(*map(str, show))
if type(showDescriptor) is not ShowDescriptor:
raise TypeError("MediaDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor")
self.removeShow()
showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
if showRowIndex is None:
show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
self.showsTable.add_row(*map(str, show))
showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
if showRowIndex is not None:
self.showsTable.move_cursor(row=showRowIndex)
patternDescriptor = self.getPatternDescriptorFromInput()
patternObj = self.getPatternObjFromInput()
if patternDescriptor:
patternId = self.__pc.addPattern(patternDescriptor)
if patternObj:
patternId = self.__pc.addPattern(patternObj)
if patternId:
self.highlightPattern(False)
for tagKey, tagValue in self.__currentMediaDescriptor.getTags().items():
self.__tac.updateMediaTag(patternId, tagKey, tagValue)
for tagKey, tagValue in self.__sourceMediaDescriptor.getTags().items():
for trackDescriptor in self.__currentMediaDescriptor.getAllTrackDescriptors():
# Filter tags that make no sense to preserve
if tagKey not in self.__ignoreGlobalKeys and not tagKey in self.__removeGlobalKeys:
self.__tac.updateMediaTag(patternId, tagKey, tagValue)
# for trackDescriptor in self.__sourceMediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in self.__sourceMediaDescriptor.getTrackDescriptors():
self.__tc.addTrack(trackDescriptor, patternId = patternId)
def action_new_pattern(self):
try:
self.__currentMediaDescriptor.checkConfiguration()
except ValueError:
return
"""Adding new patterns
If the corresponding show does not exists in DB it is added beforehand"""
selectedShowDescriptor = self.getSelectedShowDescriptor()
@@ -540,90 +633,104 @@ class MediaDetailsScreen(Screen):
def action_update_pattern(self):
"""When updating the database the actions must reverse the difference (eq to diff db->file)"""
"""Updating patterns
When updating the database the actions must reverse the difference (eq to diff db->file)"""
if self.__currentPattern is not None:
patternDescriptor = self.getPatternDescriptorFromInput()
if (patternDescriptor
and self.__currentPattern.getPattern() != patternDescriptor['pattern']):
return self.__pc.updatePattern(self.__currentPattern.getId(), patternDescriptor)
patternObj = self.getPatternObjFromInput()
if (patternObj
and self.__currentPattern.getPattern() != patternObj['pattern']):
return self.__pc.updatePattern(self.__currentPattern.getId(), patternObj)
self.loadProperties()
if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
# __mediaChangeSetObj is file vs database
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for addedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].keys():
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} addedTagKey={addedTagKey}")
self.__tac.deleteMediaTagByKey(self.__currentPattern.getId(), addedTagKey)
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
currentTags = self.__currentMediaDescriptor.getTags()
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for removedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].keys():
currentTags = self.__sourceMediaDescriptor.getTags()
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} removedTagKey={removedTagKey} currentTags={currentTags[removedTagKey]}")
self.__tac.updateMediaTag(self.__currentPattern.getId(), removedTagKey, currentTags[removedTagKey])
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
currentTags = self.__currentMediaDescriptor.getTags()
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
for changedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].keys():
currentTags = self.__sourceMediaDescriptor.getTags()
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} changedTagKey={changedTagKey} currentTags={currentTags[changedTagKey]}")
self.__tac.updateMediaTag(self.__currentPattern.getId(), changedTagKey, currentTags[changedTagKey])
if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
if targetTracks:
self.__tc.deleteTrack(targetTracks[0].getId()) # id
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
#targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
# if targetTracks:
# self.__tc.deleteTrack(targetTracks[0].getId()) # id
# self.__tc.deleteTrack(targetTracks[0].getId())
self.__tc.addTrack(trackDescriptor, patternId = self.__currentPattern.getId())
if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
for removedTrackIndex, removedTrack in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY].items():
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
trackDescriptor: TrackDescriptor
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
# Track per inspect/update hinzufügen
self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
#self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
self.__tc.deleteTrack(trackDescriptor.getId())
if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
# [vsTracks[tp].getIndex()] = trackDiff
for changedTrackIndex, changedTrackDiff in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].items():
for trackIndex, trackDiff in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY].items():
changedTargetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
changedTargeTrackId = changedTargetTracks[0].getId() if changedTargetTracks else None
changedTargetTrackIndex = changedTargetTracks[0].getIndex() if changedTargetTracks else None
targetTracks = [t for t in self.__targetMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
targetTrackId = targetTracks[0].getId() if targetTracks else None
targetTrackIndex = targetTracks[0].getIndex() if targetTracks else None
changedCurrentTracks = [t for t in self.__currentMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
changedCurrentTracks = [t for t in self.__sourceMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
# changedCurrentTrackId #HINT: Undefined as track descriptors do not come from file with track_id
if TrackDescriptor.TAGS_KEY in changedTrackDiff.keys():
changedTrackTagsDiff = changedTrackDiff[TrackDescriptor.TAGS_KEY]
if TrackDescriptor.TAGS_KEY in trackDiff.keys():
tagsDiff = trackDiff[TrackDescriptor.TAGS_KEY]
if DIFF_ADDED_KEY in changedTrackTagsDiff.keys():
for addedTrackTagKey in changedTrackTagsDiff[DIFF_ADDED_KEY]:
if DIFF_ADDED_KEY in tagsDiff.keys():
for tagKey, tagValue in tagsDiff[DIFF_ADDED_KEY].items():
if changedTargetTracks:
self.__tac.deleteTrackTagByKey(changedTargeTrackId, addedTrackTagKey)
# if targetTracks:
# self.__tac.deleteTrackTagByKey(targetTrackId, addedTrackTagKey)
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
if DIFF_REMOVED_KEY in changedTrackTagsDiff.keys():
for removedTrackTagKey in changedTrackTagsDiff[DIFF_REMOVED_KEY]:
if changedCurrentTracks:
self.__tac.updateTrackTag(changedTargeTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
if DIFF_CHANGED_KEY in changedTrackTagsDiff.keys():
for changedTrackTagKey in changedTrackTagsDiff[DIFF_CHANGED_KEY]:
if changedCurrentTracks:
self.__tac.updateTrackTag(changedTargeTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
if DIFF_REMOVED_KEY in tagsDiff.keys():
for tagKey, tagValue in tagsDiff[DIFF_REMOVED_KEY].items():
# if changedCurrentTracks:
# self.__tac.updateTrackTag(targetTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
self.__tac.deleteTrackTagByKey(targetTrackId, tagKey)
if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
changedTrackDispositionDiff = changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
if DIFF_CHANGED_KEY in tagsDiff.keys():
for tagKey, tagValue in tagsDiff[DIFF_CHANGED_KEY].items():
# if changedCurrentTracks:
# self.__tac.updateTrackTag(targetTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
if TrackDescriptor.DISPOSITION_SET_KEY in trackDiff.keys():
changedTrackDispositionDiff = trackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
if DIFF_ADDED_KEY in changedTrackDispositionDiff.keys():
for changedTrackAddedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
if changedTargetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackAddedDisposition, False)
for changedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
if targetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, True)
if DIFF_REMOVED_KEY in changedTrackDispositionDiff.keys():
for changedTrackRemovedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
if changedTargetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackRemovedDisposition, True)
for changedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
if targetTrackIndex is not None:
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, False)
self.updateDifferences()
@@ -632,11 +739,11 @@ class MediaDetailsScreen(Screen):
def action_edit_pattern(self):
patternDescriptor = self.getPatternDescriptorFromInput()
patternObj = self.getPatternObjFromInput()
if patternDescriptor['pattern']:
if patternObj['pattern']:
selectedPatternId = self.__pc.findPattern(patternDescriptor)
selectedPatternId = self.__pc.findPattern(patternObj)
if selectedPatternId is None:
raise click.ClickException(f"MediaDetailsScreen.action_edit_pattern(): Pattern to edit has no id")

View File

@@ -1,6 +1,6 @@
import click
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy import Column, Integer, String, Text, ForeignKey
from sqlalchemy.orm import relationship
from .show import Base, Show
@@ -31,9 +31,13 @@ class Pattern(Base):
tracks = relationship('Track', back_populates='pattern', cascade="all, delete", lazy='joined')
media_tags = relationship('MediaTag', back_populates='pattern', cascade="all, delete", lazy='joined')
quality = Column(Integer, default=0)
notes = Column(Text, default='')
def getId(self):
return int(self.id)

View File

@@ -11,17 +11,20 @@ class PatternController():
self.Session = self.context['database']['session'] # convenience
def addPattern(self, patternDescriptor):
def addPattern(self, patternObj):
"""Adds pattern to database from obj
Returns database id or 0 if pattern already exists"""
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']),
Pattern.pattern == str(patternDescriptor['pattern']))
q = s.query(Pattern).filter(Pattern.show_id == int(patternObj['show_id']),
Pattern.pattern == str(patternObj['pattern']))
if not q.count():
pattern = Pattern(show_id = int(patternDescriptor['show_id']),
pattern = str(patternDescriptor['pattern']))
pattern = Pattern(show_id = int(patternObj['show_id']),
pattern = str(patternObj['pattern']))
s.add(pattern)
s.commit()
return pattern.getId()
@@ -34,7 +37,7 @@ class PatternController():
s.close()
def updatePattern(self, patternId, patternDescriptor):
def updatePattern(self, patternId, patternObj):
try:
s = self.Session()
@@ -42,10 +45,12 @@ class PatternController():
if q.count():
pattern = q.first()
pattern: Pattern = q.first()
pattern.show_id = int(patternDescriptor['show_id'])
pattern.pattern = str(patternDescriptor['pattern'])
pattern.show_id = int(patternObj['show_id'])
pattern.pattern = str(patternObj['pattern'])
pattern.quality = str(patternObj['quality'])
pattern.notes = str(patternObj['notes'])
s.commit()
return True
@@ -60,11 +65,11 @@ class PatternController():
def findPattern(self, patternDescriptor):
def findPattern(self, patternObj):
try:
s = self.Session()
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']), Pattern.pattern == str(patternDescriptor['pattern']))
q = s.query(Pattern).filter(Pattern.show_id == int(patternObj['show_id']), Pattern.pattern == str(patternObj['pattern']))
if q.count():
pattern = q.first()

View File

@@ -2,7 +2,7 @@ import click, re
from typing import List
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input, DataTable
from textual.widgets import Header, Footer, Static, Button, Input, DataTable, TextArea
from textual.containers import Grid
from ffx.model.pattern import Pattern
@@ -30,6 +30,8 @@ from ffx.file_properties import FileProperties
from ffx.iso_language import IsoLanguage
from ffx.audio_layout import AudioLayout
from ffx.helper import formatRichColor, removeRichColor
# Screen[dict[int, str, int]]
class PatternDetailsScreen(Screen):
@@ -37,8 +39,8 @@ class PatternDetailsScreen(Screen):
CSS = """
Grid {
grid-size: 7 13;
grid-rows: 2 2 2 2 2 8 2 2 8 2 2 2 2;
grid-size: 7 17;
grid-rows: 2 2 2 2 2 2 6 2 2 8 2 2 8 2 2 2 2;
grid-columns: 25 25 25 25 25 25 25;
height: 100%;
width: 100%;
@@ -87,6 +89,12 @@ class PatternDetailsScreen(Screen):
column-span: 7;
}
.four_box {
min-height: 6;
}
.box {
height: 100%;
border: solid green;
@@ -103,6 +111,20 @@ class PatternDetailsScreen(Screen):
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
self.__configurationData = self.context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
self.__pc = PatternController(context = self.context)
self.__sc = ShowController(context = self.context)
self.__tc = TrackController(context = self.context)
@@ -147,29 +169,31 @@ class PatternDetailsScreen(Screen):
td : TrackDescriptor = tr.getDescriptor(self.context)
trackType = td.getType()
if not trackType in typeCounter.keys():
typeCounter[trackType] = 0
if (trackType := td.getType()) != TrackType.ATTACHMENT:
dispoSet = td.getDispositionSet()
if not trackType in typeCounter.keys():
typeCounter[trackType] = 0
trackLanguage = td.getLanguage()
audioLayout = td.getAudioLayout()
row = (td.getIndex(),
trackType.label(),
typeCounter[trackType],
td.getCodec().label(),
audioLayout.label() if trackType == TrackType.AUDIO
and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
trackLanguage.label() if trackLanguage != IsoLanguage.UNDEFINED else ' ',
td.getTitle(),
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
td.getSourceIndex())
dispoSet = td.getDispositionSet()
self.tracksTable.add_row(*map(str, row))
trackLanguage = td.getLanguage()
audioLayout = td.getAudioLayout()
typeCounter[trackType] += 1
row = (td.getIndex(),
trackType.label(),
typeCounter[trackType],
td.getCodec().label(),
audioLayout.label() if trackType == TrackType.AUDIO
and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
trackLanguage.label() if trackLanguage != IsoLanguage.UNDEFINED else ' ',
td.getTitle(),
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
td.getSourceIndex())
self.tracksTable.add_row(*map(str, row))
typeCounter[trackType] += 1
def swapTracks(self, trackIndex1: int, trackIndex2: int):
@@ -217,7 +241,15 @@ class PatternDetailsScreen(Screen):
tags = self.__tac.findAllMediaTags(self.__pattern.getId())
for tagKey, tagValue in tags.items():
row = (tagKey, tagValue)
textColor = None
if tagKey in self.__ignoreGlobalKeys:
textColor = 'blue'
if tagKey in self.__removeGlobalKeys:
textColor = 'red'
# if tagKey not in self.__ignoreTrackKeys:
row = (formatRichColor(tagKey, textColor), formatRichColor(tagValue, textColor))
self.tagsTable.add_row(*map(str, row))
@@ -230,6 +262,12 @@ class PatternDetailsScreen(Screen):
self.query_one("#pattern_input", Input).value = str(self.__pattern.getPattern())
if self.__pattern and self.__pattern.quality:
self.query_one("#quality_input", Input).value = str(self.__pattern.quality)
if self.__pattern and self.__pattern.notes:
self.query_one("#notes_textarea", TextArea).text = str(self.__pattern.notes)
self.updateTags()
self.updateTracks()
@@ -276,10 +314,31 @@ class PatternDetailsScreen(Screen):
# 3
yield Static(" ", classes="seven")
# 4
yield Static(" ", classes="seven")
yield Static("Quality")
yield Input(type="integer", id="quality_input")
yield Static(' ', classes="five")
# 5
yield Static(" ", classes="seven")
# 6
yield Static("Notes")
yield Static(" ", classes="six")
# 7
yield TextArea(id="notes_textarea", classes="four_box seven")
# 8
yield Static(" ", classes="seven")
# 9
yield Static("Media Tags")
@@ -296,13 +355,13 @@ class PatternDetailsScreen(Screen):
yield Static(" ")
yield Static(" ")
# 6
# 10
yield self.tagsTable
# 7
# 11
yield Static(" ", classes="seven")
# 8
# 12
yield Static("Streams")
@@ -319,21 +378,21 @@ class PatternDetailsScreen(Screen):
yield Button("Up", id="button_track_up")
yield Button("Down", id="button_track_down")
# 9
# 13
yield self.tracksTable
# 10
# 14
yield Static(" ", classes="seven")
# 11
# 15
yield Static(" ", classes="seven")
# 12
# 16
yield Button("Save", id="save_button")
yield Button("Cancel", id="cancel_button")
yield Static(" ", classes="five")
# 13
# 17
yield Static(" ", classes="seven")
yield Footer()
@@ -342,6 +401,14 @@ class PatternDetailsScreen(Screen):
def getPatternFromInput(self):
return str(self.query_one("#pattern_input", Input).value)
def getQualityFromInput(self):
try:
return int(self.query_one("#quality_input", Input).value)
except ValueError:
return 0
def getNotesFromInput(self):
return str(self.query_one("#notes_textarea", TextArea).text)
def getSelectedTrackDescriptor(self):
@@ -382,8 +449,8 @@ class PatternDetailsScreen(Screen):
if row_key is not None:
selected_tag_data = self.tagsTable.get_row(row_key)
tagKey = str(selected_tag_data[0])
tagValue = str(selected_tag_data[1])
tagKey = removeRichColor(selected_tag_data[0])
tagValue = removeRichColor(selected_tag_data[1])
return tagKey, tagValue
@@ -403,6 +470,8 @@ class PatternDetailsScreen(Screen):
patternDescriptor = {}
patternDescriptor['show_id'] = self.__showDescriptor.getId()
patternDescriptor['pattern'] = self.getPatternFromInput()
patternDescriptor['quality'] = self.getQualityFromInput()
patternDescriptor['notes'] = self.getNotesFromInput()
if self.__pattern is not None:
@@ -491,13 +560,14 @@ class PatternDetailsScreen(Screen):
trackType = trackDescriptor.getType()
index = trackDescriptor.getIndex()
subIndex = trackDescriptor.getSubIndex()
codec = trackDescriptor.getCodec()
language = trackDescriptor.getLanguage()
title = trackDescriptor.getTitle()
row = (index,
trackType.label(),
subIndex,
" ",
codec.label(),
language.label(),
title,
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
@@ -512,11 +582,16 @@ class PatternDetailsScreen(Screen):
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
self.tracksTable.update_cell(row_key, self.column_key_track_audio_layout, trackDescriptor.getAudioLayout().label())
self.tracksTable.update_cell(row_key, self.column_key_track_audio_layout,
trackDescriptor.getAudioLayout().label()
if trackDescriptor.getType() == TrackType.AUDIO else ' ')
self.tracksTable.update_cell(row_key, self.column_key_track_language, trackDescriptor.getLanguage().label())
self.tracksTable.update_cell(row_key, self.column_key_track_title, trackDescriptor.getTitle())
self.tracksTable.update_cell(row_key, self.column_key_track_default, 'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
self.tracksTable.update_cell(row_key, self.column_key_track_forced, 'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')
self.tracksTable.update_cell(row_key, self.column_key_track_default,
'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
self.tracksTable.update_cell(row_key, self.column_key_track_forced,
'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')
except CellDoesNotExist:
pass
@@ -541,4 +616,6 @@ class PatternDetailsScreen(Screen):
raise click.ClickException(f"PatternDetailsScreen.handle_delete_tag: pattern not set")
if self.__tac.deleteMediaTagByKey(self.__pattern.getId(), tag[0]):
self.updateTags()
self.updateTags()
else:
raise click.ClickException('tag delete failed')

View File

@@ -15,14 +15,8 @@ def executeProcess(commandSequence: List[str], directory: str = None, context: d
niceSequence = []
niceness = (int(context['resource_limits']['niceness'])
if not context is None
and 'resource_limits' in context.keys()
and 'niceness' in context['resource_limits'].keys() else 99)
cpu_percent = (int(context['resource_limits']['cpu_percent'])
if not context is None
and 'resource_limits' in context.keys()
and 'cpu_percent' in context['resource_limits'].keys() else 0)
niceness = int((context or {}).get('resource_limits', {}).get('niceness', 99))
cpu_percent = int((context or {}).get('resource_limits', {}).get('cpu_percent', 0))
if niceness >= -20 and niceness <= 19:
niceSequence += ['nice', '-n', str(niceness)]

View File

@@ -18,9 +18,16 @@ class ShiftedSeasonController():
self.Session = self.context['database']['session'] # convenience
def checkShiftedSeason(self, showId: int, shiftedSeasonObj: dict, shiftedSeasonId: int = 0):
"""
Check if for a particular season
shiftedSeasonId
"""
try:
s = self.Session()
originalSeason = shiftedSeasonObj['original_season']
firstEpisode = int(shiftedSeasonObj['first_episode'])
lastEpisode = int(shiftedSeasonObj['last_episode'])
@@ -31,11 +38,14 @@ class ShiftedSeasonController():
siblingShiftedSeason: ShiftedSeason
for siblingShiftedSeason in q.all():
siblingOriginalSeason = siblingShiftedSeason.getOriginalSeason
siblingFirstEpisode = siblingShiftedSeason.getFirstEpisode()
siblingLastEpisode = siblingShiftedSeason.getLastEpisode()
if (lastEpisode >= siblingFirstEpisode
if (originalSeason == siblingOriginalSeason
and lastEpisode >= siblingFirstEpisode
and siblingLastEpisode >= firstEpisode):
return False
return True

View File

@@ -400,7 +400,7 @@ class ShowDetailsScreen(Screen):
yield Footer()
def getShowDescriptorFromInput(self):
def getShowDescriptorFromInput(self) -> ShowDescriptor:
kwargs = {}
@@ -444,7 +444,7 @@ class ShowDetailsScreen(Screen):
# Event handler for button press
def on_button_pressed(self, event: Button.Pressed) -> None:
# Check if the button pressed is the one we are interested in
if event.button.id == "save_button":
showDescriptor = self.getShowDescriptorFromInput()

View File

@@ -162,4 +162,7 @@ class ShowsScreen(Screen):
yield self.table
yield Footer()
f = Footer()
f.description = "yolo"
yield f

View File

@@ -68,7 +68,7 @@ class TagController():
s = self.Session()
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId),
MediaTag.key == str(tagKey))
MediaTag.key == str(tagKey))
if q.count():
tag = q.first()
s.delete(tag)

View File

@@ -90,7 +90,7 @@ class TagDeleteScreen(Screen):
if event.button.id == "delete_button":
tag = (self.__key, self.__value)
tag = (self.__key, self.__value)
self.dismiss(tag)
if event.button.id == "cancel_button":

View File

@@ -164,7 +164,8 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,
subIndexCounter = {}
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
trackType = trackDescriptor.getType()

View File

@@ -122,7 +122,8 @@ class Scenario2(Scenario):
resultFileProperties = FileProperties(testContext, resultFile)
resultMediaDescriptor = resultFileProperties.getMediaDescriptor()
resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
# resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
resultMediaTracks = resultMediaDescriptor.getTrackDescriptors()
for assertIndex in range(len(assertSelectorList)):

View File

@@ -223,7 +223,8 @@ class Scenario4(Scenario):
self._logger.debug(f"{variantLabel}: Result file properties: {rfp.getFilename()} season={rfp.getSeason()} episode={rfp.getEpisode()}")
rmd = rfp.getMediaDescriptor()
rmt = rmd.getAllTrackDescriptors()
# rmt = rmd.getAllTrackDescriptors()
rmt = rmd.getTrackDescriptors()
for l in rmd.getConfiguration(label = 'resultMediaDescriptor'):
self._logger.debug(l)

View File

@@ -3,15 +3,26 @@ from enum import Enum
class TrackCodec(Enum):
H265 = {'identifier': 'hevc', 'format': 'h265', 'extension': 'h265' ,'label': 'H.265'}
H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
H265 = {'identifier': 'hevc', 'format': 'h265', 'extension': 'h265' ,'label': 'H.265'}
H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
MPEG4 = {'identifier': 'mpeg4', 'format': 'm4v', 'extension': 'm4v' ,'label': 'MPEG-4'}
MPEG2 = {'identifier': 'mpeg2video', 'format': 'mpeg2video', 'extension': 'mpg' ,'label': 'MPEG-2'}
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
EAC3 = {'identifier': 'eac3', 'format': 'eac3', 'extension': 'eac3' , 'label': 'EAC3'}
DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
MP3 = {'identifier': 'mp3', 'format': 'mp3', 'extension': 'mp3' , 'label': 'MP3'}
SRT = {'identifier': 'subrip', 'format': 'srt', 'extension': 'srt' , 'label': 'SRT'}
ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
TTF = {'identifier': 'ttf', 'format': None, 'extension': 'ttf' , 'label': 'TTF'}
PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
VOBSUB = {'identifier': 'dvd_subtitle', 'format': None, 'extension': 'mkv' , 'label': 'VobSub'}
PNG = {'identifier': 'png', 'format': None, 'extension': 'png' , 'label': 'PNG'}
UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
def identifier(self):
@@ -23,8 +34,8 @@ class TrackCodec(Enum):
return str(self.value['label'])
def format(self):
"""Returns the codec as single letter"""
return str(self.value['format'])
"""Returns the codec """
return self.value['format']
def extension(self):
"""Returns the corresponding extension"""

View File

@@ -19,6 +19,20 @@ class TrackController():
self.context = context
self.Session = self.context['database']['session'] # convenience
self.__configurationData = self.context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
def addTrack(self, trackDescriptor : TrackDescriptor, patternId = None):
@@ -29,7 +43,7 @@ class TrackController():
s = self.Session()
track = Track(pattern_id = patId,
track_type = int(trackDescriptor.getType().index()),
codec_name = str(trackDescriptor.getCodec().label()),
codec_name = str(trackDescriptor.getCodec().identifier()),
index = int(trackDescriptor.getIndex()),
source_index = int(trackDescriptor.getSourceIndex()),
disposition_flags = int(TrackDisposition.toFlags(trackDescriptor.getDispositionSet())),
@@ -40,10 +54,12 @@ class TrackController():
for k,v in trackDescriptor.getTags().items():
tag = TrackTag(track_id = track.id,
key = k,
value = v)
s.add(tag)
# Filter tags that make no sense to preserve
if k not in self.__ignoreTrackKeys and k not in self.__removeTrackKeys:
tag = TrackTag(track_id = track.id,
key = k,
value = v)
s.add(tag)
s.commit()
except Exception as ex:

View File

@@ -7,7 +7,7 @@ from .audio_layout import AudioLayout
from .track_disposition import TrackDisposition
from .track_codec import TrackCodec
from .helper import dictDiff, setDiff
# from .helper import dictDiff, setDiff
class TrackDescriptor:
@@ -33,8 +33,7 @@ class TrackDescriptor:
FFPROBE_TAGS_KEY = "tags"
FFPROBE_CODEC_TYPE_KEY = "codec_type"
FFPROBE_CODEC_KEY = "codec_name"
CODEC_PGS = 'hdmv_pgs_subtitle'
def __init__(self, **kwargs):
@@ -321,24 +320,24 @@ class TrackDescriptor:
else:
self.__dispositionSet.discard(disposition)
def compare(self, vsTrackDescriptor: Self):
compareResult = {}
tagsDiffResult = dictDiff(vsTrackDescriptor.getTags(), self.getTags())
if tagsDiffResult:
compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
vsDispositions = vsTrackDescriptor.getDispositionSet()
dispositions = self.getDispositionSet()
dispositionDiffResult = setDiff(vsDispositions, dispositions)
if dispositionDiffResult:
compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
return compareResult
# def compare(self, vsTrackDescriptor: Self):
#
# compareResult = {}
#
# tagsDiffResult = dictKeysDiff(vsTrackDescriptor.getTags(), self.getTags())
#
# if tagsDiffResult:
# compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
#
# vsDispositions = vsTrackDescriptor.getDispositionSet()
# dispositions = self.getDispositionSet()
#
# dispositionDiffResult = setDiff(vsDispositions, dispositions)
#
# if dispositionDiffResult:
# compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
#
# return compareResult
def setExternalSourceFilePath(self, filePath: str):
self.__externalSourceFilePath = str(filePath)

View File

@@ -24,6 +24,8 @@ from .tag_delete_screen import TagDeleteScreen
from textual.widgets._data_table import CellDoesNotExist
from ffx.helper import formatRichColor, removeRichColor
# Screen[dict[int, str, int]]
class TrackDetailsScreen(Screen):
@@ -101,6 +103,21 @@ class TrackDetailsScreen(Screen):
self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience
self.__configurationData = self.context['config'].getData()
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
if 'streams' in metadataConfiguration.keys()
and 'remove' in metadataConfiguration['streams'].keys() else [])
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
if 'streams' in metadataConfiguration.keys()
and 'ignore' in metadataConfiguration['streams'].keys() else [])
self.__tc = TrackController(context = self.context)
self.__pc = PatternController(context = self.context)
self.__tac = TagController(context = self.context)
@@ -138,7 +155,14 @@ class TrackDetailsScreen(Screen):
for k,v in trackTags.items():
if k != 'language' and k != 'title':
row = (k,v)
textColor = None
if k in self.__ignoreTrackKeys:
textColor = 'blue'
if k in self.__removeTrackKeys:
textColor = 'red'
row = (formatRichColor(k, textColor), formatRichColor(v, textColor))
self.trackTagsTable.add_row(*map(str, row))
@@ -192,7 +216,7 @@ class TrackDetailsScreen(Screen):
# 2
yield Static("for pattern")
yield Static("", id="pattern_label", classes="four")
yield Static("", id="pattern_label", classes="four", markup=False)
# 3
yield Static(" ", classes="five")
@@ -292,7 +316,11 @@ class TrackDetailsScreen(Screen):
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(self.query_one("#type_select", Select).value)
kwargs[TrackDescriptor.CODEC_KEY] = self.__trackCodec
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(self.query_one("#audio_layout_select", Select).value)
if self.__trackType == TrackType.AUDIO:
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(self.query_one("#audio_layout_select", Select).value)
else:
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.LAYOUT_UNDEFINED
trackTags = {}
language = self.query_one("#language_select", Select).value
@@ -324,8 +352,8 @@ class TrackDetailsScreen(Screen):
if row_key is not None:
selected_tag_data = self.trackTagsTable.get_row(row_key)
tagKey = str(selected_tag_data[0])
tagValue = str(selected_tag_data[1])
tagKey = removeRichColor(selected_tag_data[0])
tagValue = removeRichColor(selected_tag_data[1])
return tagKey, tagValue

View File

@@ -5,6 +5,7 @@ class TrackType(Enum):
VIDEO = {'label': 'video', 'index': 1}
AUDIO = {'label': 'audio', 'index': 2}
SUBTITLE = {'label': 'subtitle', 'index': 3}
ATTACHMENT = {'label': 'attachment', 'index': 4}
UNKNOWN = {'label': 'unknown', 'index': 0}

View File

@@ -4,7 +4,8 @@ class VideoEncoder(Enum):
AV1 = {'label': 'av1', 'index': 1}
VP9 = {'label': 'vp9', 'index': 2}
H264 = {'label': 'h264', 'index': 3}
UNDEFINED = {'label': 'undefined', 'index': 0}
def label(self):

View File

@@ -1,8 +0,0 @@
all:
hosts:
hawaii:
ansible_host: refulgent.de
ansible_user: osgw
ffxSystemUsername: osgw
ffxHomeDirectory: /var/local/osgw/lib/osgw

View File

@@ -1,8 +0,0 @@
all:
hosts:
peppermint:
ansible_host: maveno.de
ansible_user: osgw
ffxSystemUsername: osgw
ffxHomeDirectory: /var/local/osgw/lib/osgw

View File

@@ -6,13 +6,26 @@
- name: Update system and install packages
become: true
when: ansible_os_family == 'Debian'
ansible.builtin.apt:
update_cache: true
name:
- python3-virtualenv
- cpulimit
- ffmpeg
- git
- screen
- name: Update system and install packages
become: true
when: ansible_os_family == 'Archlinux'
ansible.builtin.pacman:
update_cache: true
name:
- cpulimit
- ffmpeg
- git
- screen
update_cache: yes
- name: Create sync dir
become: true
@@ -50,16 +63,6 @@
group: "{{ ffxSystemUsername }}"
mode: 0755
- name: Prepare ffx virtualenv
become: true
become_user: "{{ ffxSystemUsername }}"
ansible.builtin.pip:
name:
- click
- textual
- sqlalchemy
- requests
virtualenv: "{{ ffxHomeDirectory }}/.local/share/ffx.venv"
- name: Clone ffx repository
become: true
@@ -70,6 +73,15 @@
version: dev
- name: Install FFX package in venv
become: true
become_user: "{{ ffxSystemUsername }}"
ansible.builtin.pip:
name: .
chdir: "{{ ffxHomeDirectory }}/.local/src/ffx"
virtualenv: "{{ ffxHomeDirectory }}/.local/share/ffx.venv"
- name: Add TMDB API token placeholder to .bashrc
become: true
become_user: "{{ ffxSystemUsername }}"
@@ -77,7 +89,7 @@
path: "{{ ffxHomeDirectory }}/.bashrc"
insertbefore: BOF
line: >-
export TMDB_API_KEY="<TMDB API token>"
export TMDB_API_KEY="{{ ffxTmdbApiKey | default('<TMDB API key>') }}"
- name: Add ffx alias to .bashrc
become: true
@@ -86,8 +98,7 @@
path: "{{ ffxHomeDirectory }}/.bashrc"
insertbefore: BOF
line: >-
alias ffx="{{ ffxHomeDirectory }}/.local/share/ffx.venv/bin/python
{{ ffxHomeDirectory }}/.local/src/ffx/bin/ffx.py"
alias ffx="{{ ffxHomeDirectory }}/.local/share/ffx.venv/bin/ffx
- name: Ensure local sync directory

View File

@@ -2,6 +2,8 @@
. ~/.local/share/ffx.venv/bin/activate
pushd ~/.local/src/ffx/
git checkout "${1:-main}"
git pull
pip install --editable .
popd
deactivate

444
tools/prepare.sh Executable file
View File

@@ -0,0 +1,444 @@
#!/usr/bin/env bash
# prepare.sh — check and (optionally) prepare the local FFX development
# environment: system tools, seeded directories, and a default ffx.json.
# Run with --check for a read-only report.
set -u
# Directory of this script (resolved even when invoked via a relative path).
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
# All seeded locations may be overridden through FFX_* environment variables.
CONFIG_DIR="${FFX_CONFIG_DIR:-${HOME}/.local/etc}"
CONFIG_FILE="${FFX_CONFIG_FILE:-${CONFIG_DIR}/ffx.json}"
VAR_DIR="${FFX_VAR_DIR:-${HOME}/.local/var/ffx}"
LOG_DIR="${FFX_LOG_DIR:-${HOME}/.local/var/log}"
DATABASE_FILE="${FFX_DATABASE_FILE:-${VAR_DIR}/ffx.db}"
# Run state: CHECK_ONLY is set by --check; the counters and arrays below
# are (re)filled by the report helpers.
CHECK_ONLY=0
MUTATIONS=0
INSTALL_FAILURES=0
READINESS_FAILURES=0
MISSING_REQUIRED_SYSTEM=()
MISSING_OPTIONAL_SYSTEM=()
# ANSI colors are enabled only when stdout is a terminal.
COLOR_RESET=""
COLOR_GREEN=""
COLOR_YELLOW=""
COLOR_RED=""
if [ -t 1 ]; then
COLOR_RESET="$(printf '\033[0m')"
COLOR_GREEN="$(printf '\033[32m')"
COLOR_YELLOW="$(printf '\033[33m')"
COLOR_RED="$(printf '\033[31m')"
fi
# Print the CLI usage/help text to stdout.
usage() {
cat <<EOF
Usage: $(basename "$0") [--check] [--help]
Prepare the local FFX development environment for this repository.
Options:
--check Report readiness only. Do not create, install, or modify.
--help Show this help text.
Environment overrides:
FFX_CONFIG_DIR Override the parent directory for the seeded ffx.json file.
FFX_CONFIG_FILE Override the seeded config file path directly.
FFX_VAR_DIR Override the default data directory.
FFX_LOG_DIR Override the default log directory.
FFX_DATABASE_FILE Override the database path written into a newly seeded config.
EOF
}
# Render the word "ok" using the green terminal color (the COLOR_*
# globals are empty when stdout is not a terminal, so this degrades
# to plain text). No trailing newline is emitted.
status_ok() {
    printf '%s%s%s' "${COLOR_GREEN}" 'ok' "${COLOR_RESET}"
}

# Render the word "warn" using the yellow terminal color.
status_warn() {
    printf '%s%s%s' "${COLOR_YELLOW}" 'warn' "${COLOR_RESET}"
}

# Render the word "failed" using the red terminal color.
status_fail() {
    printf '%s%s%s' "${COLOR_RED}" 'failed' "${COLOR_RESET}"
}
# Print one status line of the form "[<status>] <label>: <detail>".
# level is "ok", "warn", or anything else (rendered as failed); the
# ": <detail>" suffix is omitted entirely when detail is empty.
report_component() {
    local severity="$1" caption="$2" info="$3"
    local badge
    if [ "${severity}" = "ok" ]; then
        badge="$(status_ok)"
    elif [ "${severity}" = "warn" ]; then
        badge="$(status_warn)"
    else
        badge="$(status_fail)"
    fi
    printf '[%s] %s%s\n' "${badge}" "${caption}" "${info:+: $info}"
}
# True when the given command resolves on PATH (binary, builtin, or function).
command_exists() {
    if command -v "$1" >/dev/null 2>&1; then
        return 0
    fi
    return 1
}

# Probe one toolchain component. Only the command name ($2) is consulted;
# the label ($1) and required flag ($3) are accepted purely for call-site
# symmetry with report_toolchain_component.
check_command_component() {
    command_exists "$2"
}

# True when a non-empty TMDB API key is present in the environment.
check_tmdb_key() {
    test -n "${TMDB_API_KEY:-}"
}

# True when the given seeded directory already exists.
check_seeded_dir() {
    test -d "$1"
}

# True when the given seeded file already exists.
check_seeded_file() {
    test -f "$1"
}
# Produce the human-readable detail text for one component key.
# Command keys print their resolved path (or a not-found note); seeded
# path keys print the location, or a hint that prep can create/seed it.
component_detail() {
case "$1" in
git|python3|ffmpeg|ffprobe|cpulimit)
# `command -v` prints the resolved path on success; on a miss, fall
# through to the not-found message instead.
command -v "$1" || printf "command '%s' not found" "$1"
;;
tmdb-key)
if check_tmdb_key; then
printf 'TMDB_API_KEY is set'
else
printf 'TMDB_API_KEY is unset; TMDB-backed flows will be skipped or fail'
fi
;;
config-dir)
if check_seeded_dir "${CONFIG_DIR}"; then
printf '%s' "${CONFIG_DIR}"
else
printf 'missing; prep can create it'
fi
;;
var-dir)
if check_seeded_dir "${VAR_DIR}"; then
printf '%s' "${VAR_DIR}"
else
printf 'missing; prep can create it'
fi
;;
log-dir)
if check_seeded_dir "${LOG_DIR}"; then
printf '%s' "${LOG_DIR}"
else
printf 'missing; prep can create it'
fi
;;
ffx-config)
if check_seeded_file "${CONFIG_FILE}"; then
printf '%s' "${CONFIG_FILE}"
else
printf 'missing; prep can seed a default non-destructively'
fi
;;
esac
}
# Report one toolchain command. Misses are recorded in the global
# MISSING_REQUIRED_SYSTEM / MISSING_OPTIONAL_SYSTEM arrays; only a
# missing *required* tool bumps READINESS_FAILURES.
report_toolchain_component() {
    local label="$1" cmd="$2" required="$3"
    if check_command_component "${label}" "${cmd}" "${required}"; then
        report_component ok "${label}" "$(component_detail "${cmd}")"
        return
    fi
    case "${required}" in
    required)
        report_component failed "${label}" "$(component_detail "${cmd}")"
        MISSING_REQUIRED_SYSTEM+=("${cmd}")
        READINESS_FAILURES=$((READINESS_FAILURES + 1))
        ;;
    *)
        report_component warn "${label}" "$(component_detail "${cmd}")"
        MISSING_OPTIONAL_SYSTEM+=("${cmd}")
        ;;
    esac
}
# Report whether a TMDB API key is exported. A missing key is only a
# warning — it never counts toward READINESS_FAILURES.
report_tmdb_component() {
    local level=warn
    check_tmdb_key && level=ok
    report_component "${level}" "TMDB API key" "$(component_detail tmdb-key)"
}
# Report one seeded path (directory or config file) identified by key.
# A missing required entry bumps READINESS_FAILURES; a missing optional
# entry only warns. Unknown keys report ok (nothing to check).
report_seeded_component() {
    local label="$1" key="$2" required="$3"
    local present=1
    case "${key}" in
    config-dir) check_seeded_dir "${CONFIG_DIR}" || present=0 ;;
    var-dir) check_seeded_dir "${VAR_DIR}" || present=0 ;;
    log-dir) check_seeded_dir "${LOG_DIR}" || present=0 ;;
    ffx-config) check_seeded_file "${CONFIG_FILE}" || present=0 ;;
    esac
    if [ "${present}" -eq 1 ]; then
        report_component ok "${label}" "$(component_detail "${key}")"
    elif [ "${required}" = "required" ]; then
        report_component failed "${label}" "$(component_detail "${key}")"
        READINESS_FAILURES=$((READINESS_FAILURES + 1))
    else
        report_component warn "${label}" "$(component_detail "${key}")"
    fi
}
# Reset the miss-tracking globals, then report every system dependency
# plus the TMDB key. Every required tool uses its command name as label.
print_dependency_status() {
    READINESS_FAILURES=0
    MISSING_REQUIRED_SYSTEM=()
    MISSING_OPTIONAL_SYSTEM=()
    echo "Dependency status:"
    local tool
    for tool in git python3 ffmpeg ffprobe cpulimit; do
        report_toolchain_component "${tool}" "${tool}" "required"
    done
    report_tmdb_component
}
# Report the state of every locally seeded directory/file, in a fixed
# order, all as optional components (entries are "label=key" pairs).
print_seeded_file_status() {
    echo "Seeded local files:"
    local entry
    for entry in "Config dir=config-dir" "Var dir=var-dir" "Log dir=log-dir" "ffx config=ffx-config"; do
        report_seeded_component "${entry%%=*}" "${entry#*=}" "optional"
    done
}
# Print the name of the first supported package manager found on PATH
# (apt-get is preferred over pacman); return non-zero when neither exists.
detect_package_manager() {
    local pm
    for pm in apt-get pacman; do
        if command_exists "${pm}"; then
            printf '%s\n' "${pm}"
            return 0
        fi
    done
    return 1
}
# Run a command with root privileges: directly when already root, via
# sudo otherwise. Fails (returns 1) when neither path is available;
# otherwise propagates the command's exit status.
run_root_command() {
    if [ "${EUID}" -eq 0 ]; then
        "$@"
        return $?
    fi
    if command_exists sudo; then
        sudo "$@"
        return $?
    fi
    return 1
}
# Install the required system packages (git, python, ffmpeg, cpulimit)
# through the detected package manager, escalating via run_root_command.
# Increments INSTALL_FAILURES and returns 1 on any failure; bumps
# MUTATIONS on success.
install_system_requirements() {
local package_manager
if ! package_manager="$(detect_package_manager)"; then
printf 'No supported package manager found for automatic preparation.\n' >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
case "${package_manager}" in
apt-get)
printf 'Installing missing system dependencies via apt-get...\n'
if ! run_root_command apt-get update; then
printf 'apt-get update failed.\n' >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
if ! run_root_command apt-get install -y git python3 ffmpeg cpulimit; then
printf 'apt-get install failed.\n' >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
;;
pacman)
printf 'Installing missing system dependencies via pacman...\n'
# Arch ships the python3 interpreter as package "python".
if ! run_root_command pacman -Sy --noconfirm git python ffmpeg cpulimit; then
printf 'pacman install failed.\n' >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
;;
esac
MUTATIONS=$((MUTATIONS + 1))
return 0
}
# Seed the config/var/log directories and a default ffx.json,
# non-destructively (nothing existing is overwritten). No-op in --check
# mode. Bumps MUTATIONS when anything was created; increments
# INSTALL_FAILURES and returns 1 on the first failure.
seed_default_config() {
if [ "${CHECK_ONLY}" -eq 1 ]; then
return 0
fi
local created_any=0
if [ ! -d "${CONFIG_DIR}" ]; then
printf 'Creating config dir at %s...\n' "${CONFIG_DIR}"
if ! mkdir -p "${CONFIG_DIR}"; then
printf 'Failed to create config dir at %s.\n' "${CONFIG_DIR}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
created_any=1
fi
if [ ! -d "${VAR_DIR}" ]; then
printf 'Creating var dir at %s...\n' "${VAR_DIR}"
if ! mkdir -p "${VAR_DIR}"; then
printf 'Failed to create var dir at %s.\n' "${VAR_DIR}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
created_any=1
fi
if [ ! -d "${LOG_DIR}" ]; then
printf 'Creating log dir at %s...\n' "${LOG_DIR}"
if ! mkdir -p "${LOG_DIR}"; then
printf 'Failed to create log dir at %s.\n' "${LOG_DIR}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
created_any=1
fi
if [ ! -f "${CONFIG_FILE}" ]; then
printf 'Seeding ffx config at %s...\n' "${CONFIG_FILE}"
# The heredoc below is the seeded default config; ${...} values are
# expanded at write time. Do not add comment lines inside it.
if ! cat >"${CONFIG_FILE}" <<EOF
{
"databasePath": "${DATABASE_FILE}",
"logDirectory": "${LOG_DIR}",
"metadata": {
"signature": {
"RECODED_WITH": "FFX"
},
"remove": [
"VERSION-eng",
"creation_time",
"NAME"
],
"streams": {
"remove": [
"BPS",
"NUMBER_OF_FRAMES",
"NUMBER_OF_BYTES",
"_STATISTICS_WRITING_APP",
"_STATISTICS_WRITING_DATE_UTC",
"_STATISTICS_TAGS",
"BPS-eng",
"DURATION-eng",
"NUMBER_OF_FRAMES-eng",
"NUMBER_OF_BYTES-eng",
"_STATISTICS_WRITING_APP-eng",
"_STATISTICS_WRITING_DATE_UTC-eng",
"_STATISTICS_TAGS-eng"
]
}
}
}
EOF
then
printf 'Failed to write ffx config at %s.\n' "${CONFIG_FILE}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
created_any=1
fi
if [ "${created_any}" -eq 1 ]; then
MUTATIONS=$((MUTATIONS + 1))
fi
return 0
}
# Parse CLI options: --check sets CHECK_ONLY, --help/-h prints usage and
# exits 0, anything else prints usage to stderr and exits 2.
parse_args() {
    while [ "$#" -gt 0 ]; do
        if [ "$1" = "--check" ]; then
            CHECK_ONLY=1
        elif [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
            usage
            exit 0
        else
            printf 'Unknown option: %s\n\n' "$1" >&2
            usage >&2
            exit 2
        fi
        shift
    done
}
# Orchestrate the run: report dependencies, optionally install missing
# required tools, report/seed local files, then summarize. Returns
# non-zero when installs failed or required pieces are still missing.
main() {
parse_args "$@"
print_dependency_status
# Attempt installation only in mutate mode and only when a required
# tool is actually missing; then re-report to show the new state.
if [ "${CHECK_ONLY}" -eq 0 ] && [ "${#MISSING_REQUIRED_SYSTEM[@]}" -gt 0 ]; then
install_system_requirements
echo
print_dependency_status
fi
echo
print_seeded_file_status
if [ "${CHECK_ONLY}" -eq 0 ]; then
seed_default_config
echo
print_seeded_file_status
fi
echo
if [ "${INSTALL_FAILURES}" -gt 0 ]; then
echo "One or more install steps failed; see the status checks above." >&2
return 1
fi
if [ "${READINESS_FAILURES}" -gt 0 ]; then
if [ "${CHECK_ONLY}" -eq 1 ]; then
echo "Required system prerequisites are incomplete." >&2
else
echo "Required components are still missing after preparation." >&2
fi
return 1
fi
# Wording distinguishes "already prepared" from "freshly prepared".
if [ "${CHECK_ONLY}" -eq 1 ]; then
echo "The FFX preparation environment is ready."
elif [ "${MUTATIONS}" -gt 0 ]; then
echo "The FFX preparation environment is ready."
else
echo "The FFX preparation environment is already prepared."
fi
return 0
}
main "$@"

350
tools/setup.sh Executable file
View File

@@ -0,0 +1,350 @@
#!/usr/bin/env bash
# setup.sh — create/refresh the persistent FFX bundle virtualenv,
# install this repository into it (pip --editable), and expose an
# `ffx` alias in ~/.bashrc. Run with --check for a read-only report.
set -u
# Repository root (parent of the tools/ directory holding this script).
ROOT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)"
# Well-known bundle venv layout used by the rest of the tooling.
VENV_DIR="${HOME}/.local/share/ffx.venv"
VENV_BIN_DIR="${VENV_DIR}/bin"
VENV_PYTHON="${VENV_BIN_DIR}/python"
VENV_PIP="${VENV_BIN_DIR}/pip"
VENV_FFX="${VENV_BIN_DIR}/ffx"
BASHRC_FILE="${HOME}/.bashrc"
# Markers delimiting the managed alias block inside .bashrc.
ALIAS_BLOCK_BEGIN="# >>> ffx alias >>>"
ALIAS_BLOCK_END="# <<< ffx alias <<<"
ALIAS_LINE="alias ffx=\"${VENV_FFX}\""
# Run state: CHECK_ONLY is set by --check; the counters are filled by
# the report/prepare helpers below.
CHECK_ONLY=0
READINESS_FAILURES=0
INSTALL_FAILURES=0
# ANSI colors are enabled only when stdout is a terminal.
COLOR_RESET=""
COLOR_GREEN=""
COLOR_YELLOW=""
COLOR_RED=""
if [ -t 1 ]; then
COLOR_RESET="$(printf '\033[0m')"
COLOR_GREEN="$(printf '\033[32m')"
COLOR_YELLOW="$(printf '\033[33m')"
COLOR_RED="$(printf '\033[31m')"
fi
# Print the CLI usage/help text to stdout.
usage() {
cat <<EOF
Usage: $(basename "$0") [--check] [--help]
Prepare the persistent FFX bundle virtualenv at:
${VENV_DIR}
Actions:
- create or reuse ${VENV_DIR}
- install this repository into the venv with pip --editable
- ensure ${BASHRC_FILE} exposes alias ffx -> ${VENV_FFX}
Options:
--check Report readiness only. Do not create or modify anything.
--help Show this help text.
EOF
}
# Render the word "ok" using the green terminal color (the COLOR_*
# globals are empty when stdout is not a terminal). No trailing newline.
status_ok() {
    printf '%s%s%s' "${COLOR_GREEN}" 'ok' "${COLOR_RESET}"
}

# Render the word "warn" using the yellow terminal color.
status_warn() {
    printf '%s%s%s' "${COLOR_YELLOW}" 'warn' "${COLOR_RESET}"
}

# Render the word "failed" using the red terminal color.
status_fail() {
    printf '%s%s%s' "${COLOR_RED}" 'failed' "${COLOR_RESET}"
}
# Print one status line of the form "[<status>] <label>: <detail>".
# level is "ok", "warn", or anything else (rendered as failed); the
# ": <detail>" suffix is omitted entirely when detail is empty.
report_component() {
    local severity="$1" caption="$2" info="$3"
    local badge
    if [ "${severity}" = "ok" ]; then
        badge="$(status_ok)"
    elif [ "${severity}" = "warn" ]; then
        badge="$(status_warn)"
    else
        badge="$(status_fail)"
    fi
    printf '[%s] %s%s\n' "${badge}" "${caption}" "${info:+: $info}"
}
# True when the given command resolves on PATH.
command_exists() {
    if command -v "$1" >/dev/null 2>&1; then
        return 0
    fi
    return 1
}

# True when a python3 interpreter is available.
check_python3() {
    command_exists python3
}

# True when the bundle venv's python interpreter exists and is executable.
check_venv_dir() {
    test -x "${VENV_PYTHON}"
}

# True when the venv exists and its pip runs successfully.
check_venv_pip() {
    check_venv_dir && "${VENV_PIP}" --version >/dev/null 2>&1
}

# True when the venv's ffx entry point exists and is executable.
check_venv_ffx() {
    test -x "${VENV_FFX}"
}

# True when the user's .bashrc file exists.
check_bashrc_file() {
    test -f "${BASHRC_FILE}"
}

# True when .bashrc contains the exact managed alias line (full-line,
# fixed-string match).
check_bashrc_alias() {
    check_bashrc_file && grep -Fqx "${ALIAS_LINE}" "${BASHRC_FILE}"
}
# Resolved python3 path (with trailing newline from `command -v`), or a
# not-found note.
detail_python3() {
    command -v python3 && return
    printf "command 'python3' not found"
}

# Venv location, or a note that it is missing.
detail_venv_dir() {
    if ! check_venv_dir; then
        printf 'missing %s' "${VENV_DIR}"
        return
    fi
    printf '%s' "${VENV_DIR}"
}

# pip version banner from the venv, or a missing-pip note.
detail_venv_pip() {
    if ! check_venv_pip; then
        printf 'missing pip in %s' "${VENV_DIR}"
        return
    fi
    "${VENV_PIP}" --version
}

# Path of the venv ffx entry point, or a missing note.
detail_venv_ffx() {
    if ! check_venv_ffx; then
        printf 'missing %s' "${VENV_FFX}"
        return
    fi
    printf '%s' "${VENV_FFX}"
}

# .bashrc path, or a note that prep can create it.
detail_bashrc_file() {
    if ! check_bashrc_file; then
        printf 'missing %s; prep can create it' "${BASHRC_FILE}"
        return
    fi
    printf '%s' "${BASHRC_FILE}"
}

# The managed alias line, or a missing-alias note.
detail_bashrc_alias() {
    if ! check_bashrc_alias; then
        printf 'missing alias line for %s' "${VENV_FFX}"
        return
    fi
    printf '%s' "${ALIAS_LINE}"
}
# Print the full readiness report (toolchain, bundle venv, shell alias)
# and recount READINESS_FAILURES from scratch. A missing .bashrc is only
# a warning; a missing alias line is a hard failure.
print_status_report() {
READINESS_FAILURES=0
echo "Dependency status:"
if check_python3; then
report_component ok "python3" "$(detail_python3)"
else
report_component failed "python3" "$(detail_python3)"
READINESS_FAILURES=$((READINESS_FAILURES + 1))
fi
echo
echo "Bundle venv status:"
if check_venv_dir; then
report_component ok "bundle virtualenv" "$(detail_venv_dir)"
else
report_component failed "bundle virtualenv" "$(detail_venv_dir)"
READINESS_FAILURES=$((READINESS_FAILURES + 1))
fi
if check_venv_pip; then
report_component ok "bundle pip" "$(detail_venv_pip)"
else
report_component failed "bundle pip" "$(detail_venv_pip)"
READINESS_FAILURES=$((READINESS_FAILURES + 1))
fi
if check_venv_ffx; then
report_component ok "bundle ffx" "$(detail_venv_ffx)"
else
report_component failed "bundle ffx" "$(detail_venv_ffx)"
READINESS_FAILURES=$((READINESS_FAILURES + 1))
fi
echo
echo "Shell exposure status:"
# Absent .bashrc is recoverable (prep creates it), so warn only.
if check_bashrc_file; then
report_component ok ".bashrc" "$(detail_bashrc_file)"
else
report_component warn ".bashrc" "$(detail_bashrc_file)"
fi
if check_bashrc_alias; then
report_component ok "ffx alias" "$(detail_bashrc_alias)"
else
report_component failed "ffx alias" "$(detail_bashrc_alias)"
READINESS_FAILURES=$((READINESS_FAILURES + 1))
fi
}
# Create the bundle virtualenv when absent and install this repository
# into it with pip --editable. Increments INSTALL_FAILURES and returns 1
# on the first failure; an existing venv is reused, not rebuilt.
ensure_bundle_venv() {
mkdir -p "${HOME}/.local/share"
if ! check_venv_dir; then
printf 'Creating bundle virtualenv at %s...\n' "${VENV_DIR}"
if ! python3 -m venv "${VENV_DIR}"; then
printf 'Failed to create virtualenv at %s.\n' "${VENV_DIR}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
fi
if ! check_venv_pip; then
printf 'Missing pip in %s.\n' "${VENV_DIR}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
# (Re)install is run even when the venv already existed, so code
# changes in the repository are picked up.
printf 'Installing FFX package into %s...\n' "${VENV_DIR}"
if ! "${VENV_PIP}" install --editable "${ROOT_DIR}"; then
printf 'Failed to install FFX package into %s.\n' "${VENV_DIR}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
return 0
}
# Ensure ${BASHRC_FILE} contains the managed ffx alias block. Cases:
# (1) marker lines already present -> rewrite the block in place via the
# embedded Python editor; (2) the exact alias line already exists
# unmanaged -> leave the file untouched; (3) otherwise append a fresh
# marker-delimited block. Increments INSTALL_FAILURES and returns 1 on
# write failures.
write_alias_block() {
local bashrc_dir
bashrc_dir="$(dirname "${BASHRC_FILE}")"
mkdir -p "${bashrc_dir}"
touch "${BASHRC_FILE}"
if grep -Fq "${ALIAS_BLOCK_BEGIN}" "${BASHRC_FILE}" || grep -Fq "${ALIAS_BLOCK_END}" "${BASHRC_FILE}"; then
# The heredoc body is a Python script fed to stdin; the quoted 'PY'
# delimiter prevents any shell expansion inside it. NOTE(review):
# the page dump stripped indentation — the Python indentation below
# is reconstructed; confirm against the repository file.
if ! python3 - "${BASHRC_FILE}" "${ALIAS_BLOCK_BEGIN}" "${ALIAS_BLOCK_END}" "${ALIAS_LINE}" <<'PY'
import pathlib
import sys
path = pathlib.Path(sys.argv[1])
begin = sys.argv[2]
end = sys.argv[3]
alias_line = sys.argv[4]
content = path.read_text()
block = f"{begin}\n{alias_line}\n{end}\n"
start = content.find(begin)
stop = content.find(end)
if start != -1 and stop != -1 and stop >= start:
    stop += len(end)
    if stop < len(content) and content[stop] == "\n":
        stop += 1
    content = content[:start] + block + content[stop:]
else:
    if content and not content.endswith("\n"):
        content += "\n"
    content += block
path.write_text(content)
PY
then
printf 'Failed to update managed alias block in %s.\n' "${BASHRC_FILE}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
fi
elif check_bashrc_alias; then
# Exact alias already present outside a managed block; keep as-is.
:
else
# Append a new managed block, preceded by a separating newline when
# the file does not already end with one.
{
if [ -s "${BASHRC_FILE}" ] && [ "$(tail -c 1 "${BASHRC_FILE}" 2>/dev/null || true)" != "" ]; then
printf '\n'
fi
printf '%s\n' "${ALIAS_BLOCK_BEGIN}"
printf '%s\n' "${ALIAS_LINE}"
printf '%s\n' "${ALIAS_BLOCK_END}"
} >>"${BASHRC_FILE}" || {
printf 'Failed to append alias block to %s.\n' "${BASHRC_FILE}" >&2
INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
return 1
}
fi
return 0
}
# Announce, then create/refresh the managed alias block in .bashrc;
# propagates write_alias_block's return status.
ensure_bashrc_alias() {
printf 'Ensuring ffx alias in %s...\n' "${BASHRC_FILE}"
write_alias_block
}
# Parse CLI options: --check sets CHECK_ONLY, --help/-h prints usage and
# exits 0, anything else prints usage to stderr and exits 2.
parse_args() {
    while [ "$#" -gt 0 ]; do
        if [ "$1" = "--check" ]; then
            CHECK_ONLY=1
        elif [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
            usage
            exit 0
        else
            printf 'Unknown option: %s\n\n' "$1" >&2
            usage >&2
            exit 2
        fi
        shift
    done
}
# Orchestrate the run: report, then (unless --check) build the venv and
# alias and report again. Exits non-zero when preparation failed or
# readiness checks still fail.
main() {
parse_args "$@"
print_status_report
if [ "${CHECK_ONLY}" -eq 0 ]; then
# python3 is the hard prerequisite for creating the venv.
if ! check_python3; then
printf '\npython3 is required before the bundle venv can be prepared.\n' >&2
exit 1
fi
echo
ensure_bundle_venv
ensure_bashrc_alias
echo
print_status_report
fi
echo
if [ "${INSTALL_FAILURES}" -gt 0 ]; then
echo "One or more bundle preparation steps failed; see the status checks above." >&2
exit 1
fi
if [ "${READINESS_FAILURES}" -gt 0 ]; then
echo "The FFX bundle virtualenv and/or alias setup is incomplete." >&2
exit 1
fi
echo "The FFX bundle virtualenv is ready."
}
main "$@"