Compare commits

107 commits: db7700a6b9 ... f288d445e4

| SHA1 |
|---|
| f288d445e4 |
| d9db6da191 |
| 5443881ea1 |
| 8946b57456 |
| 686239491b |
| 126ba4487c |
| 447cda19ef |
| f1ba913a98 |
| 59336aafb7 |
| fd5ad3ed56 |
| 2d03a3bb10 |
| 4dc02d52a2 |
| ed0cea9c26 |
| 15bfbdbe88 |
| c354ba09ba |
| 2eeea08be0 |
| fbfc8ea965 |
| 6ec5db2ea2 |
| 8feced6f1c |
| 285649c30a |
| 558da817f1 |
| 2a84327f69 |
| 535b11dca5 |
| 8edc715795 |
| cd203703e8 |
| 8f2367b71e |
| 101c7605d2 |
| a5b58e34e4 |
| a32e86550c |
| 5de3778ae5 |
| 81aab0657e |
| 8514a0c152 |
| c846147c64 |
| e52297b2ba |
| 655833f13e |
| 03dd02ed87 |
| b6ee197536 |
| d8374ae9f2 |
| f262eaa120 |
| d940a6e92a |
| e1395aeca0 |
| 48841c5750 |
| d558bbf6bd |
| b05d989581 |
| bc8af53525 |
| 6bd1587947 |
| 7d6531b40e |
| ab435a4c76 |
| 0a88e366b1 |
| 1c80cd7d7d |
| a45c180aaa |
| 0b204ff19c |
| d7ec5f7620 |
| 3f64304374 |
| b459272149 |
| 4b05fc194b |
| 9d088819ab |
| e20f7a1f67 |
| 9d683dfa84 |
| 867756c661 |
| f81a6edb07 |
| ec4bce473c |
| bf882b741f |
| a4e25b5ec8 |
| ff6bacb0d5 |
| f32b7a06c0 |
| 7ceed58e7b |
| 153f401dd3 |
| 7f1f34fb9f |
| 21fe7cb1eb |
| 9e63184524 |
| 3742221189 |
| 478ac15ab8 |
| ef0a01bc9b |
| 802c11be44 |
| 4cbb135772 |
| 3d52442471 |
| 81640192ab |
| 81d760aabe |
| c0eff679f7 |
| 07097058d7 |
| cd7a338541 |
| be652f8efb |
| dd51b14d49 |
| a471808392 |
| b3da8ce738 |
| fe0c078c3f |
| 962522b974 |
| 24367ea08a |
| f0eebd0bea |
| c8e21b9260 |
| cdc1664779 |
| 2849eda05a |
| cfb2df8d66 |
| 12c8ad3782 |
| 74a39a8f9a |
| 5eacb0d0cb |
| e8c0c3d646 |
| 6b2671a1f5 |
| 2d8622506e |
| 86cc7dfc6f |
| d84bee74c4 |
| 488caa7a08 |
| 62877dfed6 |
| 87ff94e204 |
| 0c78ed7cf7 |
| 4db9bfd103 |
11 .gitignore vendored

```diff
@@ -2,9 +2,14 @@ __pycache__
 junk/
 .vscode
 .ipynb_checkpoints/
-ansible/inventory/hawaii.yml
-ansible/inventory/peppermint.yml
+tools/ansible/inventory/hawaii.yml
+tools/ansible/inventory/peppermint.yml
+tools/ansible/inventory/cappuccino.yml
+tools/ansible/inventory/group_vars/all.yml
 ffx_test_report.log
 bin/conversiontest.py
-*.egg-info/
+build/
+dist/
+*.egg-info/
+.codex
```
376 AGENTS.md Normal file
@@ -0,0 +1,376 @@

# AGENTS.md

This file is the entry point for agent guidance in this repository.

It is intentionally generic and reusable across projects. Keep this file focused on non-project-specific constraints, working style, and the structure used to link more detailed guidance.

# Purpose

- Provide a small default rule set for agents working in this repository.
- Keep the base guidance modular and easy to extend.
- Separate reusable agent behavior from project-specific requirements.

# Comment Syntax

- A segment wrapped in `<!--` and `-->` is a comment and must be ignored by agents.
- Use HTML comments for optional guidance that should stay inactive until enabled.
- To enable an optional segment, remove the surrounding `<!--` and `-->` markers.

# Core Principles

- Prefer the simplest solution that satisfies the current goal.
- Keep guidance lightweight: only add detail when it meaningfully improves outcomes.
- Reuse modular guideline files instead of expanding this file indefinitely.
- Treat project-specific documents as the source of truth for project behavior.
- When guidance conflicts, use the most specific applicable document.

# Rule Terms

- A `rule` is the general term for any constraint, requirement, definition, or similar guidance item.
- A `rule set` addresses all rules inside one file that share the same rule set ID.
- Any rule inside a rule set shall use an ID following the schema `RULESET-0001`, `RULESET-0002`, and so on.
- Rules without a rule set ID are also valid, but they are not addressable by rule ID.

# Scope Of This File

This file should contain:

- Generic agent behavior and constraints.
- Rules that are reusable across multiple projects.
- Links to optional guideline modules.
- Links to project-specific requirements.
- Commented optional templates for released-product documentation and agent-output locations.

This file should not contain:

- Project business requirements.
- Project architecture decisions.
- Stack-specific implementation details unless they are universally applicable.
- Task-specific runbooks that belong in dedicated modules.

# Default Agent Behavior

- Read the relevant context before making changes.
- Prefer small, understandable edits over broad refactors.
- Preserve existing patterns unless there is a clear reason to change them.
- Document assumptions when context is missing.
- Ignore HTML comment segments.
- If a more specific enabled guideline exists for the current task, follow it.

# Guideline Structure

Use the following structure for reusable guidance files and project-specific documentation as needed:

```text
/
|-- AGENTS.md
|-- guidance/
|   |-- stacks/
|   |-- conventions/
|   `-- workflows/
|-- prompts/
`-- requirements/

Optional files and directories
|-- SCRATCHPAD.md
|-- docs/
|   |-- readme.md
|   |-- installation.md
|   `-- history.md
|-- process/
|   |-- log.md
|   `-- coding-handbook.md
```

# Optional Reusable Modules

Add files under `guidance/` only when they are needed.

# Optional Scratchpad

- `SCRATCHPAD.md` is an optional repo-root scratchpad for temporary information aimed at the next iteration.
- Developers may create or delete `SCRATCHPAD.md` at any time.
- Developers may refer to `SCRATCHPAD.md` as `scratchpad` when giving agents a source or target for information.
- Agents may read, update, create, or remove the scratchpad when the task explicitly calls for it.
- Treat the scratchpad as low-formality working context rather than canonical project truth.
- Use the scratchpad for short-lived notes, open questions, sketches, and temporary decisions that should be resolved away.
- Move durable outcomes into `requirements/`, `guidance/`, code, tests, or another long-lived location.
- If `SCRATCHPAD.md` is absent, agents should continue normally.

# Optional Rule Sets

- Optional rule sets may be stored in `guidance/optional/` or in `guidance/{section}/optional/`.
- Optional rule sets are inactive by default and shall only be applied when a prompt explicitly requests them, for example by phrases such as `Apply rules for lean interface iteration in the following steps.` or `Apply LII rules.`
- An optional rule set may be requested by its descriptive name, by its rule set ID, or by another equally clear explicit reference.
- Agents shall never infer or auto-enable optional rule sets from general intent alone.
- If an optional rule or rule set cannot be identified and addressed clearly, agents shall stop and ask before proceeding.

# Prepared Orders

- An `order` is a prepared prompt for one isolated operation rather than a general workflow or standing rule set.
- Orders shall be stored under `prompts/`.
- Order files shall use the naming schema `ORDER-0001-<slug>.md`, `ORDER-0002-<slug>.md`, and so on.
- The canonical order identifier is the `ORDER-0001` style prefix. The trailing slug is descriptive only.
- Recommended internal order file structure is: prompt ID, prompt name, purpose, trigger examples, scope, operation, and expected output.
- Orders shall only be executed when they are explicitly requested by a prompt such as `Execute ORDER-0007.` or `Execute ORDER 7.`
- Agents may accept an unambiguous short numeric reference such as `ORDER 7` as an alias for `ORDER-0007`.
- If an order cannot be identified uniquely and clearly, agents shall stop and ask before proceeding.
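The `ORDER 7` alias rule implies a small normalization step. The following Python sketch is illustrative only; the helper name and the lookup under `prompts/` are assumptions, not part of this guidance:

```python
import re
from pathlib import Path

def resolve_order_reference(reference: str, prompts_dir: str = "prompts") -> Path:
    """Map 'ORDER 7' or 'ORDER-0007' onto the single matching ORDER-0007-<slug>.md file."""
    match = re.fullmatch(r"ORDER[- ]0*(\d+)", reference.strip().rstrip("."), re.IGNORECASE)
    if not match:
        raise ValueError(f"Not an order reference: {reference!r}")
    canonical = f"ORDER-{int(match.group(1)):04d}"
    candidates = sorted(Path(prompts_dir).glob(f"{canonical}-*.md"))
    if len(candidates) != 1:
        # Mirrors the rule above: stop and ask when the order is not uniquely identifiable.
        raise LookupError(f"{canonical} does not resolve to exactly one order file: {candidates}")
    return candidates[0]
```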
# Toolstack Guides

Location:

```text
guidance/stacks/
```

Examples:

- `guidance/stacks/python.md`
- `guidance/stacks/typescript.md`
- `guidance/stacks/docker.md`
- `guidance/stacks/terraform.md`

Use for:

- Language or framework expectations.
- Tooling and environment conventions.
- Build, test, and runtime guidance tied to a specific stack.

# Coding Conventions

Location:

```text
guidance/conventions/
```

Examples:

- `guidance/conventions/naming.md`
- `guidance/conventions/testing.md`
- `guidance/conventions/review.md`

Use for:

- Naming and structure conventions.
- Testing expectations.
- Code review and quality rules.

# Recurring Workflows

Location:

```text
guidance/workflows/
```

Examples:

- `guidance/workflows/feature-delivery.md`
- `guidance/workflows/bugfix.md`
- `guidance/workflows/release.md`
- `guidance/workflows/incident-response.md`

Use for:

- Repeatable task flows.
- Checklists for common delivery work.
- Operational or maintenance procedures.

<!-- Enable this optional section by removing the outer HTML comment markers from this segment when you want agents to create, update, and consult released-product documentation in `docs/`.

# Released Product Documentation

Released-product documentation should live outside the generic sections above.

Recommended location:

```text
docs/
```

Examples:

- `docs/readme.md`
- `docs/installation.md`
- `docs/history.md`

Agent rules for docs output:

- Keep content compact but comprehensive.
- Write for end users, operators, or other consumers of the released product.
- Prefer shipped behavior, supported workflows, and stable terminology over internal implementation detail.
- Keep documentation synchronized with released behavior.
- Update release history when user-visible changes are shipped.

Recommended topics:

- Product overview and intended use.
- Installation, configuration, and upgrade guidance.
- Usage patterns, operational instructions, and support boundaries.
- Compatibility notes, migration notes, and release history.
- Troubleshooting and common pitfalls when relevant. -->

<!-- Enable this optional section by removing the outer HTML comment markers from this segment when you want agents to produce and consult workflow output in `process/`.

# Agent Output In `process/`

The `process/` directory is primarily for agent output created during delivery, maintenance, and review work.

Recommended location:

```text
process/
```

Agent rules for process output:

- Use `process/` for agent-produced artifacts rather than released-product documentation.
- Keep entries concise, traceable, and tied to resulting changes.
- Treat `process/` as workflow output, not as the primary source of product truth.
- Prefer summaries and rationale over raw transcript dumps unless a workflow explicitly requires full prompt history.

# Agent Change Log

Location:

```text
process/log.md
```

Use for:

- Capturing prompts given to agents.
- Recording concise explanations of the resulting changes made by agents.
- Preserving task-by-task rationale, decisions, and implementation notes.

# Coding Handbook

Location:

```text
process/coding-handbook.md
```

Use for:

- A tutorial-style handbook that explains the programming components used in the project.
- Compact but comprehensive technical onboarding material for future contributors.
- Written explanations that connect code structure, concepts, and implementation patterns. -->

# Project-Specific Requirements

Project-specific material should live outside the generic sections above.

Recommended location:

```text
requirements/
```

Examples:

- `requirements/project.md`
- `requirements/architecture.md`
- `requirements/decisions.md`
- `requirements/domain.md`

Use for:

- Product and business requirements.
- Project goals and constraints.
- Architecture and design decisions.
- Domain knowledge that is specific to this repository.

# Agent-Level Variables

When present, `requirements/identifiers.yml` is an optional project-specific input that defines agent-level variables for use inside `requirements/` and `guidance/`.

Variable schema:

- Use `@{VARIABLE_NAME}` for agent-level variables.
- Prefer uppercase snake case names such as `@{PROJECT_ID}` or `@{VENDOR_ID}`.
- Do not treat `${...}` as an agent-level variable form; that syntax may appear in Bash or other code and should not be interpreted as agent metadata.

Scope:

- The effective scope of `requirements/identifiers.yml` is limited to `requirements/` and `guidance/`.
- Definitions from `requirements/identifiers.yml` must not leak into product code.

Defaults:

- Default `@{VENDOR_ID}` is `osgw`.
- Default `@{PROJECT_ID}` is the current repository directory name.

Resolution rules:

- Treat `requirements/identifiers.yml` as optional; when it is absent, agents may still resolve the defaults defined above.
- If a variable is used in `requirements/` or `guidance/` and it is not defined in `requirements/identifiers.yml` and does not have a default in this file, agents may stop and report the undefined variable.
- Prefer updating duplicated identifier values in `requirements/` and `guidance/` to use the variable schema when that improves consistency.
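A minimal Python sketch of these resolution rules follows; the function is an illustration under the stated defaults, not a required implementation, and it assumes PyYAML is available:

```python
import re
from pathlib import Path

import yaml  # PyYAML, assumed available for this sketch

VARIABLE_PATTERN = re.compile(r"@\{([A-Z0-9_]+)\}")

def resolve_agent_variables(text: str, repo_root: Path) -> str:
    """Replace @{VARIABLE_NAME} placeholders using requirements/identifiers.yml plus the defaults above."""
    defaults = {"VENDOR_ID": "osgw", "PROJECT_ID": repo_root.name}
    identifiers_file = repo_root / "requirements" / "identifiers.yml"
    defined = yaml.safe_load(identifiers_file.read_text()) if identifiers_file.exists() else None
    variables = {**defaults, **(defined or {})}

    def substitute(match: re.Match) -> str:
        name = match.group(1)
        if name not in variables:
            # Per the resolution rules: stop and report the undefined variable.
            raise KeyError(f"Undefined agent-level variable: @{{{name}}}")
        return str(variables[name])

    return VARIABLE_PATTERN.sub(substitute, text)
```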
# Precedence

Some precedence levels may be absent because optional levels can remain inside HTML comments. The smaller numeric index wins.

Apply guidance in this order:

1. Direct user or task instructions.
2. Project-specific documents in `requirements/`.
<!-- 3. Released-product documentation in `docs/` when shipped behavior or user-facing expectations are relevant. -->
4. Relevant modular guides in `guidance/stacks/`, `guidance/conventions/`, or `guidance/workflows/`.
<!-- 5. Agent output in `process/` when prior prompts, rationale, or implementation notes are relevant. -->
6. This `AGENTS.md`.

# Maintenance

- Keep this file short and stable.
- Move detail into dedicated modules when a section becomes too specific or too long.
- Add new guideline files only when they solve a recurring need.
- Remove outdated references when the repository structure changes.

# Current Status

This repository defines the base `AGENTS.md` structure plus project-specific requirements and modular guidance.

Future project work can add:

- Reusable modules under `guidance/`
- Project-specific documentation under `requirements/`
- Optional temporary iteration context in `SCRATCHPAD.md`
- Optional released-product documentation under `docs/` by uncommenting its segment
- Optional agent output under `process/` by uncommenting its segment
- Cross-references from this file once those documents exist
62 SCRATCHPAD.md Normal file
@@ -0,0 +1,62 @@

<!--

# Scratchpad

Temporary information holder for the next iteration. Developers may create or delete this file at any time. Anything durable should move into code, tests, or canonical docs, then this file should disappear.

## Goal

Use this section for the current slice of work. It should explain what the scratchpad is helping us move forward right now.

## Settled

Use this for decisions that are stable enough to guide the next steps, but are still temporary enough to live in the scratchpad for now.

## Focused Snapshot

Use an extra section like this only when one slice needs its own compact summary. This is useful when a specific API, boundary, or model was recently recreated and should be captured clearly.

## Open

Use this for unresolved questions, design choices, and risks that still need a decision.

## Sketches

Use this for rough candidate structures, names, or shapes. Keep it explicit that these are sketches, not committed architecture.

## Gaps Right Now

Use this for concrete missing pieces in the current repo state. This section should describe what is absent or incomplete, not broad future ambitions.

## Next

Use this for the immediate sequence of work. It should be short, ordered, and biased toward the next deliverable rather than a long roadmap.

## Delete When

Use this to define when the scratchpad should disappear. That keeps it clearly temporary and helps prevent it from turning into shadow documentation.

## Suggested Style

- Prefer short bullets over long prose.
- Keep facts, questions, and rough sketches in separate sections.
- Add custom sections only when they help the next iteration move faster.
- Move durable outcomes out of the scratchpad once they stop being temporary.

-->
28 guidance/workflow/optional/lean-interface-iteration.md Normal file
@@ -0,0 +1,28 @@

# Lean Interface Iteration

Rule set name: `lean-interface-iteration`

Rule set ID: `LII`

Status: optional, prompt-activated only

Trigger examples:

- `Apply the lean-interface-iteration rules.`
- `Apply LII rules.`

LII-0001: Apply this rule set only when it is explicitly requested in the prompt.

LII-0002: The target of work under this rule set is the iterated product state for the addressed iteration only.

LII-0003: Optimize the addressed interface toward the leanest and least complex model that still satisfies the iteration order.

LII-0004: Backward compatibility, legacy aliases, and compatibility shims are not required unless the prompt explicitly asks to preserve them.

LII-0005: Prefer one authoritative interface over multiple overlapping parameters, flags, or naming variants.

LII-0006: Remove or avoid transitional interface layers when they are not required by the addressed iteration order.

LII-0007: Update affected tests, guidance, requirements, and documentation so they describe the simplified interface model rather than a mixed legacy-and-new model.

LII-0008: Never change behavior, interfaces, or surrounding areas that are not addressed by the current iteration order.
56 guidance/workflow/optional/preparation-script-design.md Normal file
@@ -0,0 +1,56 @@

# Preparation Script Design

Rule set name: `preparation-script-design`

Rule set ID: `PSD`

Status: optional, prompt-activated only

Trigger examples:

- `Apply the preparation-script-design rules.`
- `Apply PSD rules.`

PSD-0001: Apply this rule set only when it is explicitly requested in the prompt.

PSD-0002: Use this rule set for scripts whose purpose is to prepare, verify, or expose a local development or automation environment rather than to perform product runtime behavior.

PSD-0003: Keep a preparation script focused on environment readiness, dependency installation, local helper exposure, and clear verification output; do not mix unrelated product logic into the script.

PSD-0004: Design the script to be idempotent so repeated runs converge on the same prepared state without unnecessary reinstallation or destructive side effects.

PSD-0005: Provide a verification-only mode such as `--check` that reports readiness without installing, modifying, or creating dependencies.

PSD-0006: Separate component checks from installation steps so the script can report what is missing before or after attempted remediation.

PSD-0007: Group required capabilities into clear purpose-oriented sections such as support toolchains, local package bundles, generated environment helpers, or other relevant readiness areas instead of presenting one undifferentiated dependency list.

PSD-0008: Prefer explicit per-component check helpers over opaque one-shot checks so failures remain traceable and easy to extend.

PSD-0009: Generate or update environment helper files only when they provide a stable, reusable way to expose repo-local or workspace-local tools, paths, or environment variables.

PSD-0010: Generated environment helper files shall be safe to source multiple times and should avoid duplicating path entries or clobbering unrelated user environment state.

PSD-0011: When a preparation flow seeds optional user-owned files such as config templates, do so non-destructively by creating them only when absent unless the prompt explicitly requests overwrite behavior.

PSD-0012: Report status in a concise scan-friendly line format of the shape `[status] Label: detail`, where the label names the checked component and the detail string stays short and specific.

PSD-0013: Prefer a small canonical status vocabulary in those report lines, with `ok` for satisfied checks, `warn` for non-blocking gaps, and a failure status such as `failed` for blocking or unsuccessful states.

PSD-0014: When a preparation script uses terminal colors in its status output, apply a consistent severity mapping so `ok` is green, `warn` is yellow, and all other status levels are red.

PSD-0015: In bracketed status markers such as `[ok]` or `[warn]`, keep the square brackets uncolored and apply the severity color only to the inner status text.

PSD-0016: Colorized status output shall degrade safely in non-terminal or non-color contexts so the script remains readable and automation-friendly without ANSI support.

PSD-0017: End with an explicit readiness conclusion that distinguishes between successful preparation, incomplete prerequisites, and failed installation attempts.

PSD-0018: Installation logic should use the narrowest supported platform-specific package-manager actions necessary for the declared scope and should fail clearly when no supported installation path is available.

PSD-0019: Treat repo-local helper tooling and local package installation boundaries explicitly rather than assuming global installs, especially when the prepared environment is intended to be reproducible.

PSD-0020: Keep the script suitable for both interactive local developer use and non-interactive automation checks by avoiding prompts during normal execution unless the prompt explicitly requires interactivity.

PSD-0021: When a script depends on generated helper files or adjacent validation helpers, update those supporting files only as needed to keep the preparation flow coherent and usable.

PSD-0022: Verify shell syntax after changes and, when feasible, run a dry readiness check so the resulting preparation flow is validated rather than only written.
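To make the verification mode (PSD-0005) and the status-line rules (PSD-0012 through PSD-0016) concrete, here is a minimal Python sketch of the reporting convention; the function names and exact color codes are illustrative assumptions, and the real preparation flow lives in the shell script:

```python
import sys

# Severity colors per PSD-0014: ok -> green, warn -> yellow, everything else -> red.
STATUS_COLORS = {"ok": "\033[32m", "warn": "\033[33m"}
RED = "\033[31m"
RESET = "\033[0m"

def report(status: str, label: str, detail: str) -> None:
    """Print one '[status] Label: detail' line (PSD-0012), coloring only the inner status text (PSD-0015)."""
    if sys.stdout.isatty():  # PSD-0016: degrade to plain text outside a color-capable terminal.
        status_text = f"{STATUS_COLORS.get(status, RED)}{status}{RESET}"
    else:
        status_text = status
    print(f"[{status_text}] {label}: {detail}")

# Hypothetical check-only pass (PSD-0005): report readiness without installing anything.
report("ok", "ffmpeg", "found on PATH")
report("warn", "TMDB_API_KEY", "not set; TMDB lookups would be skipped")
report("failed", "cpulimit", "not installed")
```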
97 requirements/architecture.md Normal file
@@ -0,0 +1,97 @@

# Architecture

## Architecture Goals

- Keep the tool small, local, and easy to reason about.
- Separate media inspection, stored normalization rules, and conversion execution clearly enough that users can inspect and adjust behavior.
- Favor explicit local state and deterministic rule application over opaque automation.
- Make external runtime dependencies and platform assumptions visible.

## System Context

- Primary actors:
  - Local operator running the CLI.
  - Local operator using the Textual TUI to inspect files and maintain rules.
- External systems:
  - `ffprobe` for media introspection.
  - `ffmpeg` for conversion and extraction.
  - TMDB API for optional show and episode metadata.
  - Local filesystem for source media, generated outputs, subtitles, logs, config, and database files.
- Data entering the system:
  - Media container and stream metadata from source files.
  - Regex patterns and per-show normalization rules entered in the TUI.
  - Optional config values from `~/.local/etc/ffx.json`.
  - Optional TMDB identifiers and CLI overrides.
  - Optional external subtitle files.
- Data leaving the system:
  - Normalized output media files.
  - Extracted stream files from unmux operations.
  - SQLite rows representing shows, patterns, tracks, tags, shifted seasons, and properties.
  - Local log output and console messages.

## High-Level Building Blocks

- Frontend, CLI, API, or worker:
  - A Click-based CLI in [`src/ffx/ffx.py`](/home/osgw/.local/src/codex/ffx/src/ffx/ffx.py).
  - A Textual terminal UI rooted in [`src/ffx/ffx_app.py`](/home/osgw/.local/src/codex/ffx/src/ffx/ffx_app.py) with screens for shows, patterns, file inspection, tracks, tags, and shifted seasons.
- Core business logic:
  - Descriptor objects model media files, shows, and tracks.
  - Controllers encapsulate CRUD operations and workflow orchestration for shows, patterns, tags, tracks, season shifts, configuration, and conversion.
  - `MediaDescriptorChangeSet` computes differences between a file and its stored target schema to drive metadata and disposition updates.
- Storage:
  - SQLite via SQLAlchemy ORM, with schema rooted in shows, patterns, tracks, media tags, track tags, shifted seasons, and generic properties.
  - A configuration JSON file supplies optional path, metadata-filtering, and filename-template settings.
- Integration adapters:
  - Process execution wrapper for `ffmpeg`, `ffprobe`, `nice`, and `cpulimit`.
  - HTTP adapter for TMDB via `requests`.
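The change-set idea can be pictured with a small sketch. This is not the actual `MediaDescriptorChangeSet` implementation; the field names and structure are assumptions used only to illustrate the comparison step:

```python
from dataclasses import dataclass

@dataclass
class StreamState:
    language: str
    title: str
    default: bool
    forced: bool

def compute_changes(actual: dict[int, StreamState], target: dict[int, StreamState]) -> dict[int, dict[str, tuple]]:
    """Compare per-stream metadata of a probed file against the stored target schema."""
    changes: dict[int, dict[str, tuple]] = {}
    for index, wanted in target.items():
        current = actual.get(index)
        if current is None:
            continue  # Missing source streams are treated as a validation concern, not a metadata change.
        diff = {
            field: (getattr(current, field), getattr(wanted, field))
            for field in ("language", "title", "default", "forced")
            if getattr(current, field) != getattr(wanted, field)
        }
        if diff:
            changes[index] = diff
    return changes
```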
## Data And Interface Notes

- Key entities or records:
  - `Show`: canonical TV show metadata plus digit-formatting rules for generated filenames.
  - `Pattern`: regex rule tying filenames to one show and one target media schema.
  - `Track` and `TrackTag`: persisted target stream layout, codec, dispositions, audio layout, and stream-level tags.
  - `MediaTag`: persisted container-level metadata for a pattern.
  - `ShiftedSeason`: mapping from source numbering ranges to adjusted season and episode numbers.
  - `Property`: internal key-value storage currently used for database versioning.
- External interfaces:
  - CLI commands for conversion, inspection, extraction, and crop detection.
  - TUI workflows for rule authoring and rule maintenance.
  - Environment variable `TMDB_API_KEY` for TMDB access.
  - Config keys `databasePath`, `logDirectory`, and `outputFilenameTemplate`, plus optional metadata-filter rules.
- Validation rules:
  - Only supported media-file extensions are accepted for conversion.
  - Stored database version must match the runtime-required version.
  - A normalized descriptor may have at most one default and one forced stream per relevant track type.
  - Stored target tracks must refer to valid source tracks of matching types.
  - Shifted-season ranges are intended not to overlap for the same show and season.
  - TMDB lookups require a show ID and season and episode numbers.
- Error-handling approach:
  - User-facing operational failures are raised as `click.ClickException` or warnings.
  - Ambiguous default and forced stream states trigger prompts unless `--no-prompt` is set, in which case the command fails fast.
  - External-process failures and invalid media are surfaced through logs and command errors rather than retries, except for TMDB rate-limit retries.
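The disposition uniqueness rule above can be expressed as a short check. This is an illustrative sketch only, not the project's `checkUniqueDispositions` implementation; the track representation is an assumption:

```python
from collections import Counter

def find_disposition_conflicts(tracks: list[dict]) -> list[str]:
    """Report violations of 'at most one default and one forced stream per track type'."""
    problems = []
    for disposition in ("default", "forced"):
        counts = Counter(track["type"] for track in tracks if track.get(disposition))
        for track_type, count in counts.items():
            if count > 1:
                problems.append(f"{count} {track_type} streams marked {disposition}; at most one is allowed")
    return problems

# Two audio streams both flagged as default would be reported as a conflict.
example = [
    {"type": "audio", "default": True, "forced": False},
    {"type": "audio", "default": True, "forced": False},
    {"type": "subtitle", "default": False, "forced": True},
]
assert find_disposition_conflicts(example) == ["2 audio streams marked default; at most one is allowed"]
```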
## Deployment And Operations

- Runtime environment:
  - Local Python environment with the package installed and `ffmpeg`, `ffprobe`, `nice`, and `cpulimit` available on `PATH`.
- Deployment shape:
  - Single-process command execution on demand; no daemon, queue, or network service of its own.
- Secrets and configuration handling:
  - TMDB secret is read from `TMDB_API_KEY`.
  - User config is read from `~/.local/etc/ffx.json`.
  - Database path may also be overridden per command via `--database-file`.
- Logging and monitoring approach:
  - File and console logging configured per invocation.
  - Default log file path is `~/.local/var/log/ffx.log`.
  - No dedicated monitoring integration is present.

## Open Technical Questions

- Question: Should Linux-specific assumptions such as `/dev/null`, `nice`, `cpulimit`, and `~/.local` remain part of the supported-platform contract?
  - Risk: Portability and operational behavior are underspecified for non-Linux environments.
  - Next decision needed: Either document Linux-like systems as the official support boundary or refactor the process and path handling for broader portability.

- Question: Should placeholder TUI surfaces such as settings and help become part of the required product surface or stay explicitly out of scope?
  - Risk: The UI appears broader than the actually finished feature set.
  - Next decision needed: Either remove or complete placeholder screens and update requirements accordingly.
101 requirements/project.md Normal file
@@ -0,0 +1,101 @@

## Purpose And Scope

- Project name: FFX
- User problem: TV episode files from mixed sources arrive with inconsistent codecs, stream metadata, subtitle layouts, season and episode numbering, and output filenames, which makes them awkward to archive and use in media-player applications.
- Target users: Individual operators curating a local TV media library on a workstation, especially users willing to define normalization rules per show.
- Success outcome: A user can inspect source files, define reusable show and pattern rules, and produce output files whose streams, metadata, and filenames follow a predictable schema for web playback and library import.
- Out of scope:
  - Multi-user or hosted service workflows.
  - General movie-library management.
  - Distributed transcoding or remote job orchestration.
  - Broad media-server administration beyond file preparation.

## Required Product

- Deliverable type: Installable Python command-line application with a Textual terminal UI for inspection and rule editing.
- Core capabilities:
  - Maintain an SQLite-backed database of shows, filename-matching patterns, per-pattern stream layouts and metadata tags, and optional season-shift rules.
  - Inspect existing media files through `ffprobe` and compare discovered stream metadata with stored normalization rules.
  - Convert media files through `ffmpeg` into a normalized output layout, including video recoding, audio transcoding to Opus, metadata cleanup and rewrite, and controlled disposition flags.
  - Build output filenames from detected or configured show, season, and episode information, optionally enriched from TMDB and a configurable Jinja-style filename template.
  - Support auxiliary file operations such as subtitle import, unmuxing, crop detection, and rename-only runs.
- Supported environments:
  - Local execution on a Python-capable workstation.
  - Best-supported on Linux-like systems because the implementation assumes `~/.local`, `/dev/null`, `nice`, and `cpulimit`.
  - Requires `ffmpeg`, `ffprobe`, and `cpulimit` on `PATH`.
- Operational owner: The local user running the tool and maintaining its config, database, and external tooling.

## Suggested User Stories

- As a library maintainer, I want to define show-specific matching rules once so that future source files can be normalized automatically.
- As an operator, I want to inspect a file before conversion so that I can compare its actual streams and tags against the stored target schema.
- As a user preparing web-playback files, I want to recode video and audio with a small set of predictable options so that results are compatible and consistently named.
- As a user dealing with nonstandard releases, I want CLI overrides for language, title, stream order, default and forced tracks, and season and episode data so that one-off fixes do not require database edits first.
- As a user importing anime or other shifted numbering schemes, I want season and episode offsets per show so that generated filenames align with TMDB and media-library expectations.

## Functional Requirements

- The system shall provide a CLI entrypoint named `ffx` with commands for `convert`, `inspect`, `shows`, `unmux`, `cropdetect`, `version`, and `help`.
- The system shall persist reusable normalization rules in SQLite for:
  - shows and show formatting digits,
  - regex-based filename patterns,
  - per-pattern media tags,
  - per-pattern stream definitions,
  - shifted-season mappings,
  - internal database version properties.
- The system shall inspect source media using `ffprobe` and derive a structured description of container metadata and streams.
- The system shall optionally open a Textual UI to browse shows, inspect files, and create, edit, or delete shows, patterns, stream definitions, tags, and shifted-season rules.
- The system shall match filenames against stored regex patterns to decide whether an input file should inherit a target stream and metadata schema.
- The system shall convert supported input files (`mkv`, `mp4`, `avi`, `flv`, `webm`) with `ffmpeg`, supporting at least:
  - VP9, AV1, and H.264 video encoding,
  - Opus audio encoding with bitrate selection based on channel layout,
  - metadata and disposition rewriting,
  - optional crop detection and crop application,
  - optional deinterlacing and denoising,
  - optional subtitle import from external files,
  - rename-only copy mode.
- The system shall support optional TMDB lookups to resolve show names, years, and episode titles when a show ID, season, and episode are available.
- The system shall generate output filenames from show metadata, season and episode indices, and episode names using the configured filename template.
- The system shall allow CLI overrides for stream languages, stream titles, default and forced tracks, stream order, TMDB show and episode data, output directory, label prefix, and processing resource limits.
- The system shall support extracting streams into separate files via `unmux` and reporting suggested crop parameters via `cropdetect`.
- The system shall handle invalid input and system failures gracefully by logging warnings or raising `click` errors for missing files, invalid media, missing TMDB credentials, incompatible database versions, and ambiguous track dispositions when prompting is disabled.
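As an illustration of the channel-layout-based Opus bitrate selection required above, here is a minimal Python sketch; the mapping itself is an assumption for demonstration, and only the constant names follow `ffx.constants` (the stereo value is a placeholder):

```python
# Illustrative only: the real selection logic lives in the conversion controller.
DEFAULT_STEREO_BANDWIDTH = "128"   # placeholder value for this sketch
DEFAULT_AC3_BANDWIDTH = "256"
DEFAULT_DTS_BANDWIDTH = "320"
DEFAULT_7_1_BANDWIDTH = "384"

def opus_bitrate_for_channels(channels: int) -> str:
    """Pick an Opus bitrate (kbit/s) from the channel count of a source audio stream."""
    if channels <= 2:
        return DEFAULT_STEREO_BANDWIDTH
    if channels <= 6:
        return DEFAULT_AC3_BANDWIDTH   # typical 5.1 layouts
    if channels == 7:
        return DEFAULT_DTS_BANDWIDTH
    return DEFAULT_7_1_BANDWIDTH       # 7.1 and wider layouts

print(opus_bitrate_for_channels(6))  # -> "256" under this sketch's assumptions
```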
## Quality Requirements

- The system should stay understandable as a small local tool: controllers, descriptors, models, and screens should remain separate enough for contributors to trace a workflow end to end.
- The system should produce predictable output for the same database rules, CLI overrides, and source files.
- The system should preserve a lightweight operational footprint: local SQLite state, local log file, no mandatory background services.
- The system should be testable through the existing combinatorial CLI-oriented test harness and through isolated logic in descriptors and controllers.
- The system should expose enough logging to diagnose failed probes, failed conversions, and rule mismatches without requiring a debugger.

## Constraints And Assumptions

- Technology constraints:
  - Python package built with setuptools.
  - Primary libraries: `click`, `textual`, `sqlalchemy`, `jinja2`, `requests`.
  - Conversion and inspection rely on external executables rather than pure-Python media libraries.
- Hosting or infrastructure constraints:
  - Intended for local execution, not server deployment.
  - Stores default state in `~/.local/etc/ffx.json`, `~/.local/var/ffx/ffx.db`, and `~/.local/var/log/ffx.log`.
- Timeline constraints:
  - The current implemented scope reflects a compact alpha release stream up to version `0.2.3`.
- Team capacity assumptions:
  - Maintained as a small codebase where simple patterns and direct controller logic are preferred over framework-heavy abstractions.
- Third-party dependencies:
  - `ffmpeg`, `ffprobe`, and `cpulimit`.
  - TMDB API access through `TMDB_API_KEY` for metadata enrichment.

## Acceptance Scope

- First release boundary:
  - Local installation through `pip`.
  - Working SQLite-backed rule storage.
  - Functional CLI conversion and inspection workflows.
  - Textual CRUD flows for shows, patterns, tags, tracks, and shifted seasons.
  - TMDB-assisted filename generation, subtitle import, season shifting, database versioning, and configurable output filename templating.
- Excluded follow-up ideas:
  - Completing placeholder screens such as settings and help.
  - Hardening platform portability beyond Linux-like systems.
  - Broader media types, richer release packaging, and production-grade background processing.
- Demonstration scenario:
  - Inspect a TV episode file, define or update the matching show and pattern in the TUI, then run `ffx convert` so the result uses the stored stream schema, optional TMDB episode naming, and a normalized output filename.
```diff
@@ -9,6 +9,7 @@ class AudioLayout(Enum):
     LAYOUT_7_1 = {"label": "7.1", "index": 4} #TODO: Does this exist?
 
     LAYOUT_6CH = {"label": "6ch", "index": 5}
+    LAYOUT_5_0 = {"label": "5.0(side)", "index": 6}
 
     LAYOUT_UNDEFINED = {"label": "undefined", "index": 0}
@@ -29,6 +30,15 @@ class AudioLayout(Enum):
         except:
             return AudioLayout.LAYOUT_UNDEFINED
 
+    # @staticmethod
+    # def fromIndex(index : int):
+    #     try:
+    #         target_index = int(index)
+    #     except (TypeError, ValueError):
+    #         return AudioLayout.LAYOUT_UNDEFINED
+    #     return next((a for a in AudioLayout if a.value['index'] == target_index),
+    #                 AudioLayout.LAYOUT_UNDEFINED)
+
     @staticmethod
     def fromIndex(index : int):
         try:
```
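The commented-out variant above sketches an index-based lookup over the enum's value dictionaries. A standalone illustration of that pattern (not the code shipped in this commit; the enum members here are invented):

```python
from enum import Enum

class Layout(Enum):
    STEREO = {"label": "stereo", "index": 1}
    SURROUND_5_1 = {"label": "5.1", "index": 3}
    UNDEFINED = {"label": "undefined", "index": 0}

def from_index(index) -> "Layout":
    """Return the Layout whose value['index'] matches, falling back to UNDEFINED."""
    try:
        target = int(index)
    except (TypeError, ValueError):
        return Layout.UNDEFINED
    return next((layout for layout in Layout if layout.value["index"] == target), Layout.UNDEFINED)

assert from_index("3") is Layout.SURROUND_5_1
assert from_index(None) is Layout.UNDEFINED
```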
```diff
@@ -9,7 +9,7 @@ DEFAULT_AC3_BANDWIDTH = "256"
 DEFAULT_DTS_BANDWIDTH = "320"
 DEFAULT_7_1_BANDWIDTH = "384"
 
-DEFAULT_CROP_START = 60
-DEFAULT_CROP_LENGTH = 180
+DEFAULT_cut_start = 60
+DEFAULT_cut_length = 180
 
 DEFAULT_OUTPUT_FILENAME_TEMPLATE = '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'
```
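The output filename template above is a Jinja-style string. A minimal rendering sketch follows; the sample values are invented for illustration, while in the tool they come from the show, pattern, and TMDB lookups:

```python
from jinja2 import Template

DEFAULT_OUTPUT_FILENAME_TEMPLATE = (
    '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}'
    '{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'
)

# Hypothetical values; the controller fills these from the database, CLI overrides, and TMDB.
rendered = Template(DEFAULT_OUTPUT_FILENAME_TEMPLATE).render(
    ffx_show_name="Example Show",
    ffx_index="S01E02",
    ffx_index_separator=" - ",
    ffx_episode_name="Pilot",
    ffx_indicator_separator=" ",
    ffx_indicator="[web]",
)
print(rendered)  # Example Show - S01E02 - Pilot [web]
```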
284
src/ffx/ffx.py
284
src/ffx/ffx.py
@@ -1,6 +1,6 @@
|
|||||||
#! /usr/bin/python3
|
#! /usr/bin/python3
|
||||||
|
|
||||||
import os, click, time, logging
|
import os, click, time, logging, shutil, subprocess
|
||||||
|
|
||||||
from ffx.configuration_controller import ConfigurationController
|
from ffx.configuration_controller import ConfigurationController
|
||||||
|
|
||||||
@@ -30,7 +30,9 @@ from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAU
|
|||||||
from ffx.filter.quality_filter import QualityFilter
|
from ffx.filter.quality_filter import QualityFilter
|
||||||
from ffx.filter.preset_filter import PresetFilter
|
from ffx.filter.preset_filter import PresetFilter
|
||||||
|
|
||||||
|
from ffx.filter.crop_filter import CropFilter
|
||||||
from ffx.filter.nlmeans_filter import NlmeansFilter
|
from ffx.filter.nlmeans_filter import NlmeansFilter
|
||||||
|
from ffx.filter.deinterlace_filter import DeinterlaceFilter
|
||||||
|
|
||||||
from ffx.constants import VERSION
|
from ffx.constants import VERSION
|
||||||
|
|
||||||
@@ -47,6 +49,11 @@ def ffx(ctx, database_file, verbose, dry_run):
|
|||||||
|
|
||||||
ctx.obj = {}
|
ctx.obj = {}
|
||||||
|
|
||||||
|
if ctx.invoked_subcommand in ('setup_dependencies', 'upgrade'):
|
||||||
|
ctx.obj['dry_run'] = dry_run
|
||||||
|
ctx.obj['verbosity'] = verbose
|
||||||
|
return
|
||||||
|
|
||||||
ctx.obj['config'] = ConfigurationController()
|
ctx.obj['config'] = ConfigurationController()
|
||||||
|
|
||||||
ctx.obj['database'] = databaseContext(databasePath=database_file
|
ctx.obj['database'] = databaseContext(databasePath=database_file
|
||||||
@@ -95,6 +102,82 @@ def help():
|
|||||||
click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
|
click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
|
||||||
|
|
||||||
|
|
||||||
|
def getRepoRootPath():
|
||||||
|
currentFilePath = os.path.abspath(__file__)
|
||||||
|
return os.path.dirname(os.path.dirname(os.path.dirname(currentFilePath)))
|
||||||
|
|
||||||
|
|
||||||
|
def getPrepareScriptPath():
|
||||||
|
return os.path.join(getRepoRootPath(), 'tools', 'prepare.sh')
|
||||||
|
|
||||||
|
|
||||||
|
def getBundleVenvDirectory():
|
||||||
|
return os.path.join(os.path.expanduser('~'), '.local', 'share', 'ffx.venv')
|
||||||
|
|
||||||
|
|
||||||
|
def getBundlePipPath():
|
||||||
|
return os.path.join(getBundleVenvDirectory(), 'bin', 'pip')
|
||||||
|
|
||||||
|
|
||||||
|
def getBundleRepoPath():
|
||||||
|
return getRepoRootPath()
|
||||||
|
|
||||||
|
|
||||||
|
@ffx.command(name='setup_dependencies')
|
||||||
|
@click.pass_context
|
||||||
|
@click.option('--check', is_flag=True, default=False, help='Only verify dependency readiness')
|
||||||
|
@click.argument('prepare_args', nargs=-1, type=click.UNPROCESSED)
|
||||||
|
def setup_dependencies(ctx, check, prepare_args):
|
||||||
|
prepareScriptPath = getPrepareScriptPath()
|
||||||
|
|
||||||
|
if not os.path.isfile(prepareScriptPath):
|
||||||
|
raise click.ClickException(f"Preparation script not found at {prepareScriptPath}")
|
||||||
|
|
||||||
|
commandSequence = ['bash', prepareScriptPath]
|
||||||
|
|
||||||
|
if check:
|
||||||
|
commandSequence.append('--check')
|
||||||
|
|
||||||
|
commandSequence += list(prepare_args)
|
||||||
|
|
||||||
|
if ctx.obj.get('dry_run', False):
|
||||||
|
click.echo(' '.join(commandSequence))
|
||||||
|
return
|
||||||
|
|
||||||
|
completed = subprocess.run(commandSequence)
|
||||||
|
ctx.exit(completed.returncode)
|
||||||
|
|
||||||
|
|
||||||
|
@ffx.command(name='upgrade')
|
||||||
|
@click.pass_context
|
||||||
|
@click.argument('branch', required=False, default='main')
|
||||||
|
def upgrade(ctx, branch):
|
||||||
|
bundleRepoPath = getBundleRepoPath()
|
||||||
|
bundlePipPath = getBundlePipPath()
|
||||||
|
|
||||||
|
if not os.path.isdir(bundleRepoPath):
|
||||||
|
raise click.ClickException(f"Bundle repository not found at {bundleRepoPath}")
|
||||||
|
|
||||||
|
if not os.path.isfile(bundlePipPath):
|
||||||
|
raise click.ClickException(f"Bundle pip not found at {bundlePipPath}")
|
||||||
|
|
||||||
|
commandSequences = [
|
||||||
|
['git', 'checkout', branch],
|
||||||
|
['git', 'pull'],
|
||||||
|
[bundlePipPath, 'install', '--editable', '.'],
|
||||||
|
]
|
||||||
|
|
||||||
|
if ctx.obj.get('dry_run', False):
|
||||||
|
for commandSequence in commandSequences:
|
||||||
|
click.echo(f"(cd {bundleRepoPath} && {' '.join(commandSequence)})")
|
||||||
|
return
|
||||||
|
|
||||||
|
for commandSequence in commandSequences:
|
||||||
|
completed = subprocess.run(commandSequence, cwd=bundleRepoPath)
|
||||||
|
if completed.returncode != 0:
|
||||||
|
ctx.exit(completed.returncode)
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
@ffx.command()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
@click.argument('filename', nargs=1)
|
@click.argument('filename', nargs=1)
|
||||||
@@ -181,11 +264,12 @@ def unmux(ctx,
|
|||||||
else:
|
else:
|
||||||
ctx.obj['logger'].info(f"\nUnmuxing file {fp.getFilename()}\n")
|
ctx.obj['logger'].info(f"\nUnmuxing file {fp.getFilename()}\n")
|
||||||
|
|
||||||
for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
|
# for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
|
||||||
|
for trackDescriptor in sourceMediaDescriptor.getTrackDescriptors():
|
||||||
|
|
||||||
if trackDescriptor.getType() == TrackType.SUBTITLE or not subtitles_only:
|
if trackDescriptor.getType() == TrackType.SUBTITLE or not subtitles_only:
|
||||||
|
|
||||||
# SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
# SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
||||||
targetPrefix = f"{targetLabel}{targetIndicator}_{trackDescriptor.getIndex()}_{trackDescriptor.getLanguage().threeLetter()}"
|
targetPrefix = f"{targetLabel}{targetIndicator}_{trackDescriptor.getIndex()}_{trackDescriptor.getLanguage().threeLetter()}"
|
||||||
|
|
||||||
td: TrackDisposition
|
td: TrackDisposition
|
||||||
@@ -211,6 +295,38 @@ def unmux(ctx,
|
|||||||
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
|
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
|
||||||
|
|
||||||
|
|
||||||
|
@ffx.command()
|
||||||
|
@click.pass_context
|
||||||
|
|
||||||
|
@click.argument('paths', nargs=-1)
|
||||||
|
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
|
||||||
|
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
|
||||||
|
def cropdetect(ctx,
|
||||||
|
paths,
|
||||||
|
nice,
|
||||||
|
cpu):
|
||||||
|
|
||||||
|
existingSourcePaths = [p for p in paths if os.path.isfile(p)]
|
||||||
|
ctx.obj['logger'].debug(f"\nUnmuxing {len(existingSourcePaths)} files")
|
||||||
|
|
||||||
|
ctx.obj['resource_limits'] = {}
|
||||||
|
ctx.obj['resource_limits']['niceness'] = nice
|
||||||
|
ctx.obj['resource_limits']['cpu_percent'] = cpu
|
||||||
|
|
||||||
|
for sourcePath in existingSourcePaths:
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
|
||||||
|
fp = FileProperties(ctx.obj, sourcePath)
|
||||||
|
cropParams = fp.findCropParams()
|
||||||
|
|
||||||
|
click.echo(cropParams)
|
||||||
|
|
||||||
|
except Exception as ex:
|
||||||
|
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
@ffx.command()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
|
|
||||||
@@ -272,9 +388,9 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
|
|||||||
|
|
||||||
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
|
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
|
||||||
|
|
||||||
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9 or av1)", show_default=True)
|
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9, av1 or h264)", show_default=True)
|
||||||
|
|
||||||
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9 encoder")
|
@click.option('-q', '--quality', type=str, default="", help=f"Quality settings to be used with VP9/H264 encoder")
|
||||||
@click.option('-p', '--preset', type=str, default="", help=f"Quality preset to be used with AV1 encoder")
|
@click.option('-p', '--preset', type=str, default="", help=f"Quality preset to be used with AV1 encoder")
|
||||||
|
|
||||||
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
|
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
|
||||||
@@ -296,10 +412,13 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
|
|||||||
|
|
||||||
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output streams order. Use format comma separated integers')
|
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output streams order. Use format comma separated integers')
|
||||||
|
|
||||||
@click.option("--crop", is_flag=False, flag_value="default", default="none")
|
@click.option("--crop", is_flag=False, flag_value="auto", default="none")
|
||||||
|
@click.option("--cut", is_flag=False, flag_value="default", default="none")
|
||||||
|
|
||||||
@click.option("--output-directory", type=str, default='')
|
@click.option("--output-directory", type=str, default='')
|
||||||
|
|
||||||
|
@click.option("--deinterlace", is_flag=False, flag_value="default", default="none")
|
||||||
|
|
||||||
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
|
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
|
||||||
@click.option("--denoise-use-hw", is_flag=True, default=False)
|
@click.option("--denoise-use-hw", is_flag=True, default=False)
|
||||||
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs more details.')
|
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs more details.')
|
||||||
@@ -324,6 +443,8 @@ def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
|
|||||||
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
|
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
|
||||||
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
|
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
|
||||||
|
|
||||||
|
@click.option('--rename-only', is_flag=True, default=False, help='Only renaming, no recoding')
|
||||||
|
|
||||||
def convert(ctx,
|
def convert(ctx,
|
||||||
paths,
|
paths,
|
||||||
label,
|
label,
|
||||||
@@ -350,8 +471,12 @@ def convert(ctx,
|
|||||||
rearrange_streams,
|
rearrange_streams,
|
||||||
|
|
||||||
crop,
|
crop,
|
||||||
|
cut,
|
||||||
|
|
||||||
output_directory,
|
output_directory,
|
||||||
|
|
||||||
|
deinterlace,
|
||||||
|
|
||||||
denoise,
|
denoise,
|
||||||
denoise_use_hw,
|
denoise_use_hw,
|
||||||
denoise_strength,
|
denoise_strength,
|
||||||
@@ -372,7 +497,8 @@ def convert(ctx,
|
|||||||
keep_mkvmerge_metadata,
|
keep_mkvmerge_metadata,
|
||||||
|
|
||||||
nice,
|
nice,
|
||||||
cpu):
|
cpu,
|
||||||
|
rename_only):
|
||||||
"""Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin
|
"""Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin
|
||||||
|
|
||||||
Files found under PATHS will be converted according to parameters.
|
Files found under PATHS will be converted according to parameters.
|
||||||
@@ -386,8 +512,9 @@ def convert(ctx,
|
|||||||
|
|
||||||
context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)
|
context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)
|
||||||
|
|
||||||
targetFormat = FfxController.DEFAULT_FILE_FORMAT
|
#HINT: quick and dirty override for h264, todo improve
|
||||||
targetExtension = FfxController.DEFAULT_FILE_EXTENSION
|
targetFormat = '' if context['video_encoder'] == VideoEncoder.H264 else FfxController.DEFAULT_FILE_FORMAT
|
||||||
|
targetExtension = 'mkv' if context['video_encoder'] == VideoEncoder.H264 else FfxController.DEFAULT_FILE_EXTENSION
|
||||||
|
|
||||||
context['use_tmdb'] = not no_tmdb
|
context['use_tmdb'] = not no_tmdb
|
||||||
context['use_pattern'] = not no_pattern
|
context['use_pattern'] = not no_pattern
|
||||||
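The override above ties the container choice to the selected encoder; a minimal standalone sketch of that decision (the helper name is illustrative, and the webm defaults are an assumption based on the .webm hints elsewhere in this diff):

    def pick_container(video_encoder: str, default_format: str = 'webm', default_extension: str = 'webm') -> tuple:
        # H264 goes into Matroska; an empty format string lets ffmpeg infer the muxer from the extension
        if video_encoder == 'h264':
            return '', 'mkv'
        return default_format, default_extension

    # pick_container('h264') -> ('', 'mkv')    pick_container('vp9') -> ('webm', 'webm')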
@@ -476,14 +603,6 @@ def convert(ctx,
|
|||||||
|
|
||||||
ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")
|
ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")
|
||||||
|
|
||||||
qualityTokens = quality.split(',')
|
|
||||||
q_list = [q for q in qualityTokens if q.isnumeric()]
|
|
||||||
ctx.obj['logger'].debug(f"Qualities: {q_list}")
|
|
||||||
|
|
||||||
presetTokens = preset.split(',')
|
|
||||||
p_list = [p for p in presetTokens if p.isnumeric()]
|
|
||||||
ctx.obj['logger'].debug(f"Presets: {p_list}")
|
|
||||||
|
|
||||||
|
|
||||||
context['bitrates'] = {}
|
context['bitrates'] = {}
|
||||||
context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
|
context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
|
||||||
@@ -494,26 +613,35 @@ def convert(ctx,
|
|||||||
ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
|
ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
|
||||||
ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")
|
ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")
|
||||||
|
|
||||||
|
#->
|
||||||
# Process crop parameters
|
# Process cut parameters
|
||||||
context['perform_crop'] = (crop != 'none')
|
context['perform_cut'] = (cut != 'none')
|
||||||
if context['perform_crop']:
|
if context['perform_cut']:
|
||||||
cTokens = crop.split(',')
|
cutTokens = cut.split(',')
|
||||||
if cTokens and len(cTokens) == 2:
|
if cutTokens and len(cutTokens) == 2:
|
||||||
context['crop_start'] = int(cTokens[0])
|
context['cut_start'] = int(cutTokens[0])
|
||||||
context['crop_length'] = int(cTokens[1])
|
context['cut_length'] = int(cutTokens[1])
|
||||||
ctx.obj['logger'].debug(f"Crop start={context['crop_start']} length={context['crop_length']}")
|
ctx.obj['logger'].debug(f"Cut start={context['cut_start']} length={context['cut_length']}")
|
||||||
|
|
||||||
|
|
||||||
tc = TmdbController() if context['use_tmdb'] else None
|
tc = TmdbController() if context['use_tmdb'] else None
|
||||||
|
|
||||||
qualityKwargs = {QualityFilter.QUALITY_KEY: quality}
|
|
||||||
|
qualityKwargs = {QualityFilter.QUALITY_KEY: str(quality)}
|
||||||
qf = QualityFilter(**qualityKwargs)
|
qf = QualityFilter(**qualityKwargs)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if context['video_encoder'] == VideoEncoder.AV1 and preset:
|
if context['video_encoder'] == VideoEncoder.AV1 and preset:
|
||||||
presetKwargs = {PresetFilter.PRESET_KEY: preset}
|
presetKwargs = {PresetFilter.PRESET_KEY: preset}
|
||||||
PresetFilter(**presetKwargs)
|
PresetFilter(**presetKwargs)
|
||||||
|
|
||||||
|
cf = None
|
||||||
|
# if crop != 'none':
|
||||||
|
if crop == 'auto':
|
||||||
|
cropKwargs = {}
|
||||||
|
cf = CropFilter(**cropKwargs)
|
||||||
|
|
||||||
denoiseKwargs = {}
|
denoiseKwargs = {}
|
||||||
if denoise_strength:
|
if denoise_strength:
|
||||||
denoiseKwargs[NlmeansFilter.STRENGTH_KEY] = denoise_strength
|
denoiseKwargs[NlmeansFilter.STRENGTH_KEY] = denoise_strength
|
||||||
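The --cut parsing earlier in this hunk expects a 'start,length' pair, consumed later as ffmpeg -ss/-t values; a small standalone sketch of that parsing, for illustration only:

    def parse_cut(cut: str) -> dict:
        # 'none' disables cutting; otherwise split 'start,length' into two integers
        if cut == 'none':
            return {'perform_cut': False}
        start, length = (int(token) for token in cut.split(','))
        return {'perform_cut': True, 'cut_start': start, 'cut_length': length}

    # parse_cut('60,300') -> {'perform_cut': True, 'cut_start': 60, 'cut_length': 300}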
@@ -528,6 +656,9 @@ def convert(ctx,
|
|||||||
if denoise != 'none' or denoiseKwargs:
|
if denoise != 'none' or denoiseKwargs:
|
||||||
NlmeansFilter(**denoiseKwargs)
|
NlmeansFilter(**denoiseKwargs)
|
||||||
|
|
||||||
|
if deinterlace != 'none':
|
||||||
|
DeinterlaceFilter()
|
||||||
|
|
||||||
chainYield = list(qf.getChainYield())
|
chainYield = list(qf.getChainYield())
|
||||||
|
|
||||||
ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(chainYield)} jobs")
|
ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(chainYield)} jobs")
|
||||||
@@ -548,8 +679,19 @@ def convert(ctx,
|
|||||||
|
|
||||||
targetSuffices = {}
|
targetSuffices = {}
|
||||||
|
|
||||||
|
mediaFileProperties = FileProperties(context, sourcePath)
|
||||||
|
|
||||||
mediaFileProperties = FileProperties(context, sourceFilename)
|
|
||||||
|
# if not cf is None:
|
||||||
|
#
|
||||||
|
cropArguments = {} if cf is None else mediaFileProperties.findCropArguments()
|
||||||
|
#
|
||||||
|
# ctx.obj['logger'].info(f"\nSetting crop arguments: ouput width: {cropArguments[CropFilter.OUTPUT_WIDTH_KEY]} "
|
||||||
|
# + f"height: {cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]} "
|
||||||
|
# + f"offset x: {cropArguments[CropFilter.OFFSET_X_KEY]} "
|
||||||
|
# + f"y: {cropArguments[CropFilter.OFFSET_Y_KEY]}")
|
||||||
|
#
|
||||||
|
# cf.setArguments(**cropArguments)
|
||||||
|
|
||||||
|
|
||||||
ssc = ShiftedSeasonController(context)
|
ssc = ShiftedSeasonController(context)
|
||||||
@@ -572,6 +714,16 @@ def convert(ctx,
|
|||||||
|
|
||||||
sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()
|
sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()
|
||||||
|
|
||||||
|
|
||||||
|
if ([smd for smd in sourceMediaDescriptor.getSubtitleTracks()
|
||||||
|
if smd.getCodec() == TrackCodec.ASS]
|
||||||
|
and [amd for amd in sourceMediaDescriptor.getAttachmentTracks()
|
||||||
|
if amd.getCodec() == TrackCodec.TTF]):
|
||||||
|
|
||||||
|
targetFormat = ''
|
||||||
|
targetExtension = 'mkv'
|
||||||
|
|
||||||
|
|
||||||
#HINT: This is None if the filename did not match anything in database
|
#HINT: This is None if the filename did not match anything in database
|
||||||
currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None
|
currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None
|
||||||
|
|
||||||
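The fallback above switches to Matroska whenever ASS subtitles travel together with TTF font attachments, which the .webm container cannot carry; condensed into a sketch with plain strings standing in for the TrackCodec members:

    def needs_mkv(subtitle_codecs, attachment_codecs) -> bool:
        return 'ass' in subtitle_codecs and 'ttf' in attachment_codecs

    # needs_mkv(['ass', 'subrip'], ['ttf']) -> True  => targetFormat = '' and targetExtension = 'mkv'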
@@ -599,18 +751,43 @@ def convert(ctx,
|
|||||||
checkUniqueDispositions(context, targetMediaDescriptor)
|
checkUniqueDispositions(context, targetMediaDescriptor)
|
||||||
currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)
|
currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)
|
||||||
|
|
||||||
|
|
||||||
|
# Check if source and target track descriptors match
|
||||||
|
sourceTrackDescriptorList = sourceMediaDescriptor.getTrackDescriptors()
|
||||||
|
targetTrackDescriptorList = targetMediaDescriptor.getTrackDescriptors()
|
||||||
|
|
||||||
|
for ttd in targetTrackDescriptorList:
|
||||||
|
|
||||||
|
tti = ttd.getIndex()
|
||||||
|
ttsi = ttd.getSourceIndex()
|
||||||
|
|
||||||
|
stList = [st for st in sourceTrackDescriptorList if st.getIndex() == ttsi]
|
||||||
|
std = stList[0] if stList else None
|
||||||
|
|
||||||
|
if std is None:
|
||||||
|
raise click.ClickException(f"Target track #{tti} refering to non-existent source track #{ttsi}")
|
||||||
|
|
||||||
|
ttType = ttd.getType()
|
||||||
|
stType = std.getType()
|
||||||
|
|
||||||
|
if ttType != stType:
|
||||||
|
raise click.ClickException(f"Target track #{tti} type ({ttType.label()}) not matching source track #{ttsi} type ({stType.label()})")
|
||||||
|
|
||||||
|
|
||||||
if context['import_subtitles']:
|
if context['import_subtitles']:
|
||||||
targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
|
targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
|
||||||
context['subtitle_prefix'],
|
context['subtitle_prefix'],
|
||||||
showSeason,
|
showSeason,
|
||||||
showEpisode)
|
showEpisode)
|
||||||
|
|
||||||
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
# ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
||||||
|
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")
|
||||||
|
|
||||||
if cliOverrides:
|
if cliOverrides:
|
||||||
targetMediaDescriptor.applyOverrides(cliOverrides)
|
targetMediaDescriptor.applyOverrides(cliOverrides)
|
||||||
|
|
||||||
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
# ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
||||||
|
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")
|
||||||
|
|
||||||
ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")
|
ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")
|
||||||
|
|
||||||
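The new consistency check above reads: every target track must reference an existing source track of the same type. A reduced sketch with plain dicts in place of the TrackDescriptor objects:

    def check_track_mapping(source_tracks, target_tracks):
        by_index = {st['index']: st for st in source_tracks}
        for tt in target_tracks:
            st = by_index.get(tt['source_index'])
            if st is None:
                raise ValueError(f"Target track #{tt['index']} refers to non-existent source track #{tt['source_index']}")
            if st['type'] != tt['type']:
                raise ValueError(f"Target track #{tt['index']} type ({tt['type']}) does not match source track type ({st['type']})")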
@@ -678,12 +855,8 @@ def convert(ctx,
|
|||||||
|
|
||||||
for chainIteration in chainYield:
|
for chainIteration in chainYield:
|
||||||
|
|
||||||
|
|
||||||
ctx.obj['logger'].debug(f"\nchain iteration: {chainIteration}\n")
|
ctx.obj['logger'].debug(f"\nchain iteration: {chainIteration}\n")
|
||||||
|
|
||||||
# if len(q_list) > 1:
|
|
||||||
# targetSuffices['q'] = f"q{q}"
|
|
||||||
|
|
||||||
chainVariant = '-'.join([fy['variant'] for fy in chainIteration])
|
chainVariant = '-'.join([fy['variant'] for fy in chainIteration])
|
||||||
|
|
||||||
ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} variant={chainVariant}")
|
ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} variant={chainVariant}")
|
||||||
@@ -692,10 +865,10 @@ def convert(ctx,
|
|||||||
ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
|
ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
|
||||||
ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")
|
ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")
|
||||||
|
|
||||||
# targetFileBasename = mediaFileProperties.assembleTargetFileBasename(label,
|
|
||||||
# q if len(q_list) > 1 else -1,
|
# targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
|
||||||
#
|
|
||||||
targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
|
targetFileBasename = (label or sourceFileBasename) if context['use_tmdb'] else sourceFileBasename
|
||||||
|
|
||||||
|
|
||||||
targetFilenameTokens = [targetFileBasename]
|
targetFilenameTokens = [targetFileBasename]
|
||||||
@@ -703,34 +876,31 @@ def convert(ctx,
|
|||||||
if 'se' in targetSuffices.keys():
|
if 'se' in targetSuffices.keys():
|
||||||
targetFilenameTokens += [targetSuffices['se']]
|
targetFilenameTokens += [targetSuffices['se']]
|
||||||
|
|
||||||
# if 'q' in targetSuffices.keys():
|
|
||||||
# targetFilenameTokens += [targetSuffices['q']]
|
|
||||||
for filterYield in chainIteration:
|
for filterYield in chainIteration:
|
||||||
|
|
||||||
# filterIdentifier = filterYield['identifier']
|
|
||||||
# filterParameters = filterYield['parameters']
|
|
||||||
# filterSuffices = filterYield['suffices']
|
|
||||||
|
|
||||||
targetFilenameTokens += filterYield['suffices']
|
targetFilenameTokens += filterYield['suffices']
|
||||||
|
|
||||||
#TODO #387
|
targetFilename = f"{'_'.join(targetFilenameTokens)}.{sourceFilenameExtension if rename_only else targetExtension}"
|
||||||
# targetFilename = ((f"{sourceFileBasename}_q{q}" if len(q_list) > 1 else sourceFileBasename)
|
|
||||||
# if context['use_tmdb'] else targetFileBasename)
|
|
||||||
|
|
||||||
targetFilename = f"{'_'.join(targetFilenameTokens)}.{targetExtension}"
|
if sourceFilename == targetFilename:
|
||||||
|
targetFilename = f"out_{targetFilename}"
|
||||||
|
|
||||||
targetPath = os.path.join(output_directory if output_directory else sourceDirectory, targetFilename)
|
|
||||||
|
|
||||||
#TODO: adjust target extension
|
targetPath = os.path.join(output_directory, targetFilename) if output_directory else targetFilename
|
||||||
|
|
||||||
ctx.obj['logger'].info(f"Creating file {targetFilename}")
|
ctx.obj['logger'].info(f"Creating file {targetFilename}")
|
||||||
|
|
||||||
fc.runJob(sourcePath,
|
|
||||||
targetPath,
|
|
||||||
targetFormat,
|
|
||||||
context['video_encoder'],
|
|
||||||
chainIteration)
|
|
||||||
|
|
||||||
#TODO: click.confirm('Warning! This file is not compliant to the defined source schema! Do you want to continue?', abort=True)
|
if rename_only:
|
||||||
|
shutil.copyfile(sourcePath, targetPath)
|
||||||
|
else:
|
||||||
|
fc.runJob(sourcePath,
|
||||||
|
targetPath,
|
||||||
|
targetFormat,
|
||||||
|
chainIteration,
|
||||||
|
cropArguments,
|
||||||
|
currentPattern)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
endTime = time.perf_counter()
|
endTime = time.perf_counter()
|
||||||
ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")
|
ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")
|
||||||
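The target-path handling above comes down to three rules: keep the source extension when --rename-only is set, prefix "out_" on a name collision, and only join with --output-directory when one was given. A compact sketch (the helper itself is illustrative):

    import os

    def build_target_path(tokens, source_filename, source_ext, target_ext, rename_only, output_directory=''):
        name = f"{'_'.join(tokens)}.{source_ext if rename_only else target_ext}"
        if name == source_filename:
            name = f"out_{name}"          # avoid clobbering the source file in place
        return os.path.join(output_directory, name) if output_directory else name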
|
|||||||
@@ -1,18 +1,22 @@
|
|||||||
import os, click
|
import os, click
|
||||||
|
from logging import Logger
|
||||||
|
|
||||||
|
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
|
||||||
|
|
||||||
from ffx.media_descriptor import MediaDescriptor
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
|
||||||
from ffx.audio_layout import AudioLayout
|
from ffx.audio_layout import AudioLayout
|
||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
|
from ffx.track_codec import TrackCodec
|
||||||
from ffx.video_encoder import VideoEncoder
|
from ffx.video_encoder import VideoEncoder
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
from ffx.track_disposition import TrackDisposition
|
|
||||||
from ffx.track_codec import TrackCodec
|
|
||||||
|
|
||||||
from ffx.constants import DEFAULT_CROP_START, DEFAULT_CROP_LENGTH
|
from ffx.constants import DEFAULT_cut_start, DEFAULT_cut_length
|
||||||
|
|
||||||
from ffx.filter.quality_filter import QualityFilter
|
from ffx.filter.quality_filter import QualityFilter
|
||||||
from ffx.filter.preset_filter import PresetFilter
|
from ffx.filter.preset_filter import PresetFilter
|
||||||
|
from ffx.filter.crop_filter import CropFilter
|
||||||
|
|
||||||
|
from ffx.model.pattern import Pattern
|
||||||
|
|
||||||
|
|
||||||
class FfxController():
|
class FfxController():
|
||||||
@@ -31,8 +35,7 @@ class FfxController():
|
|||||||
|
|
||||||
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
|
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
|
||||||
|
|
||||||
#!
|
# SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
|
||||||
SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
|
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self,
|
||||||
context : dict,
|
context : dict,
|
||||||
@@ -40,12 +43,15 @@ class FfxController():
|
|||||||
sourceMediaDescriptor : MediaDescriptor = None):
|
sourceMediaDescriptor : MediaDescriptor = None):
|
||||||
|
|
||||||
self.__context = context
|
self.__context = context
|
||||||
self.__sourceMediaDescriptor = sourceMediaDescriptor
|
|
||||||
self.__targetMediaDescriptor = targetMediaDescriptor
|
self.__targetMediaDescriptor = targetMediaDescriptor
|
||||||
|
self.__sourceMediaDescriptor = sourceMediaDescriptor
|
||||||
|
|
||||||
self.__configurationData = self.__context['config'].getData()
|
self.__mdcs = MediaDescriptorChangeSet(context,
|
||||||
|
targetMediaDescriptor,
|
||||||
|
sourceMediaDescriptor)
|
||||||
|
|
||||||
self.__logger = context['logger']
|
self.__logger: Logger = context['logger']
|
||||||
|
|
||||||
|
|
||||||
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
|
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
|
||||||
@@ -55,6 +61,14 @@ class FfxController():
|
|||||||
'-pix_fmt', 'yuv420p10le']
|
'-pix_fmt', 'yuv420p10le']
|
||||||
|
|
||||||
|
|
||||||
|
# -c:v libx264 -preset slow -crf 17
|
||||||
|
def generateH264Tokens(self, quality, subIndex : int = 0):
|
||||||
|
|
||||||
|
return [f"-c:v:{int(subIndex)}", 'libx264',
|
||||||
|
"-preset", "slow",
|
||||||
|
'-crf', str(quality)]
|
||||||
|
|
||||||
|
|
||||||
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
|
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
|
||||||
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
|
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
|
||||||
|
|
||||||
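The new generateH264Tokens() mirrors the commented ffmpeg line above it; for orientation, a standalone re-implementation and the token list it produces for the first video stream at CRF 17:

    def h264_tokens(quality, sub_index=0):
        return [f"-c:v:{int(sub_index)}", 'libx264', '-preset', 'slow', '-crf', str(quality)]

    assert h264_tokens(17) == ['-c:v:0', 'libx264', '-preset', 'slow', '-crf', '17']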
@@ -82,21 +96,28 @@ class FfxController():
|
|||||||
'-auto-alt-ref', '1',
|
'-auto-alt-ref', '1',
|
||||||
'-lag-in-frames', '25']
|
'-lag-in-frames', '25']
|
||||||
|
|
||||||
|
def generateVideoCopyTokens(self, subIndex):
|
||||||
|
return [f"-c:v:{int(subIndex)}",
|
||||||
|
'copy']
|
||||||
|
|
||||||
|
|
||||||
def generateCropTokens(self):
|
def generateCropTokens(self):
|
||||||
|
|
||||||
if 'crop_start' in self.__context.keys() and 'crop_length' in self.__context.keys():
|
if 'cut_start' in self.__context.keys() and 'cut_length' in self.__context.keys():
|
||||||
cropStart = int(self.__context['crop_start'])
|
cropStart = int(self.__context['cut_start'])
|
||||||
cropLength = int(self.__context['crop_length'])
|
cropLength = int(self.__context['cut_length'])
|
||||||
else:
|
else:
|
||||||
cropStart = DEFAULT_CROP_START
|
cropStart = DEFAULT_cut_start
|
||||||
cropLength = DEFAULT_CROP_LENGTH
|
cropLength = DEFAULT_cut_length
|
||||||
|
|
||||||
return ['-ss', str(cropStart), '-t', str(cropLength)]
|
return ['-ss', str(cropStart), '-t', str(cropLength)]
|
||||||
|
|
||||||
|
|
||||||
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
|
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
|
||||||
outputFilePath = f"{filePathBase}{'.'+str(ext) if ext else ''}"
|
|
||||||
|
self.__logger.debug(f"FfxController.generateOutputTokens(): base='{filePathBase}' format='{format}' ext='{ext}'")
|
||||||
|
|
||||||
|
outputFilePath = f"{filePathBase}{('.'+str(ext)) if ext else ''}"
|
||||||
if format:
|
if format:
|
||||||
return ['-f', format, outputFilePath]
|
return ['-f', format, outputFilePath]
|
||||||
else:
|
else:
|
||||||
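generateOutputTokens() above follows one rule: an explicit format forces '-f', otherwise the extension (if any) is appended and ffmpeg infers the muxer from it. The same rule as a standalone sketch with two example calls:

    def output_tokens(file_path_base, fmt='', ext=''):
        output_file_path = f"{file_path_base}{('.' + str(ext)) if ext else ''}"
        return ['-f', fmt, output_file_path] if fmt else [output_file_path]

    # output_tokens('movie', fmt='webm') -> ['-f', 'webm', 'movie']
    # output_tokens('movie', ext='mkv')  -> ['movie.mkv']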
@@ -108,7 +129,8 @@ class FfxController():
|
|||||||
|
|
||||||
audioTokens = []
|
audioTokens = []
|
||||||
|
|
||||||
targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
|
# targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
|
||||||
|
targetAudioTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO)
|
||||||
|
|
||||||
trackSubIndex = 0
|
trackSubIndex = 0
|
||||||
for trackDescriptor in targetAudioTrackDescriptors:
|
for trackDescriptor in targetAudioTrackDescriptors:
|
||||||
@@ -144,115 +166,74 @@ class FfxController():
|
|||||||
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
|
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
|
||||||
f"-b:a:{trackSubIndex}",
|
f"-b:a:{trackSubIndex}",
|
||||||
self.__context['bitrates']['ac3']]
|
self.__context['bitrates']['ac3']]
|
||||||
|
|
||||||
|
# -ac 5 ?
|
||||||
|
if trackAudioLayout == AudioLayout.LAYOUT_5_0:
|
||||||
|
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||||
|
'libopus',
|
||||||
|
f"-filter:a:{trackSubIndex}",
|
||||||
|
'channelmap=channel_layout=5.0',
|
||||||
|
f"-b:a:{trackSubIndex}",
|
||||||
|
self.__context['bitrates']['ac3']]
|
||||||
|
|
||||||
trackSubIndex += 1
|
trackSubIndex += 1
|
||||||
return audioTokens
|
return audioTokens
|
||||||
|
|
||||||
|
|
||||||
# -disposition:s:0 default -disposition:s:1 0
|
|
||||||
def generateDispositionTokens(self):
|
|
||||||
|
|
||||||
targetTrackDescriptors = self.__targetMediaDescriptor.getAllTrackDescriptors()
|
|
||||||
|
|
||||||
sourceTrackDescriptors = ([] if self.__sourceMediaDescriptor is None
|
|
||||||
else self.__sourceMediaDescriptor.getAllTrackDescriptors())
|
|
||||||
|
|
||||||
dispositionTokens = []
|
|
||||||
|
|
||||||
for trackIndex in range(len(targetTrackDescriptors)):
|
|
||||||
|
|
||||||
td = targetTrackDescriptors[trackIndex]
|
|
||||||
|
|
||||||
#HINT: No dispositions for pgs subtitle tracks that have no external file source
|
|
||||||
if (td.getExternalSourceFilePath()
|
|
||||||
or td.getCodec() != TrackCodec.PGS):
|
|
||||||
|
|
||||||
subIndex = td.getSubIndex()
|
|
||||||
streamIndicator = td.getType().indicator()
|
|
||||||
|
|
||||||
|
|
||||||
sourceDispositionSet = sourceTrackDescriptors[td.getSourceIndex()].getDispositionSet() if sourceTrackDescriptors else set()
|
|
||||||
|
|
||||||
#TODO: Discard everything that is already present in the targetDescriptor (?)
|
|
||||||
sourceDispositionSet.discard(TrackDisposition.DEFAULT)
|
|
||||||
|
|
||||||
dispositionSet = td.getDispositionSet() | sourceDispositionSet
|
|
||||||
|
|
||||||
if dispositionSet:
|
|
||||||
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in dispositionSet])]
|
|
||||||
else:
|
|
||||||
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
|
|
||||||
|
|
||||||
return dispositionTokens
|
|
||||||
|
|
||||||
|
|
||||||
def generateMetadataTokens(self):
|
|
||||||
|
|
||||||
metadataTokens = []
|
|
||||||
|
|
||||||
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
|
|
||||||
|
|
||||||
signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
|
|
||||||
removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
|
|
||||||
removeTrackKeys = metadataConfiguration['streams']['remove'] if 'streams' in metadataConfiguration.keys() and 'remove' in metadataConfiguration['streams'].keys() else []
|
|
||||||
|
|
||||||
mediaTags = {k:v for k,v in self.__targetMediaDescriptor.getTags().items() if not k in removeGlobalKeys}
|
|
||||||
|
|
||||||
if (not 'no_signature' in self.__context.keys()
|
|
||||||
or not self.__context['no_signature']):
|
|
||||||
outputMediaTags = mediaTags | signatureTags
|
|
||||||
else:
|
|
||||||
outputMediaTags = mediaTags
|
|
||||||
|
|
||||||
for tagKey, tagValue in outputMediaTags.items():
|
|
||||||
metadataTokens += [f"-metadata:g",
|
|
||||||
f"{tagKey}={tagValue}"]
|
|
||||||
|
|
||||||
for removeKey in removeGlobalKeys:
|
|
||||||
metadataTokens += [f"-metadata:g",
|
|
||||||
f"{removeKey}="]
|
|
||||||
|
|
||||||
|
|
||||||
removeMkvmergeMetadata = (not 'keep_mkvmerge_metadata' in self.__context.keys()
|
|
||||||
or not self.__context['keep_mkvmerge_metadata'])
|
|
||||||
|
|
||||||
#HINT: With current ffmpeg version track metadata tags are not passed to the outfile
|
|
||||||
for td in self.__targetMediaDescriptor.getAllTrackDescriptors():
|
|
||||||
|
|
||||||
typeIndicator = td.getType().indicator()
|
|
||||||
subIndex = td.getSubIndex()
|
|
||||||
|
|
||||||
for tagKey, tagValue in td.getTags().items():
|
|
||||||
|
|
||||||
if not tagKey in removeTrackKeys:
|
|
||||||
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
|
|
||||||
f"{tagKey}={tagValue}"]
|
|
||||||
|
|
||||||
for removeKey in removeTrackKeys:
|
|
||||||
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
|
|
||||||
f"{removeKey}="]
|
|
||||||
|
|
||||||
|
|
||||||
return metadataTokens
|
|
||||||
|
|
||||||
|
|
||||||
def runJob(self,
|
def runJob(self,
|
||||||
sourcePath,
|
sourcePath,
|
||||||
targetPath,
|
targetPath,
|
||||||
targetFormat: str = '',
|
targetFormat: str = '',
|
||||||
videoEncoder: VideoEncoder = VideoEncoder.VP9,
|
chainIteration: list = [],
|
||||||
chainIteration: list = []):
|
cropArguments: dict = {},
|
||||||
|
currentPattern: Pattern = None):
|
||||||
# quality: int = DEFAULT_QUALITY,
|
# quality: int = DEFAULT_QUALITY,
|
||||||
# preset: int = DEFAULT_AV1_PRESET):
|
# preset: int = DEFAULT_AV1_PRESET):
|
||||||
|
|
||||||
|
|
||||||
|
videoEncoder: VideoEncoder = self.__context.get('video_encoder', VideoEncoder.VP9)
|
||||||
|
|
||||||
|
|
||||||
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
|
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
|
||||||
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
|
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
|
||||||
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
|
|
||||||
|
|
||||||
quality = qualityFilters[0]['parameters']['quality'] if qualityFilters else QualityFilter.DEFAULT_QUALITY
|
cropFilters = [fy for fy in chainIteration if fy['identifier'] == 'crop']
|
||||||
|
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
|
||||||
|
deinterlaceFilters = [fy for fy in chainIteration if fy['identifier'] == 'bwdif']
|
||||||
|
|
||||||
|
|
||||||
|
if qualityFilters and (quality := qualityFilters[0]['parameters']['quality']):
|
||||||
|
self.__logger.info(f"Setting quality {quality} from command line parameter")
|
||||||
|
elif (quality := currentPattern.quality):
|
||||||
|
self.__logger.info(f"Setting quality {quality} from pattern default")
|
||||||
|
else:
|
||||||
|
quality = (QualityFilter.DEFAULT_H264_QUALITY
|
||||||
|
if (videoEncoder == VideoEncoder.H264)
|
||||||
|
else QualityFilter.DEFAULT_VP9_QUALITY)
|
||||||
|
self.__logger.info(f"Setting quality {quality} from default")
|
||||||
|
|
||||||
|
|
||||||
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
|
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
|
||||||
|
|
||||||
|
|
||||||
denoiseTokens = denoiseFilters[0]['tokens'] if denoiseFilters else []
|
filterParamTokens = []
|
||||||
|
|
||||||
|
if cropArguments:
|
||||||
|
|
||||||
|
cropParams = (f"crop="
|
||||||
|
+ f"{cropArguments[CropFilter.OUTPUT_WIDTH_KEY]}"
|
||||||
|
+ f":{cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]}"
|
||||||
|
+ f":{cropArguments[CropFilter.OFFSET_X_KEY]}"
|
||||||
|
+ f":{cropArguments[CropFilter.OFFSET_Y_KEY]}")
|
||||||
|
|
||||||
|
filterParamTokens.append(cropParams)
|
||||||
|
|
||||||
|
filterParamTokens.extend(denoiseFilters[0]['tokens'] if denoiseFilters else [])
|
||||||
|
filterParamTokens.extend(deinterlaceFilters[0]['tokens'] if deinterlaceFilters else [])
|
||||||
|
|
||||||
|
deinterlaceFilters
|
||||||
|
|
||||||
|
filterTokens = ['-vf', ', '.join(filterParamTokens)] if filterParamTokens else []
|
||||||
|
|
||||||
|
|
||||||
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
||||||
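The walrus-based lookup above resolves the quality value in order of precedence: explicit quality filter, then the pattern's stored default, then an encoder-specific constant (AV1 falls through to the VP9 default). The same precedence written out plainly, with the constants taken from the QualityFilter changes later in this diff:

    DEFAULT_VP9_QUALITY = 32
    DEFAULT_H264_QUALITY = 17

    def resolve_quality(filter_quality, pattern_quality, video_encoder):
        if filter_quality:
            return int(filter_quality)
        if pattern_quality:
            return int(pattern_quality)
        return DEFAULT_H264_QUALITY if video_encoder == 'h264' else DEFAULT_VP9_QUALITY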
@@ -261,19 +242,22 @@ class FfxController():
|
|||||||
|
|
||||||
commandSequence = (commandTokens
|
commandSequence = (commandTokens
|
||||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
|
||||||
+ self.generateDispositionTokens())
|
+ self.__mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
# Optional tokens
|
# Optional tokens
|
||||||
commandSequence += self.generateMetadataTokens()
|
commandSequence += self.__mdcs.generateMetadataTokens()
|
||||||
commandSequence += denoiseTokens
|
commandSequence += filterTokens
|
||||||
|
|
||||||
commandSequence += (self.generateAudioEncodingTokens()
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
+ self.generateAV1Tokens(int(quality), int(preset))
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
+ self.generateAudioEncodingTokens())
|
if td.getCodec() != TrackCodec.PNG:
|
||||||
|
commandSequence += self.generateAV1Tokens(int(quality), int(preset))
|
||||||
|
|
||||||
if self.__context['perform_crop']:
|
commandSequence += self.generateAudioEncodingTokens()
|
||||||
commandSequence += FfxController.generateCropTokens()
|
|
||||||
|
if self.__context['perform_cut']:
|
||||||
|
commandSequence += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence += self.generateOutputTokens(targetPath,
|
commandSequence += self.generateOutputTokens(targetPath,
|
||||||
targetFormat)
|
targetFormat)
|
||||||
@@ -284,6 +268,37 @@ class FfxController():
|
|||||||
executeProcess(commandSequence, context = self.__context)
|
executeProcess(commandSequence, context = self.__context)
|
||||||
|
|
||||||
|
|
||||||
|
if videoEncoder == VideoEncoder.H264:
|
||||||
|
|
||||||
|
commandSequence = (commandTokens
|
||||||
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
|
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
|
||||||
|
+ self.__mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
|
# Optional tokens
|
||||||
|
commandSequence += self.__mdcs.generateMetadataTokens()
|
||||||
|
commandSequence += filterTokens
|
||||||
|
|
||||||
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
|
if td.getCodec() != TrackCodec.PNG:
|
||||||
|
commandSequence += self.generateH264Tokens(int(quality))
|
||||||
|
|
||||||
|
commandSequence += self.generateAudioEncodingTokens()
|
||||||
|
|
||||||
|
if self.__context['perform_cut']:
|
||||||
|
commandSequence += self.generateCropTokens()
|
||||||
|
|
||||||
|
commandSequence += self.generateOutputTokens(targetPath,
|
||||||
|
targetFormat)
|
||||||
|
|
||||||
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
|
||||||
|
|
||||||
|
if not self.__context['dry_run']:
|
||||||
|
executeProcess(commandSequence, context = self.__context)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if videoEncoder == VideoEncoder.VP9:
|
if videoEncoder == VideoEncoder.VP9:
|
||||||
|
|
||||||
commandSequence1 = (commandTokens
|
commandSequence1 = (commandTokens
|
||||||
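Each encoder branch assembles a single ffmpeg invocation from the same building blocks; VP9 additionally runs an analysis pass into the null muxer before the real encode. A minimal two-pass sketch (direct subprocess calls and paths are illustrative; the actual code routes everything through executeProcess with niceness and CPU limits):

    import subprocess

    def encode_vp9_two_pass(source, target, crf):
        common = ['ffmpeg', '-y', '-i', source, '-c:v', 'libvpx-vp9', '-b:v', '0', '-crf', str(crf)]
        # pass 1: analysis only, audio dropped, output discarded via the null muxer
        subprocess.run(common + ['-pass', '1', '-an', '-f', 'null', '/dev/null'], check=True)
        # pass 2: actual encode using the first-pass log
        subprocess.run(common + ['-pass', '2', target], check=True)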
@@ -294,11 +309,14 @@ class FfxController():
|
|||||||
# the required bitrate for the second run is determined and recorded
|
# the required bitrate for the second run is determined and recorded
|
||||||
# TODO: Results seem to be slightly better with first pass omitted,
|
# TODO: Results seem to be slightly better with first pass omitted,
|
||||||
# Confirm or find better filter settings for 2-pass
|
# Confirm or find better filter settings for 2-pass
|
||||||
# commandSequence1 += self.__context['denoiser'].generateDenoiseTokens()
|
# commandSequence1 += self.__context['denoiser'].generatefilterTokens()
|
||||||
|
|
||||||
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
|
if td.getCodec() != TrackCodec.PNG:
|
||||||
|
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
|
||||||
|
|
||||||
if self.__context['perform_crop']:
|
if self.__context['perform_cut']:
|
||||||
commandSequence1 += self.generateCropTokens()
|
commandSequence1 += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence1 += FfxController.NULL_TOKENS
|
commandSequence1 += FfxController.NULL_TOKENS
|
||||||
@@ -313,16 +331,21 @@ class FfxController():
|
|||||||
|
|
||||||
commandSequence2 = (commandTokens
|
commandSequence2 = (commandTokens
|
||||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
+ self.__targetMediaDescriptor.getInputMappingTokens(sourceMediaDescriptor = self.__sourceMediaDescriptor)
|
||||||
+ self.generateDispositionTokens())
|
+ self.__mdcs.generateDispositionTokens())
|
||||||
|
|
||||||
# Optional tokens
|
# Optional tokens
|
||||||
commandSequence2 += self.generateMetadataTokens()
|
commandSequence2 += self.__mdcs.generateMetadataTokens()
|
||||||
commandSequence2 += denoiseTokens
|
commandSequence2 += filterTokens
|
||||||
|
|
||||||
commandSequence2 += self.generateVP9Pass2Tokens(int(quality)) + self.generateAudioEncodingTokens()
|
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||||
|
#HINT: Attached thumbnails are not supported by .webm container format
|
||||||
|
if td.getCodec() != TrackCodec.PNG:
|
||||||
|
commandSequence2 += self.generateVP9Pass2Tokens(int(quality))
|
||||||
|
|
||||||
if self.__context['perform_crop']:
|
commandSequence2 += self.generateAudioEncodingTokens()
|
||||||
|
|
||||||
|
if self.__context['perform_cut']:
|
||||||
commandSequence2 += self.generateCropTokens()
|
commandSequence2 += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence2 += self.generateOutputTokens(targetPath,
|
commandSequence2 += self.generateOutputTokens(targetPath,
|
||||||
|
|||||||
@@ -3,6 +3,8 @@ import os, re, json
|
|||||||
from .media_descriptor import MediaDescriptor
|
from .media_descriptor import MediaDescriptor
|
||||||
from .pattern_controller import PatternController
|
from .pattern_controller import PatternController
|
||||||
|
|
||||||
|
from ffx.filter.crop_filter import CropFilter
|
||||||
|
|
||||||
from .process import executeProcess
|
from .process import executeProcess
|
||||||
|
|
||||||
from ffx.model.pattern import Pattern
|
from ffx.model.pattern import Pattern
|
||||||
@@ -16,6 +18,8 @@ class FileProperties():
|
|||||||
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
|
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
|
||||||
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
|
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
|
||||||
|
|
||||||
|
CROPDETECT_PATTERN = 'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'
|
||||||
|
|
||||||
DEFAULT_INDEX_DIGITS = 3
|
DEFAULT_INDEX_DIGITS = 3
|
||||||
|
|
||||||
def __init__(self, context, sourcePath):
|
def __init__(self, context, sourcePath):
|
||||||
@@ -174,6 +178,49 @@ class FileProperties():
|
|||||||
return json.loads(ffprobeOutput)['streams']
|
return json.loads(ffprobeOutput)['streams']
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def findCropArguments(self):
|
||||||
|
""""""
|
||||||
|
|
||||||
|
# ffmpeg -i <input.file> -vf cropdetect -f null -
|
||||||
|
ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffmpeg", "-i",
|
||||||
|
self.__sourcePath,
|
||||||
|
"-vf", "cropdetect",
|
||||||
|
"-ss", "60",
|
||||||
|
"-t", "180",
|
||||||
|
"-f", "null", "-"
|
||||||
|
])
|
||||||
|
|
||||||
|
errorLines = ffprobeError.split('\n')
|
||||||
|
|
||||||
|
crops = {}
|
||||||
|
for el in errorLines:
|
||||||
|
|
||||||
|
cropdetect_match = re.search(FileProperties.CROPDETECT_PATTERN, el)
|
||||||
|
|
||||||
|
if cropdetect_match is not None:
|
||||||
|
cropParam = str(cropdetect_match.group(0))
|
||||||
|
|
||||||
|
crops[cropParam] = crops.get(cropParam, 0) + 1
|
||||||
|
|
||||||
|
if crops:
|
||||||
|
cropHistogram = sorted(crops, reverse=True)
|
||||||
|
cropString = cropHistogram[0]
|
||||||
|
|
||||||
|
cropTokens = cropString.split('=')
|
||||||
|
cropValueTokens = cropTokens[1]
|
||||||
|
cropValues = cropValueTokens.split(':')
|
||||||
|
|
||||||
|
return {
|
||||||
|
CropFilter.OUTPUT_WIDTH_KEY: cropValues[0],
|
||||||
|
CropFilter.OUTPUT_HEIGHT_KEY: cropValues[1],
|
||||||
|
CropFilter.OFFSET_X_KEY: cropValues[2],
|
||||||
|
CropFilter.OFFSET_Y_KEY: cropValues[3]
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
def getMediaDescriptor(self):
|
def getMediaDescriptor(self):
|
||||||
return MediaDescriptor.fromFfprobe(self.context, self.getFormatData(), self.getStreamData())
|
return MediaDescriptor.fromFfprobe(self.context, self.getFormatData(), self.getStreamData())
|
||||||
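findCropArguments() above samples 180 seconds of the source (starting at the 60-second mark), tallies the crop=W:H:X:Y strings that ffmpeg's cropdetect filter prints on stderr, and maps the winning detection onto the CropFilter keys. A condensed sketch of that parsing (regex as in the diff; picking the most frequent detection is the illustrative policy here):

    import re
    from collections import Counter

    CROPDETECT_PATTERN = r'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'

    def parse_cropdetect(stderr_text):
        hits = Counter(m.group(0) for line in stderr_text.splitlines()
                       if (m := re.search(CROPDETECT_PATTERN, line)))
        if not hits:
            return {}
        width, height, x_offset, y_offset = hits.most_common(1)[0][0].split('=')[1].split(':')
        return {'output_width': width, 'output_height': height,
                'x_offset': x_offset, 'y_offset': y_offset}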
|
|
||||||
|
|||||||
51
src/ffx/filter/crop_filter.py
Normal file
51
src/ffx/filter/crop_filter.py
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
import itertools
|
||||||
|
|
||||||
|
from .filter import Filter
|
||||||
|
|
||||||
|
|
||||||
|
class CropFilter(Filter):
|
||||||
|
|
||||||
|
IDENTIFIER = 'crop'
|
||||||
|
|
||||||
|
OUTPUT_WIDTH_KEY = 'output_width'
|
||||||
|
OUTPUT_HEIGHT_KEY = 'output_height'
|
||||||
|
OFFSET_X_KEY = 'x_offset'
|
||||||
|
OFFSET_Y_KEY = 'y_offset'
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY, 0))
|
||||||
|
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY, 0))
|
||||||
|
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY, 0))
|
||||||
|
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY, 0))
|
||||||
|
|
||||||
|
super().__init__(self)
|
||||||
|
|
||||||
|
def setArguments(self, **kwargs):
|
||||||
|
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY))
|
||||||
|
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY))
|
||||||
|
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY,))
|
||||||
|
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY,))
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
payload = {'identifier': CropFilter.IDENTIFIER,
|
||||||
|
'parameters': {
|
||||||
|
CropFilter.OUTPUT_WIDTH_KEY: self.__outputWidth,
|
||||||
|
CropFilter.OUTPUT_HEIGHT_KEY: self.__outputHeight,
|
||||||
|
CropFilter.OFFSET_X_KEY: self.__offsetX,
|
||||||
|
CropFilter.OFFSET_Y_KEY: self.__offsetY
|
||||||
|
},
|
||||||
|
'suffices': [],
|
||||||
|
'variant': f"C{self.__outputWidth}-{self.__outputHeight}-{self.__offsetX}-{self.__offsetY}",
|
||||||
|
'tokens': ['crop='
|
||||||
|
+ f"{self.__outputWidth}"
|
||||||
|
+ f":{self.__outputHeight}"
|
||||||
|
+ f":{self.__offsetX}"
|
||||||
|
+ f":{self.__offsetY}"]}
|
||||||
|
|
||||||
|
return payload
|
||||||
|
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
yield self.getPayload()
|
||||||
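A short usage example for the new CropFilter: the keyword names match the class constants above, and the payload contributes one crop token plus a variant suffix used for output naming (values are made up):

    from ffx.filter.crop_filter import CropFilter

    cf = CropFilter(output_width=1920, output_height=800, x_offset=0, y_offset=140)
    payload = next(cf.getYield())
    # payload['tokens']  -> ['crop=1920:800:0:140']
    # payload['variant'] -> 'C1920-800-0-140'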
140
src/ffx/filter/deinterlace_filter.py
Normal file
140
src/ffx/filter/deinterlace_filter.py
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
import itertools
|
||||||
|
|
||||||
|
from .filter import Filter
|
||||||
|
|
||||||
|
|
||||||
|
class DeinterlaceFilter(Filter):
|
||||||
|
|
||||||
|
IDENTIFIER = 'bwdif'
|
||||||
|
|
||||||
|
# DEFAULT_STRENGTH: float = 2.8
|
||||||
|
# DEFAULT_PATCH_SIZE: int = 13
|
||||||
|
# DEFAULT_CHROMA_PATCH_SIZE: int = 9
|
||||||
|
# DEFAULT_RESEARCH_WINDOW: int = 23
|
||||||
|
# DEFAULT_CHROMA_RESEARCH_WINDOW: int= 17
|
||||||
|
|
||||||
|
# STRENGTH_KEY = 'strength'
|
||||||
|
# PATCH_SIZE_KEY = 'patch_size'
|
||||||
|
# CHROMA_PATCH_SIZE_KEY = 'chroma_patch_size'
|
||||||
|
# RESEARCH_WINDOW_KEY = 'research_window'
|
||||||
|
# CHROMA_RESEARCH_WINDOW_KEY = 'chroma_research_window'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
# self.__useHardware = kwargs.get('use_hardware', False)
|
||||||
|
|
||||||
|
# self.__strengthList = []
|
||||||
|
# strength = kwargs.get(NlmeansFilter.STRENGTH_KEY, '')
|
||||||
|
# if strength:
|
||||||
|
# strengthTokens = strength.split(',')
|
||||||
|
# for st in strengthTokens:
|
||||||
|
# try:
|
||||||
|
# strengthValue = float(st)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Strength value has to be of type float')
|
||||||
|
# if strengthValue < 1.0 or strengthValue > 30.0:
|
||||||
|
# raise ValueError('NlmeansFilter: Strength value has to be between 1.0 and 30.0')
|
||||||
|
# self.__strengthList.append(strengthValue)
|
||||||
|
# else:
|
||||||
|
# self.__strengthList = [NlmeansFilter.DEFAULT_STRENGTH]
|
||||||
|
|
||||||
|
# self.__patchSizeList = []
|
||||||
|
# patchSize = kwargs.get(NlmeansFilter.PATCH_SIZE_KEY, '')
|
||||||
|
# if patchSize:
|
||||||
|
# patchSizeTokens = patchSize.split(',')
|
||||||
|
# for pst in patchSizeTokens:
|
||||||
|
# try:
|
||||||
|
# patchSizeValue = int(pst)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Patch size value has to be of type int')
|
||||||
|
# if patchSizeValue < 0 or patchSizeValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Patch size value has to be between 0 and 99')
|
||||||
|
# if patchSizeValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Patch size value has to be an odd number')
|
||||||
|
# self.__patchSizeList.append(patchSizeValue)
|
||||||
|
# else:
|
||||||
|
# self.__patchSizeList = [NlmeansFilter.DEFAULT_PATCH_SIZE]
|
||||||
|
|
||||||
|
# self.__chromaPatchSizeList = []
|
||||||
|
# chromaPatchSize = kwargs.get(NlmeansFilter.CHROMA_PATCH_SIZE_KEY, '')
|
||||||
|
# if chromaPatchSize:
|
||||||
|
# chromaPatchSizeTokens = chromaPatchSize.split(',')
|
||||||
|
# for cpst in chromaPatchSizeTokens:
|
||||||
|
# try:
|
||||||
|
# chromaPatchSizeValue = int(pst)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma patch size value has to be of type int')
|
||||||
|
# if chromaPatchSizeValue < 0 or chromaPatchSizeValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma patch value has to be between 0 and 99')
|
||||||
|
# if chromaPatchSizeValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma patch value has to be an odd number')
|
||||||
|
# self.__chromaPatchSizeList.append(chromaPatchSizeValue)
|
||||||
|
# else:
|
||||||
|
# self.__chromaPatchSizeList = [NlmeansFilter.DEFAULT_CHROMA_PATCH_SIZE]
|
||||||
|
|
||||||
|
# self.__researchWindowList = []
|
||||||
|
# researchWindow = kwargs.get(NlmeansFilter.RESEARCH_WINDOW_KEY, '')
|
||||||
|
# if researchWindow:
|
||||||
|
# researchWindowTokens = researchWindow.split(',')
|
||||||
|
# for rwt in researchWindowTokens:
|
||||||
|
# try:
|
||||||
|
# researchWindowValue = int(rwt)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Research window value has to be of type int')
|
||||||
|
# if researchWindowValue < 0 or researchWindowValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Research window value has to be between 0 and 99')
|
||||||
|
# if researchWindowValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Research window value has to be an odd number')
|
||||||
|
# self.__researchWindowList.append(researchWindowValue)
|
||||||
|
# else:
|
||||||
|
# self.__researchWindowList = [NlmeansFilter.DEFAULT_RESEARCH_WINDOW]
|
||||||
|
|
||||||
|
# self.__chromaResearchWindowList = []
|
||||||
|
# chromaResearchWindow = kwargs.get(NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY, '')
|
||||||
|
# if chromaResearchWindow:
|
||||||
|
# chromaResearchWindowTokens = chromaResearchWindow.split(',')
|
||||||
|
# for crwt in chromaResearchWindowTokens:
|
||||||
|
# try:
|
||||||
|
# chromaResearchWindowValue = int(crwt)
|
||||||
|
# except:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma research window value has to be of type int')
|
||||||
|
# if chromaResearchWindowValue < 0 or chromaResearchWindowValue > 99:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma research window value has to be between 0 and 99')
|
||||||
|
# if chromaResearchWindowValue % 2 == 0:
|
||||||
|
# raise ValueError('NlmeansFilter: Chroma research window value has to be an odd number')
|
||||||
|
# self.__chromaResearchWindowList.append(chromaResearchWindowValue)
|
||||||
|
# else:
|
||||||
|
# self.__chromaResearchWindowList = [NlmeansFilter.DEFAULT_CHROMA_RESEARCH_WINDOW]
|
||||||
|
|
||||||
|
super().__init__(self)
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
# strength = iteration[0]
|
||||||
|
# patchSize = iteration[1]
|
||||||
|
# chromaPatchSize = iteration[2]
|
||||||
|
# researchWindow = iteration[3]
|
||||||
|
# chromaResearchWindow = iteration[4]
|
||||||
|
|
||||||
|
suffices = []
|
||||||
|
|
||||||
|
# filterName = 'nlmeans_opencl' if self.__useHardware else 'nlmeans'
|
||||||
|
|
||||||
|
payload = {'identifier': DeinterlaceFilter.IDENTIFIER,
|
||||||
|
'parameters': {},
|
||||||
|
'suffices': suffices,
|
||||||
|
'variant': f"DEINT",
|
||||||
|
'tokens': ['bwdif=mode=1']}
|
||||||
|
|
||||||
|
return payload
|
||||||
|
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
# for it in itertools.product(self.__strengthList,
|
||||||
|
# self.__patchSizeList,
|
||||||
|
# self.__chromaPatchSizeList,
|
||||||
|
# self.__researchWindowList,
|
||||||
|
# self.__chromaResearchWindowList):
|
||||||
|
yield self.getPayload()
|
||||||
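DeinterlaceFilter above contributes a single 'bwdif=mode=1' token (one output frame per field); in FfxController these per-filter tokens are joined into one -vf filtergraph. Illustrative values, plain comma join in this sketch:

    filter_param_tokens = ['crop=1920:800:0:140', 'bwdif=mode=1', 'nlmeans=s=2.8:p=13:pc=9:r=23:rc=17']
    filter_tokens = ['-vf', ','.join(filter_param_tokens)] if filter_param_tokens else []
    # -> ['-vf', 'crop=1920:800:0:140,bwdif=mode=1,nlmeans=s=2.8:p=13:pc=9:r=23:rc=17']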
@@ -144,11 +144,11 @@ class NlmeansFilter(Filter):
|
|||||||
'suffices': suffices,
|
'suffices': suffices,
|
||||||
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
|
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
|
||||||
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
|
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
|
||||||
'tokens': ['-vf', f"{filterName}=s={strength}"
|
'tokens': [f"{filterName}=s={strength}"
|
||||||
+ f":p={patchSize}"
|
+ f":p={patchSize}"
|
||||||
+ f":pc={chromaPatchSize}"
|
+ f":pc={chromaPatchSize}"
|
||||||
+ f":r={researchWindow}"
|
+ f":r={researchWindow}"
|
||||||
+ f":rc={chromaResearchWindow}"]}
|
+ f":rc={chromaResearchWindow}"]}
|
||||||
|
|
||||||
return payload
|
return payload
|
||||||
|
|
||||||
|
|||||||
@@ -1,18 +1,24 @@
|
|||||||
import itertools
|
import click
|
||||||
|
|
||||||
from .filter import Filter
|
from .filter import Filter
|
||||||
|
|
||||||
|
from ffx.video_encoder import VideoEncoder
|
||||||
|
|
||||||
|
|
||||||
class QualityFilter(Filter):
|
class QualityFilter(Filter):
|
||||||
|
|
||||||
IDENTIFIER = 'quality'
|
IDENTIFIER = 'quality'
|
||||||
|
|
||||||
DEFAULT_QUALITY = 32
|
DEFAULT_VP9_QUALITY = 32
|
||||||
|
DEFAULT_H264_QUALITY = 17
|
||||||
|
|
||||||
QUALITY_KEY = 'quality'
|
QUALITY_KEY = 'quality'
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
context = click.get_current_context().obj
|
||||||
|
|
||||||
|
|
||||||
self.__qualitiesList = []
|
self.__qualitiesList = []
|
||||||
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
|
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
|
||||||
if qualities:
|
if qualities:
|
||||||
@@ -26,7 +32,9 @@ class QualityFilter(Filter):
|
|||||||
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
|
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
|
||||||
self.__qualitiesList.append(qualityValue)
|
self.__qualitiesList.append(qualityValue)
|
||||||
else:
|
else:
|
||||||
self.__qualitiesList = [QualityFilter.DEFAULT_QUALITY]
|
|
||||||
|
self.__qualitiesList = [None]
|
||||||
|
|
||||||
|
|
||||||
super().__init__(self)
|
super().__init__(self)
|
||||||
|
|
||||||
|
|||||||
@@ -15,8 +15,41 @@ DIFF_REMOVED_KEY = 'removed'
|
|||||||
DIFF_CHANGED_KEY = 'changed'
|
DIFF_CHANGED_KEY = 'changed'
|
||||||
DIFF_UNCHANGED_KEY = 'unchanged'
|
DIFF_UNCHANGED_KEY = 'unchanged'
|
||||||
|
|
||||||
|
RICH_COLOR_PATTERN = r'\[[a-z_]+\](.+)\[\/[a-z_]+\]'
|
||||||
|
|
||||||
def dictDiff(a : dict, b : dict):
|
|
||||||
|
def dictDiff(a : dict, b : dict, ignoreKeys: list = [], removeKeys: list = []):
|
||||||
|
"""
|
||||||
|
ignoreKeys: Ignored keys are filtered from calculating diff at all
|
||||||
|
removeKeys: Override diff calculation to remove keys certainly
|
||||||
|
"""
|
||||||
|
|
||||||
|
a_filtered = {k:v for k,v in a.items() if not k in ignoreKeys}
|
||||||
|
b_filtered = {k:v for k,v in b.items() if not k in ignoreKeys and k not in removeKeys}
|
||||||
|
|
||||||
|
a_only = {k:v for k,v in a_filtered.items() if not k in b_filtered.keys()}
|
||||||
|
b_only = {k:v for k,v in b_filtered.items() if not k in a_filtered.keys()}
|
||||||
|
|
||||||
|
a_b = set(a_filtered.keys()) & set(b_filtered.keys())
|
||||||
|
|
||||||
|
changed = {k:b_filtered[k] for k in a_b if a_filtered[k] != b_filtered[k]}
|
||||||
|
unchanged = {k:b_filtered[k] for k in a_b if a_filtered[k] == b_filtered[k]}
|
||||||
|
|
||||||
|
diffResult = {}
|
||||||
|
|
||||||
|
|
||||||
|
if a_only:
|
||||||
|
diffResult[DIFF_REMOVED_KEY] = a_only
|
||||||
|
diffResult[DIFF_UNCHANGED_KEY] = unchanged
|
||||||
|
if b_only:
|
||||||
|
diffResult[DIFF_ADDED_KEY] = b_only
|
||||||
|
if changed:
|
||||||
|
diffResult[DIFF_CHANGED_KEY] = changed
|
||||||
|
|
||||||
|
return diffResult
|
||||||
|
|
||||||
|
|
||||||
|
def dictKeysDiff(a : dict, b : dict):
|
||||||
|
|
||||||
a_keys = set(a.keys())
|
a_keys = set(a.keys())
|
||||||
b_keys = set(b.keys())
|
b_keys = set(b.keys())
|
||||||
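A quick worked example of the extended dictDiff() above (result keys are the DIFF_* constants; note that 'unchanged' is only emitted alongside 'removed', as in the code):

    a = {'title': 'Old cut', 'language': 'eng', 'codec': 'vp9'}
    b = {'title': 'New cut', 'language': 'eng', 'BPS': '128000'}

    dictDiff(a, b, ignoreKeys=['BPS'])
    # -> {'removed': {'codec': 'vp9'},
    #     'unchanged': {'language': 'eng'},
    #     'changed': {'title': 'New cut'}}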
@@ -40,9 +73,10 @@ def dictDiff(a : dict, b : dict):
|
|||||||
|
|
||||||
return diffResult
|
return diffResult
|
||||||
|
|
||||||
|
|
||||||
def dictCache(element: dict, cache: list = []):
|
def dictCache(element: dict, cache: list = []):
|
||||||
for index in range(len(cache)):
|
for index in range(len(cache)):
|
||||||
diff = dictDiff(cache[index], element)
|
diff = dictKeysDiff(cache[index], element)
|
||||||
if not diff:
|
if not diff:
|
||||||
return index, cache
|
return index, cache
|
||||||
cache.append(element)
|
cache.append(element)
|
||||||
@@ -53,11 +87,13 @@ def setDiff(a : set, b : set) -> set:
|
|||||||
|
|
||||||
a_only = a - b
|
a_only = a - b
|
||||||
b_only = b - a
|
b_only = b - a
|
||||||
|
a_and_b = a & b
|
||||||
|
|
||||||
diffResult = {}
|
diffResult = {}
|
||||||
|
|
||||||
if a_only:
|
if a_only:
|
||||||
diffResult[DIFF_REMOVED_KEY] = a_only
|
diffResult[DIFF_REMOVED_KEY] = a_only
|
||||||
|
diffResult[DIFF_UNCHANGED_KEY] = a_and_b
|
||||||
if b_only:
|
if b_only:
|
||||||
diffResult[DIFF_ADDED_KEY] = b_only
|
diffResult[DIFF_ADDED_KEY] = b_only
|
||||||
|
|
||||||
@@ -83,6 +119,8 @@ def filterFilename(fileName: str) -> str:
|
|||||||
fileName = str(fileName).replace('*', '')
|
fileName = str(fileName).replace('*', '')
|
||||||
fileName = str(fileName).replace("'", '')
|
fileName = str(fileName).replace("'", '')
|
||||||
fileName = str(fileName).replace("?", '#')
|
fileName = str(fileName).replace("?", '#')
|
||||||
|
fileName = str(fileName).replace('♥', '')
|
||||||
|
fileName = str(fileName).replace('’', '')
|
||||||
|
|
||||||
return fileName.strip()
|
return fileName.strip()
|
||||||
|
|
||||||
@@ -185,3 +223,17 @@ def getEpisodeFileBasename(showName,
|
|||||||
|
|
||||||
# return ''.join(filenameTokens)
|
# return ''.join(filenameTokens)
|
||||||
|
|
||||||
|
|
||||||
|
def formatRichColor(text: str, color: str = None):
|
||||||
|
if color is None:
|
||||||
|
return text
|
||||||
|
else:
|
||||||
|
return f"[{color}]{text}[/{color}]"
|
||||||
|
|
||||||
|
def removeRichColor(text: str):
|
||||||
|
richColorMatch = re.search(RICH_COLOR_PATTERN, text)
|
||||||
|
if richColorMatch is None:
|
||||||
|
return text
|
||||||
|
else:
|
||||||
|
return str(richColorMatch.group(1))
|
||||||
|
|
||||||
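Round-trip example for the two helpers above (tags follow the rich markup convention matched by RICH_COLOR_PATTERN):

    formatRichColor('default', 'green')          # -> '[green]default[/green]'
    removeRichColor('[green]default[/green]')    # -> 'default'
    removeRichColor('plain text')                # -> 'plain text'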
|
|||||||
@@ -3,77 +3,83 @@ import difflib
|
|||||||
|
|
||||||
class IsoLanguage(Enum):
|
class IsoLanguage(Enum):

- AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": "afr"}
- ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": "alb"}
- ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": "ara"}
- ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": "arm"}
- AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": "aze"}
- BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": "baq"}
- BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": "bel"}
- BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": "bul"}
- CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": "cat"}
- CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": "chi"}
- CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": "hrv"}
- CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": "cze"}
- DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": "dan"}
- DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": "dut"}
- ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": "eng"}
- ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": "est"}
- FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": "fin"}
- FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": "fre"}
- GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": "geo"}
- GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": "ger"}
- GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": "gre"}
- HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": "heb"}
- HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": "hin"}
- HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": "hun"}
- ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": "ice"}
- INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": "ind"}
- IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": "gle"}
- ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": "ita"}
- JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": "jpn"}
- KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": "kaz"}
- KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": "kor"}
- LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": "lat"}
- LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": "lav"}
- LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": "lit"}
- MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": "mac"}
- MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": "may"}
- MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": "mlt"}
- NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": "nor"}
- PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": "per"}
- POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": "pol"}
- PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": "por"}
- ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": "rum"}
- RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": "rus"}
- NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": "sme"}
- SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": "smo"}
- SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": "sag"}
- SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": "san"}
- SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": "srd"}
- SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": "srp"}
- SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": "sna"}
- SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": "snd"}
- SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": "sin"}
- SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": "slk"}
- SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": "slv"}
- SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": "som"}
- SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": "sot"}
- SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": "spa"}
- SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": "sun"}
- SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": "swa"}
- SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": "ssw"}
- SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": "swe"}
- TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": "tgl"}
- TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": "tam"}
- THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": "tha"}
- TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": "tur"}
- UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": "ukr"}
- URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": "urd"}
- VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": "vie"}
- WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": "wel"}
- UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": "und"}
+ AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": ["afr"]}
+ ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": ["alb"]}
+ ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": ["ara"]}
+ ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": ["arm"]}
+ AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": ["aze"]}
+ BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": ["baq"]}
+ BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": ["bel"]}
+ BOKMAL = {"name": "Bokmål", "iso639_1": "nb", "iso639_2": ["nob"]} # Norwegian Bokmål
+ BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": ["bul"]}
+ CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": ["cat"]}
+ CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": ["zho", "chi"]}
+ CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": ["hrv"]}
+ CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": ["cze"]}
+ DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": ["dan"]}
+ DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": ["nld", "dut"]}
+ ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": ["eng"]}
+ ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": ["est"]}
+ FILIPINO = {"name": "Filipino", "iso639_1": "tl", "iso639_2": ["fil"]} # Tagalog
+ FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": ["fin"]}
+ FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": ["fra", "fre"]}
+ GALICIAN = {"name": "Galician", "iso639_1": "gl", "iso639_2": ["glg"]}
+ GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": ["geo"]}
+ GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": ["deu", "ger"]}
+ GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": ["gre"]}
+ HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": ["heb"]}
+ HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": ["hin"]}
+ HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": ["hun"]}
+ ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": ["ice"]}
+ INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": ["ind"]}
+ IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": ["gle"]}
+ ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": ["ita"]}
+ JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": ["jpn"]}
+ KANNADA = {"name": "Kannada", "iso639_1": "kn", "iso639_2": ["kan"]}
+ KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": ["kaz"]}
+ KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": ["kor"]}
+ LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": ["lat"]}
+ LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": ["lav"]}
+ LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": ["lit"]}
+ MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": ["mac"]}
+ MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": ["may"]}
+ MALAYALAM = {"name": "Malayalam", "iso639_1": "ml", "iso639_2": ["mal"]}
+ MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": ["mlt"]}
+ NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": ["nor"]}
+ PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": ["per"]}
+ POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": ["pol"]}
+ PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": ["por"]}
+ ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": ["rum"]}
+ RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": ["rus"]}
+ NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": ["sme"]}
+ SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": ["smo"]}
+ SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": ["sag"]}
+ SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": ["san"]}
+ SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": ["srd"]}
+ SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": ["srp"]}
+ SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": ["sna"]}
+ SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": ["snd"]}
+ SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": ["sin"]}
+ SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": ["slk"]}
+ SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": ["slv"]}
+ SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": ["som"]}
+ SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": ["sot"]}
+ SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": ["spa"]}
+ SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": ["sun"]}
+ SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": ["swa"]}
+ SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": ["ssw"]}
+ SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": ["swe"]}
+ TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": ["tgl"]}
+ TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": ["tam"]}
+ TELUGU = {"name": "Telugu", "iso639_1": "te", "iso639_2": ["tel"]}
+ THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": ["tha"]}
+ TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": ["tur"]}
+ UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": ["ukr"]}
+ URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": ["urd"]}
+ VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2": ["vie"]}
+ WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": ["wel"]}
+ UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": ["und"]}

@staticmethod
@@ -89,7 +95,7 @@ class IsoLanguage(Enum):

@staticmethod
def findThreeLetter(theeLetter : str):
- foundLangs = [l for l in IsoLanguage if l.value['iso639_2'] == str(theeLetter)]
+ foundLangs = [l for l in IsoLanguage if str(theeLetter) in l.value['iso639_2']]
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
@@ -100,7 +106,6 @@ class IsoLanguage(Enum):

return str(self.value['iso639_1'])

def threeLetter(self):
- return str(self.value['iso639_2'])
+ return str(self.value['iso639_2'][0])
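Editor's note: with iso639_2 now holding a list of codes, findThreeLetter() matches any listed variant while threeLetter() returns the first entry. A minimal sketch of the resulting behaviour (illustrative only; the module path is assumed, it is not shown in this diff):

# Illustrative usage sketch, assuming the enum shown above lives in ffx.iso_language.
from ffx.iso_language import IsoLanguage  # path assumed

assert IsoLanguage.findThreeLetter("zho") is IsoLanguage.CHINESE    # new primary code
assert IsoLanguage.findThreeLetter("chi") is IsoLanguage.CHINESE    # legacy bibliographic code still resolves
assert IsoLanguage.CHINESE.threeLetter() == "zho"                   # first list entry is the canonical code
assert IsoLanguage.findThreeLetter("xyz") is IsoLanguage.UNDEFINED  # unknown codes fall back to UNDEFINED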
@@ -32,7 +32,8 @@ class MediaController():

for mediaTagKey, mediaTagValue in mediaDescriptor.getTags():
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
- for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
+ # for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
+ for trackDescriptor in mediaDescriptor.getTrackDescriptors():
self.__tc.addTrack(trackDescriptor, patternId = pid)

s.commit()
@@ -10,8 +10,6 @@ from ffx.track_codec import TrackCodec

from ffx.track_descriptor import TrackDescriptor

- from ffx.helper import dictDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY


class MediaDescriptor:
"""This class represents the structural content of a media file including streams and metadata"""
@@ -22,6 +20,7 @@ class MediaDescriptor:

TRACKS_KEY = "tracks"

TRACK_DESCRIPTOR_LIST_KEY = "track_descriptors"
+ ATTACHMENT_DESCRIPTOR_LIST_KEY = "attachment_descriptors"
CLEAR_TAGS_FLAG_KEY = "clear_tags"

FFPROBE_DISPOSITION_KEY = "disposition"
@@ -31,7 +30,9 @@ class MediaDescriptor:

#407 remove as well
EXCLUDED_MEDIA_TAGS = ["creation_time"]

- SEASON_EPISODE_STREAM_LANGUAGE_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
+ SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
+ STREAM_LANGUAGE_DISPOSITIONS_MATCH = '([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'

SUBTITLE_FILE_EXTENSION = 'vtt'

def __init__(self, **kwargs):
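Editor's note: a hedged sketch of the filenames these two patterns are meant to recognise; searchSubtitleFiles() prepends the file prefix, and the example filenames and disposition suffixes below are invented:

import re

# Patterns copied from the constants above ("{prefix}_" is prepended at runtime).
sesld = re.compile('[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*')
sld = re.compile('([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*')

m = sesld.search("show_S01E03_2_ger_DEF.vtt")  # hypothetical episode subtitle file
print(m.groups())  # ('01', '03', '2', 'ger', 'DEF') -> season, episode, stream index, language, disposition suffix

m = sld.search("movie_0_eng_FOR.vtt")          # hypothetical movie subtitle file without season/episode
print(m.groups())  # ('0', 'eng', 'FOR') -> stream index, language, disposition suffix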
@@ -69,9 +70,9 @@ class MediaDescriptor:

raise TypeError(
f"TrackDesciptor.__init__(): All elements of argument list {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} are required to be of type TrackDescriptor"
)
- self.__trackDescriptors = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
+ self.__trackDescriptors: List[TrackDescriptor] = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
else:
- self.__trackDescriptors = []
+ self.__trackDescriptors: List[TrackDescriptor] = []

def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
@@ -107,14 +108,16 @@ class MediaDescriptor:

def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
- for t in self.getAllTrackDescriptors():
+ # for t in self.getAllTrackDescriptors():
+ for t in self.getTrackDescriptors():
if t.getType() == trackType:
t.setDispositionFlag(
TrackDisposition.DEFAULT, t.getSubIndex() == int(subIndex)
)

def setForcedSubTrack(self, trackType: TrackType, subIndex: int):
- for t in self.getAllTrackDescriptors():
+ # for t in self.getAllTrackDescriptors():
+ for t in self.getTrackDescriptors():
if t.getType() == trackType:
t.setDispositionFlag(
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
@@ -190,7 +193,8 @@ class MediaDescriptor:

def applySourceIndices(self, sourceMediaDescriptor: Self):
- sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
+ # sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
+ sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors()

numTrackDescriptors = len(self.__trackDescriptors)
if len(sourceTrackDescriptors) != numTrackDescriptors:
@@ -285,9 +289,9 @@ class MediaDescriptor:

tdList[trackIndex].setIndex(trackIndex)

- def getAllTrackDescriptors(self):
-     """Returns all track descriptors sorted by type: video, audio then subtitles"""
-     return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
+ # def getAllTrackDescriptors(self):
+ #     """Returns all track descriptors sorted by type: video, audio then subtitles"""
+ #     return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()

def getTrackDescriptors(self,
@@ -317,82 +321,16 @@ class MediaDescriptor:

if s.getType() == TrackType.SUBTITLE
]

- def compare(self, vsMediaDescriptor: Self):
-     if not isinstance(vsMediaDescriptor, self.__class__):
-         self.__logger.error(f"MediaDescriptor.compare(): Argument is required to be of type {self.__class__}")
-         raise click.Abort()
-     vsTags = vsMediaDescriptor.getTags()
-     tags = self.getTags()
-     # HINT: Some tags differ per file, for example creation_time, so these are removed before diff
-     for emt in MediaDescriptor.EXCLUDED_MEDIA_TAGS:
-         if emt in tags.keys():
-             del tags[emt]
-         if emt in vsTags.keys():
-             del vsTags[emt]
-     tagsDiff = dictDiff(vsTags, tags)
-     compareResult = {}
-     if tagsDiff:
-         compareResult[MediaDescriptor.TAGS_KEY] = tagsDiff
-     # Target track configuration (from DB)
-     # tracks = self.getAllTrackDescriptors()
-     tracks = self.getAllTrackDescriptors() # filtern
-     numTracks = len(tracks)
-     # Current track configuration (of file)
-     vsTracks = vsMediaDescriptor.getAllTrackDescriptors()
-     numVsTracks = len(vsTracks)
-     maxNumOfTracks = max(numVsTracks, numTracks)
-     trackCompareResult = {}
-     for tp in range(maxNumOfTracks):
-         #!
-         vsTrackIndex = tracks[tp].getSourceIndex()
-         # Will trigger if tracks are missing in file
-         if tp > (numVsTracks - 1):
-             if DIFF_ADDED_KEY not in trackCompareResult.keys():
-                 trackCompareResult[DIFF_ADDED_KEY] = set()
-             trackCompareResult[DIFF_ADDED_KEY].add(tracks[tp].getIndex())
-             continue
-         # Will trigger if tracks are missing in DB definition
-         # New tracks will be added per update via this way
-         if tp > (numTracks - 1):
-             if DIFF_REMOVED_KEY not in trackCompareResult.keys():
-                 trackCompareResult[DIFF_REMOVED_KEY] = {}
-             trackCompareResult[DIFF_REMOVED_KEY][
-                 vsTracks[vsTrackIndex].getIndex()
-             ] = vsTracks[vsTrackIndex]
-             continue
-         # assumption is made here that the track order will not change for all files of a sequence
-         trackDiff = tracks[tp].compare(vsTracks[vsTrackIndex])
-         if trackDiff:
-             if DIFF_CHANGED_KEY not in trackCompareResult.keys():
-                 trackCompareResult[DIFF_CHANGED_KEY] = {}
-             trackCompareResult[DIFF_CHANGED_KEY][
-                 vsTracks[vsTrackIndex].getIndex()
-             ] = trackDiff
-     if trackCompareResult:
-         compareResult[MediaDescriptor.TRACKS_KEY] = trackCompareResult
-     return compareResult
+ def getAttachmentTracks(self) -> List[TrackDescriptor]:
+     return [
+         s
+         for s in self.__trackDescriptors
+         if s.getType() == TrackType.ATTACHMENT
+     ]

def getImportFileTokens(self, use_sub_index: bool = True):
+ """Generate ffmpeg import options for external stream files"""

importFileTokens = []
@@ -415,76 +353,103 @@ class MediaDescriptor:

return importFileTokens

- def getInputMappingTokens(self, use_sub_index: bool = True, only_video: bool = False):
+ def getInputMappingTokens(self,
+                           use_sub_index: bool = True,
+                           only_video: bool = False,
+                           sourceMediaDescriptor: Self = None):
"""Tracks must be reordered for source index order"""

inputMappingTokens = []

+ sortedTrackDescriptors = sorted(self.__trackDescriptors, key=lambda d: d.getIndex())
+ # raise click.ClickException(' '.join([f"\nindex={td.getIndex()} subIndex={td.getSubIndex()} srcIndex={td.getSourceIndex()} type={td.getType().label()}" for td in self.__trackDescriptors]))

filePointer = 1
- for trackIndex in range(len(self.__trackDescriptors)):
-     td = self.__trackDescriptors[trackIndex]
-     stdi = self.__trackDescriptors[td.getSourceIndex()].getIndex()
-     stdsi = self.__trackDescriptors[td.getSourceIndex()].getSubIndex()
-     # sti = self.__trackDescriptors[trackIndex].getSourceIndex()
-     # sotd = sourceOrderTrackDescriptors[sti]
-     trackType = td.getType()
-     if (trackType == TrackType.VIDEO or not only_video):
+ for trackIndex in range(len(sortedTrackDescriptors)):
+     td: TrackDescriptor = sortedTrackDescriptors[trackIndex]
+     #HINT: Attached thumbnails are not supported by .webm container format
+     if td.getCodec() != TrackCodec.PNG:
+         stdi = sortedTrackDescriptors[td.getSourceIndex()].getIndex()
+         stdsi = sortedTrackDescriptors[td.getSourceIndex()].getSubIndex()
+         trackType = td.getType()
+         trackCodec = td.getCodec()
+         if (trackType != TrackType.ATTACHMENT
+             and (trackType == TrackType.VIDEO or not only_video)):

importedFilePath = td.getExternalSourceFilePath()
if use_sub_index:
if importedFilePath:
inputMappingTokens += [
"-map",
f"{filePointer}:{trackType.indicator()}:0",
]
filePointer += 1
else:
- if td.getCodec() != TrackCodec.PGS:
-     inputMappingTokens += [
-         "-map",
-         f"0:{trackType.indicator()}:{stdsi}",
-     ]
+ if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
+     inputMappingTokens += [
+         "-map",
+         f"0:{trackType.indicator()}:{stdsi}",
+     ]
else:
- if td.getCodec() != TrackCodec.PGS:
-     inputMappingTokens += ["-map", f"0:{stdi}"]
+ if not trackCodec in [TrackCodec.PGS, TrackCodec.VOBSUB]:
+     inputMappingTokens += ["-map", f"0:{stdi}"]

+ if sourceMediaDescriptor:
+     fontDescriptors = [ftd for ftd in sourceMediaDescriptor.getAttachmentTracks()
+                        if ftd.getCodec() == TrackCodec.TTF]
+ else:
+     fontDescriptors = [ftd for ftd in self.__trackDescriptors
+                        if ftd.getType() == TrackType.ATTACHMENT
+                        and ftd.getCodec() == TrackCodec.TTF]
+ for ad in sorted(fontDescriptors, key=lambda d: d.getIndex()):
+     inputMappingTokens += ["-map", f"0:{ad.getIndex()}"]

return inputMappingTokens
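Editor's note: for orientation, the kind of ffmpeg arguments these mapping tokens produce. The concrete values below are invented; the stream indicators come from TrackType.indicator():

# Hypothetical result of getInputMappingTokens() for a file with one video track,
# one internal audio track and one subtitle loaded from an external file:
tokens = ["-map", "0:v:0", "-map", "0:a:0", "-map", "1:s:0"]
# which would correspond to a command along the lines of:
#   ffmpeg -i input.mkv -i subs.vtt -map 0:v:0 -map 0:a:0 -map 1:s:0 ...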
def searchSubtitleFiles(self, searchDirectory, prefix):

- sesl_match = re.compile(MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_MATCH)
+ sesld_match = re.compile(f"{prefix}_{MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
+ sld_match = re.compile(f"{prefix}_{MediaDescriptor.STREAM_LANGUAGE_DISPOSITIONS_MATCH}")

subtitleFileDescriptors = []

for subtitleFilename in os.listdir(searchDirectory):
if subtitleFilename.startswith(prefix) and subtitleFilename.endswith(
"." + MediaDescriptor.SUBTITLE_FILE_EXTENSION
):
- sesl_result = sesl_match.search(subtitleFilename)
- if sesl_result is not None:
+ sesld_result = sesld_match.search(subtitleFilename)
+ sld_result = None if not sesld_result is None else sld_match.search(subtitleFilename)
+ if not sesld_result is None:

subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
if os.path.isfile(subtitleFilePath):

subtitleFileDescriptor = {}
subtitleFileDescriptor["path"] = subtitleFilePath
- subtitleFileDescriptor["season"] = int(sesl_result.group(1))
- subtitleFileDescriptor["episode"] = int(sesl_result.group(2))
- subtitleFileDescriptor["index"] = int(sesl_result.group(3))
- subtitleFileDescriptor["language"] = sesl_result.group(4)
+ subtitleFileDescriptor["season"] = int(sesld_result.group(1))
+ subtitleFileDescriptor["episode"] = int(sesld_result.group(2))
+ subtitleFileDescriptor["index"] = int(sesld_result.group(3))
+ subtitleFileDescriptor["language"] = sesld_result.group(4)

dispSet = set()
- dispCaptGroups = sesl_result.groups()
+ dispCaptGroups = sesld_result.groups()
numCaptGroups = len(dispCaptGroups)
if numCaptGroups > 4:
for groupIndex in range(numCaptGroups - 4):

@@ -495,6 +460,29 @@ class MediaDescriptor:

subtitleFileDescriptors.append(subtitleFileDescriptor)

+ if not sld_result is None:
+
+     subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
+     if os.path.isfile(subtitleFilePath):
+
+         subtitleFileDescriptor = {}
+         subtitleFileDescriptor["path"] = subtitleFilePath
+         subtitleFileDescriptor["index"] = int(sld_result.group(1))
+         subtitleFileDescriptor["language"] = sld_result.group(2)
+
+         dispSet = set()
+         dispCaptGroups = sld_result.groups()
+         numCaptGroups = len(dispCaptGroups)
+         if numCaptGroups > 2:
+             for groupIndex in range(numCaptGroups - 2):
+                 disp = TrackDisposition.fromIndicator(dispCaptGroups[groupIndex + 2])
+                 if disp is not None:
+                     dispSet.add(disp)
+         subtitleFileDescriptor["disposition_set"] = dispSet
+
+         subtitleFileDescriptors.append(subtitleFileDescriptor)

self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")

return subtitleFileDescriptors
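Editor's note: the collected descriptors are plain dicts. A rough sketch of one parsed entry, assuming a hypothetical file name and assuming the "DEF" suffix maps to TrackDisposition.DEFAULT via fromIndicator() (neither is confirmed by this diff):

# Illustrative entry appended by the season/episode branch for "show_S01E03_2_ger_DEF.vtt":
{
    "path": "/library/show_S01E03_2_ger_DEF.vtt",   # invented path
    "season": 1,
    "episode": 3,
    "index": 2,
    "language": "ger",
    "disposition_set": {TrackDisposition.DEFAULT},  # assumption: "DEF" -> DEFAULT
}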
@@ -518,7 +506,8 @@ class MediaDescriptor:

[
d
for d in availableFileSubtitleDescriptors
- if d["season"] == int(season) and d["episode"] == int(episode)
+ if ((season == -1 and episode == -1)
+     or (d["season"] == int(season) and d["episode"] == int(episode)))
],
key=lambda d: d["index"],
)
@@ -541,7 +530,8 @@ class MediaDescriptor:

def getConfiguration(self, label: str = ''):
yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
- for td in self.getAllTrackDescriptors():
+ # for td in self.getAllTrackDescriptors():
+ for td in self.getTrackDescriptors():
yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
+ '|'.join([d.indicator() for d in td.getDispositionSet()])
+ ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))
302  src/ffx/media_descriptor_change_set.py  (new file)
@@ -0,0 +1,302 @@

import click

from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor

from ffx.helper import dictDiff, setDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY

from ffx.track_codec import TrackCodec
from ffx.track_disposition import TrackDisposition


class MediaDescriptorChangeSet():

    TAGS_KEY = "tags"
    TRACKS_KEY = "tracks"
    DISPOSITION_SET_KEY = "disposition_set"

    TRACK_DESCRIPTOR_KEY = "track_descriptor"


    def __init__(self, context,
                 targetMediaDescriptor: MediaDescriptor = None,
                 sourceMediaDescriptor: MediaDescriptor = None):

        self.__context = context
        self.__logger = context['logger']

        self.__configurationData = self.__context['config'].getData()

        metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}

        self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
        self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
        self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
        self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
                                  if 'streams' in metadataConfiguration.keys()
                                  and 'remove' in metadataConfiguration['streams'].keys() else [])
        self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
                                  if 'streams' in metadataConfiguration.keys()
                                  and 'ignore' in metadataConfiguration['streams'].keys() else [])

        self.__targetTrackDescriptors = targetMediaDescriptor.getTrackDescriptors() if targetMediaDescriptor is not None else []
        self.__sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors() if sourceMediaDescriptor is not None else []

        targetMediaTags = targetMediaDescriptor.getTags() if targetMediaDescriptor is not None else {}
        sourceMediaTags = sourceMediaDescriptor.getTags() if sourceMediaDescriptor is not None else {}

        self.__changeSetObj = {}

        #if targetMediaDescriptor is not None:

        #!!#
        tagsDiff = dictDiff(sourceMediaTags,
                            targetMediaTags,
                            ignoreKeys=self.__ignoreGlobalKeys,
                            removeKeys=self.__removeGlobalKeys)

        if tagsDiff:
            self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiff

        self.__numTargetTracks = len(self.__targetTrackDescriptors)

        # Current track configuration (of file)
        self.__numSourceTracks = len(self.__sourceTrackDescriptors)

        maxNumOfTracks = max(self.__numSourceTracks, self.__numTargetTracks)

        trackCompareResult = {}

        for trackIndex in range(maxNumOfTracks):

            correspondingSourceTrackDescriptors = [st for st in self.__sourceTrackDescriptors if st.getIndex() == trackIndex]
            correspondingTargetTrackDescriptors = [tt for tt in self.__targetTrackDescriptors if tt.getIndex() == trackIndex]

            # Track present in target but not in source
            if (not correspondingSourceTrackDescriptors
                and correspondingTargetTrackDescriptors):

                if DIFF_ADDED_KEY not in trackCompareResult.keys():
                    trackCompareResult[DIFF_ADDED_KEY] = {}

                trackCompareResult[DIFF_ADDED_KEY][trackIndex] = correspondingTargetTrackDescriptors[0]
                continue

            # Track present in target but not in source
            if (correspondingSourceTrackDescriptors
                and not correspondingTargetTrackDescriptors):

                if DIFF_REMOVED_KEY not in trackCompareResult.keys():
                    trackCompareResult[DIFF_REMOVED_KEY] = {}

                trackCompareResult[DIFF_REMOVED_KEY][trackIndex] = correspondingSourceTrackDescriptors[0]
                continue

            if (correspondingSourceTrackDescriptors
                and correspondingTargetTrackDescriptors):

                # if correspondingTargetTrackDescriptors[0].getIndex() == 3:
                #     raise click.ClickException(f"{correspondingSourceTrackDescriptors[0].getDispositionSet()} {correspondingTargetTrackDescriptors[0].getDispositionSet()}")

                trackDiff = self.compareTracks(correspondingTargetTrackDescriptors[0],
                                               correspondingSourceTrackDescriptors[0])

                if trackDiff:
                    if DIFF_CHANGED_KEY not in trackCompareResult.keys():
                        trackCompareResult[DIFF_CHANGED_KEY] = {}

                    trackCompareResult[DIFF_CHANGED_KEY][trackIndex] = trackDiff

        if trackCompareResult:
            self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY] = trackCompareResult


    def compareTracks(self,
                      targetTrackDescriptor: TrackDescriptor = None,
                      sourceTrackDescriptor: TrackDescriptor = None):

        sourceTrackTags = sourceTrackDescriptor.getTags() if sourceTrackDescriptor is not None else {}
        targetTrackTags = targetTrackDescriptor.getTags() if targetTrackDescriptor is not None else {}

        trackCompareResult = {}

        tagsDiffResult = dictDiff(sourceTrackTags,
                                  targetTrackTags,
                                  ignoreKeys=self.__ignoreTrackKeys,
                                  removeKeys=self.__removeTrackKeys)

        if tagsDiffResult:
            trackCompareResult[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiffResult

        sourceDispositionSet = sourceTrackDescriptor.getDispositionSet() if sourceTrackDescriptor is not None else set()
        targetDispositionSet = targetTrackDescriptor.getDispositionSet() if targetTrackDescriptor is not None else set()

        # if targetTrackDescriptor.getIndex() == 3:
        #     raise click.ClickException(f"{sourceDispositionSet} {targetDispositionSet}")

        dispositionDiffResult = setDiff(sourceDispositionSet, targetDispositionSet)

        if dispositionDiffResult:
            trackCompareResult[MediaDescriptorChangeSet.DISPOSITION_SET_KEY] = dispositionDiffResult

        return trackCompareResult
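Editor's note: the change set built above is a nested dict keyed by the DIFF_* constants from ffx.helper. Their literal string values are not shown in this diff, so the keys below are assumptions; the shape, however, follows directly from the code:

# Sketch of a possible getChangeSetObj() result (shape only; key strings and values invented):
{
    "tags": {                                  # MediaDescriptorChangeSet.TAGS_KEY
        "added":   {"title": "My Show"},       # DIFF_ADDED_KEY, assumed to be "added"
        "changed": {"comment": "remux"},       # DIFF_CHANGED_KEY
        "removed": {"encoder": "old-tool"},    # DIFF_REMOVED_KEY
    },
    "tracks": {                                # MediaDescriptorChangeSet.TRACKS_KEY
        "changed": {
            2: {
                "tags": {"changed": {"language": "ger"}},
                "disposition_set": {"added": {TrackDisposition.DEFAULT}},
            },
        },
    },
}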
    def generateDispositionTokens(self):
        """
        #Example: -disposition:s:0 default -disposition:s:1 0
        """
        dispositionTokens = []

        # if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
        #     if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
        #         addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
        #         trackDescriptor: TrackDescriptor
        #         for trackDescriptor in addedTracks.values():
        #             dispositionSet = trackDescriptor.getDispositionSet()
        #             if dispositionSet:
        #                 dispositionTokens += [f"-disposition:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
        #                                       '+'.join([d.label() for d in dispositionSet])]
        #     if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
        #         changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
        #         trackDiffObj: dict
        #         for trackIndex, trackDiffObj in changedTracks.items():
        #             if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
        #                 dispositionDiffObj: dict = trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY]
        #                 addedDispositions = dispositionDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in dispositionDiffObj.keys() else set()
        #                 removedDispositions = dispositionDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in dispositionDiffObj.keys() else set()
        #                 unchangedDispositions = dispositionDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in dispositionDiffObj.keys() else set()
        #                 targetDispositions = addedDispositions | unchangedDispositions
        #                 trackDescriptor = self.__targetTrackDescriptors[trackIndex]
        #                 streamIndicator = trackDescriptor.getType().indicator()
        #                 subIndex = trackDescriptor.getSubIndex()
        #                 if targetDispositions:
        #                     dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
        #                 # if not targetDispositions and removedDispositions:
        #                 else:
        #                     dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']

        for ttd in self.__targetTrackDescriptors:

            targetDispositions = ttd.getDispositionSet()
            streamIndicator = ttd.getType().indicator()
            subIndex = ttd.getSubIndex()

            if targetDispositions:
                dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
            # if not targetDispositions and removedDispositions:
            else:
                dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']

        return dispositionTokens


    def generateMetadataTokens(self):

        metadataTokens = []

        if MediaDescriptorChangeSet.TAGS_KEY in self.__changeSetObj.keys():

            addedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
                              if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
            removedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
                                if DIFF_REMOVED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
            changedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
                                if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})

            outputMediaTags = addedMediaTags | changedMediaTags

            if (not 'no_signature' in self.__context.keys()
                or not self.__context['no_signature']):
                outputMediaTags = outputMediaTags | self.__signatureTags

            # outputMediaTags = {k:v for k,v in outputMediaTags.items() if k not in self.__removeGlobalKeys}

            for tagKey, tagValue in outputMediaTags.items():
                metadataTokens += [f"-metadata:g",
                                   f"{tagKey}={tagValue}"]

            for tagKey, tagValue in changedMediaTags.items():
                metadataTokens += [f"-metadata:g",
                                   f"{tagKey}={tagValue}"]

            for removeKey in removedMediaTags.keys():
                metadataTokens += [f"-metadata:g",
                                   f"{removeKey}="]


        if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():

            if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
                addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
                trackDescriptor: TrackDescriptor
                for trackDescriptor in addedTracks.values():
                    for tagKey, tagValue in trackDescriptor.getTags().items():
                        if not tagKey in self.__removeTrackKeys:
                            metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
                                               + f":{trackDescriptor.getSubIndex()}",
                                               f"{tagKey}={tagValue}"]

            if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
                changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
                trackDiffObj: dict
                for trackIndex, trackDiffObj in changedTracks.items():

                    if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():

                        tagsDiffObj = trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY]

                        addedTrackTags = tagsDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in tagsDiffObj.keys() else {}
                        changedTrackTags = tagsDiffObj[DIFF_CHANGED_KEY] if DIFF_CHANGED_KEY in tagsDiffObj.keys() else {}
                        unchangedTrackTags = tagsDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in tagsDiffObj.keys() else {}
                        removedTrackTags = tagsDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in tagsDiffObj.keys() else {}

                        outputTrackTags = addedTrackTags | changedTrackTags

                        trackDescriptor = self.__targetTrackDescriptors[trackIndex]

                        for tagKey, tagValue in outputTrackTags.items():
                            metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
                                               + f":{trackDescriptor.getSubIndex()}",
                                               f"{tagKey}={tagValue}"]

                        for removeKey in removedTrackTags.keys():
                            metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
                                               + f":{trackDescriptor.getSubIndex()}",
                                               f"{removeKey}="]

                        #HINT: In case of loading a track from an external file
                        # no tags from source are present for the track so
                        # the unchanged tracks are passed to the output file as well
                        if trackDescriptor.getExternalSourceFilePath():
                            for tagKey, tagValue in unchangedTrackTags.items():
                                metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
                                                   + f":{trackDescriptor.getSubIndex()}",
                                                   f"{tagKey}={tagValue}"]

        return metadataTokens


    def getChangeSetObj(self):
        return self.__changeSetObj
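Editor's note: taken together, the two generators translate a change set into plain ffmpeg argument tokens. A hedged example of the output for one subtitle track and one global tag (values invented; "default"/"forced" labels follow the example given in the method's own docstring):

# generateDispositionTokens() might yield:
["-disposition:s:0", "default+forced", "-disposition:s:1", "0"]
# generateMetadataTokens() might yield:
["-metadata:g", "title=My Show", "-metadata:s:s:0", "language=ger"]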
@@ -27,7 +27,9 @@ from textual.widgets._data_table import CellDoesNotExist

from ffx.media_descriptor import MediaDescriptor
from ffx.file_properties import FileProperties

- from ffx.helper import DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
+ from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
+ from ffx.helper import formatRichColor, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY


# Screen[dict[int, str, int]]
@@ -38,7 +40,7 @@ class MediaDetailsScreen(Screen):

Grid {
grid-size: 5 8;
grid-rows: 8 2 2 2 2 8 2 2 8;
- grid-columns: 25 25 120 10 75;
+ grid-columns: 15 25 90 10 105;
height: 100%;
width: 100%;
padding: 1;
@@ -110,6 +112,19 @@ class MediaDetailsScreen(Screen):

"""

+ TRACKS_TABLE_INDEX_COLUMN_LABEL = "Index"
+ TRACKS_TABLE_TYPE_COLUMN_LABEL = "Type"
+ TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL = "SubIndex"
+ TRACKS_TABLE_CODEC_COLUMN_LABEL = "Codec"
+ TRACKS_TABLE_LAYOUT_COLUMN_LABEL = "Layout"
+ TRACKS_TABLE_LANGUAGE_COLUMN_LABEL = "Language"
+ TRACKS_TABLE_TITLE_COLUMN_LABEL = "Title"
+ TRACKS_TABLE_DEFAULT_COLUMN_LABEL = "Default"
+ TRACKS_TABLE_FORCED_COLUMN_LABEL = "Forced"
+
+ DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL = 'Differences (file->db/output)'

BINDINGS = [
("n", "new_pattern", "New Pattern"),
("u", "update_pattern", "Update Pattern"),
@@ -123,6 +138,22 @@ class MediaDetailsScreen(Screen):

self.context = self.app.getContext()
self.Session = self.context['database']['session'] # convenience

+ self.__configurationData = self.context['config'].getData()
+
+ metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
+
+ self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
+ self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
+ self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
+ self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
+                           if 'streams' in metadataConfiguration.keys()
+                           and 'remove' in metadataConfiguration['streams'].keys() else [])
+ self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
+                           if 'streams' in metadataConfiguration.keys()
+                           and 'ignore' in metadataConfiguration['streams'].keys() else [])

self.__pc = PatternController(context = self.context)
self.__sc = ShowController(context = self.context)
self.__tc = TrackController(context = self.context)
@@ -180,7 +211,7 @@ class MediaDetailsScreen(Screen):

def loadProperties(self):

self.__mediaFileProperties = FileProperties(self.context, self.__mediaFilename)
- self.__currentMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
+ self.__sourceMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()

#HINT: This is None if the filename did not match anything in database
self.__currentPattern = self.__mediaFileProperties.getPattern()
@@ -191,9 +222,13 @@ class MediaDetailsScreen(Screen):

# Enumerating differences between media descriptors
# from file (=current) vs from stored in database (=target)
try:
- self.__mediaDifferences = self.__targetMediaDescriptor.compare(self.__currentMediaDescriptor) if self.__currentPattern is not None else {}
+ mdcs = MediaDescriptorChangeSet(self.context,
+                                 self.__targetMediaDescriptor,
+                                 self.__sourceMediaDescriptor)
+
+ self.__mediaChangeSetObj = mdcs.getChangeSetObj()
except ValueError:
- self.__mediaDifferences = {}
+ self.__mediaChangeSetObj = {}

def updateDifferences(self):
@@ -202,74 +237,88 @@ class MediaDetailsScreen(Screen):

self.differencesTable.clear()

- if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
-     currentTags = self.__currentMediaDescriptor.getTags()
-     targetTags = self.__targetMediaDescriptor.getTags()
-     if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
-         for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
-             row = (f"added media tag: key='{addedTagKey}' value='{targetTags[addedTagKey]}'",)
-             self.differencesTable.add_row(*map(str, row))
-     if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
-         for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
-             row = (f"removed media tag: key='{removedTagKey}' value='{currentTags[removedTagKey]}'",)
-             self.differencesTable.add_row(*map(str, row))
-     if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
-         for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
-             row = (f"changed media tag: key='{changedTagKey}' value='{currentTags[changedTagKey]}'->'{targetTags[changedTagKey]}'",)
-             self.differencesTable.add_row(*map(str, row))
- if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
-     currentTracks = self.__currentMediaDescriptor.getAllTrackDescriptors() # 0,1,2,3
-     targetTracks = self.__targetMediaDescriptor.getAllTrackDescriptors() # 0 <- from DB
-     if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
-         #raise click.ClickException(f"add track {self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]}")
-         for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
-             addedTrack : Track = targetTracks[addedTrackIndex]
-             row = (f"added {addedTrack.getType().label()} track: index={addedTrackIndex} lang={addedTrack.getLanguage().threeLetter()}",)
-             self.differencesTable.add_row(*map(str, row))
-     if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
-         for removedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY]:
-             row = (f"removed track: index={removedTrackIndex}",)
-             self.differencesTable.add_row(*map(str, row))
-     if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
-         for changedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].keys():
-             changedTrack : Track = targetTracks[changedTrackIndex]
-             changedTrackDiff : dict = self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY][changedTrackIndex]
-             if MediaDescriptor.TAGS_KEY in changedTrackDiff.keys():
-                 if DIFF_ADDED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
-                     for addedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
-                         addedTagValue = changedTrack.getTags()[addedTagKey]
-                         row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added key={addedTagKey} value={addedTagValue}",)
-                         self.differencesTable.add_row(*map(str, row))
-                 if DIFF_REMOVED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
-                     for removedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
-                         row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed key={removedTagKey}",)
-                         self.differencesTable.add_row(*map(str, row))
-             if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
-                 if DIFF_ADDED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
-                     for addedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]:
-                         row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} added disposition={addedDisposition.label()}",)
-                         self.differencesTable.add_row(*map(str, row))
-                 if DIFF_REMOVED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
-                     for removedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]:
-                         row = (f"changed {changedTrack.getType().label()} track index={changedTrackIndex} removed disposition={removedDisposition.label()}",)
-                         self.differencesTable.add_row(*map(str, row))
+ if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
+     if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
+         for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].items():
+             if tagKey not in self.__ignoreGlobalKeys:
+                 row = (f"add media tag: key='{tagKey}' value='{tagValue}'",)
+                 self.differencesTable.add_row(*map(str, row))
+     if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
+         for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].items():
+             if tagKey not in self.__ignoreGlobalKeys and tagKey not in self.__removeGlobalKeys:
+                 row = (f"remove media tag: key='{tagKey}' value='{tagValue}'",)
+                 self.differencesTable.add_row(*map(str, row))
+     if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
+         for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].items():
+             if tagKey not in self.__ignoreGlobalKeys:
+                 row = (f"change media tag: key='{tagKey}' value='{tagValue}'",)
+                 self.differencesTable.add_row(*map(str, row))
+
+ if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
+     if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
+         trackDescriptor: TrackDescriptor
+         for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
+             row = (f"add {trackDescriptor.getType().label()} track: index={trackDescriptor.getIndex()} lang={trackDescriptor.getLanguage().threeLetter()}",)
+             self.differencesTable.add_row(*map(str, row))
+     if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
+         for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
+             row = (f"remove stream #{trackIndex}",)
+             self.differencesTable.add_row(*map(str, row))
+     if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
+         changedTracks: dict = self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
+         targetTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors()
+         trackDiffObj: dict
+         for trackIndex, trackDiffObj in changedTracks.items():
+             ttd: TrackDescriptor = targetTrackDescriptors[trackIndex]
+             if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
+                 removedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
+                                if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
+                 for tagKey, tagValue in removedTags.items():
+                     row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove key={tagKey} value={tagValue}",)
+                     self.differencesTable.add_row(*map(str, row))
+                 addedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
+                              if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
+                 for tagKey, tagValue in addedTags.items():
+                     row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add key={tagKey} value={tagValue}",)
+                     self.differencesTable.add_row(*map(str, row))
+                 changedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
+                                if DIFF_CHANGED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
+                 for tagKey, tagValue in changedTags.items():
+                     row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) change key={tagKey} value={tagValue}",)
+                     self.differencesTable.add_row(*map(str, row))
+             if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
+                 addedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]
+                                      if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
+                 for ad in addedDispositions:
+                     row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add disposition={ad.label()}",)
+                     self.differencesTable.add_row(*map(str, row))
+                 removedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]
+                                        if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
+                 for rd in removedDispositions:
+                     row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove disposition={rd.label()}",)
+                     self.differencesTable.add_row(*map(str, row))

def on_mount(self):
@@ -282,8 +331,15 @@ class MediaDetailsScreen(Screen):

row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
self.showsTable.add_row(*map(str, row))

- for mediaTagKey, mediaTagValue in self.__currentMediaDescriptor.getTags().items():
-     row = (mediaTagKey, mediaTagValue) # Convert each element to a string before adding
+ for mediaTagKey, mediaTagValue in self.__sourceMediaDescriptor.getTags().items():
+
+     textColor = None
+     if mediaTagKey in self.__ignoreGlobalKeys:
+         textColor = 'blue'
+     if mediaTagKey in self.__removeGlobalKeys:
+         textColor = 'red'
+
+     row = (formatRichColor(mediaTagKey, textColor), formatRichColor(mediaTagValue, textColor)) # Convert each element to a string before adding
self.mediaTagsTable.add_row(*map(str, row))

self.updateTracks()
@@ -317,7 +373,8 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
self.tracksTable.clear()
|
self.tracksTable.clear()
|
||||||
|
|
||||||
trackDescriptorList = self.__currentMediaDescriptor.getAllTrackDescriptors()
|
# trackDescriptorList = self.__sourceMediaDescriptor.getAllTrackDescriptors()
|
||||||
|
trackDescriptorList = self.__sourceMediaDescriptor.getTrackDescriptors()
|
||||||
|
|
||||||
typeCounter = {}
|
typeCounter = {}
|
||||||
|
|
||||||
@@ -352,7 +409,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
# Define the columns with headers
|
# Define the columns with headers
|
||||||
self.column_key_show_id = self.showsTable.add_column("ID", width=10)
|
self.column_key_show_id = self.showsTable.add_column("ID", width=10)
|
||||||
self.column_key_show_name = self.showsTable.add_column("Name", width=50)
|
self.column_key_show_name = self.showsTable.add_column("Name", width=80)
|
||||||
self.column_key_show_year = self.showsTable.add_column("Year", width=10)
|
self.column_key_show_year = self.showsTable.add_column("Year", width=10)
|
||||||
|
|
||||||
self.showsTable.cursor_type = 'row'
|
self.showsTable.cursor_type = 'row'
|
||||||
@@ -361,8 +418,8 @@ class MediaDetailsScreen(Screen):
 self.mediaTagsTable = DataTable(classes="two")

 # Define the columns with headers
-self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=50)
-self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=100)
+self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=30)
+self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=70)

 self.mediaTagsTable.cursor_type = 'row'

@@ -370,15 +427,15 @@ class MediaDetailsScreen(Screen):
 self.tracksTable = DataTable(classes="two")

 # Define the columns with headers
-self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
-self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
-self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
-self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
-self.column_key_track_layout = self.tracksTable.add_column("Layout", width=10)
-self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
-self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
-self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
-self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
+self.column_key_track_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_INDEX_COLUMN_LABEL, width=5)
+self.column_key_track_type = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TYPE_COLUMN_LABEL, width=10)
+self.column_key_track_sub_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL, width=8)
+self.column_key_track_codec = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_CODEC_COLUMN_LABEL, width=10)
+self.column_key_track_layout = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LAYOUT_COLUMN_LABEL, width=10)
+self.column_key_track_language = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LANGUAGE_COLUMN_LABEL, width=15)
+self.column_key_track_title = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TITLE_COLUMN_LABEL, width=48)
+self.column_key_track_default = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_DEFAULT_COLUMN_LABEL, width=8)
+self.column_key_track_forced = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_FORCED_COLUMN_LABEL, width=8)

 self.tracksTable.cursor_type = 'row'

@@ -387,7 +444,7 @@ class MediaDetailsScreen(Screen):
 self.differencesTable = DataTable(id='differences-table') # classes="triple"

 # Define the columns with headers
-self.column_key_differences = self.differencesTable.add_column("Differences (file->db)", width=70)
+self.column_key_differences = self.differencesTable.add_column(MediaDetailsScreen.DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL, width=100)

 self.differencesTable.cursor_type = 'row'

@@ -439,15 +496,15 @@ class MediaDetailsScreen(Screen):
 yield Footer()


-def getPatternDescriptorFromInput(self):
-"""Returns show id and pattern from corresponding inputs"""
-patternDescriptor = {}
+def getPatternObjFromInput(self):
+"""Returns show id and pattern as obj from corresponding inputs"""
+patternObj = {}
 try:
-patternDescriptor['show_id'] = self.getSelectedShowDescriptor().getId()
-patternDescriptor['pattern'] = str(self.query_one("#pattern_input", Input).value)
+patternObj['show_id'] = self.getSelectedShowDescriptor().getId()
+patternObj['pattern'] = str(self.query_one("#pattern_input", Input).value)
 except:
-pass
-return patternDescriptor
+return {}
+return patternObj


 def on_button_pressed(self, event: Button.Pressed) -> None:
@@ -464,12 +521,12 @@ class MediaDetailsScreen(Screen):

 if event.button.id == "select_default_button":
 selectedTrackDescriptor = self.getSelectedTrackDescriptor()
-self.__currentMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
+self.__sourceMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
 self.updateTracks()

 if event.button.id == "select_forced_button":
 selectedTrackDescriptor = self.getSelectedTrackDescriptor()
-self.__currentMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
+self.__sourceMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
 self.updateTracks()

@@ -526,10 +583,13 @@ class MediaDetailsScreen(Screen):


 def handle_new_pattern(self, showDescriptor: ShowDescriptor):
+""""""

 if type(showDescriptor) is not ShowDescriptor:
 raise TypeError("MediaDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor")

+self.removeShow()

 showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
 if showRowIndex is None:
 show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
@@ -539,29 +599,28 @@ class MediaDetailsScreen(Screen):
 if showRowIndex is not None:
 self.showsTable.move_cursor(row=showRowIndex)

-self.removeShow()
+patternObj = self.getPatternObjFromInput()

-patternDescriptor = self.getPatternDescriptorFromInput()
-if patternDescriptor:
-patternId = self.__pc.addPattern(patternDescriptor)
+if patternObj:
+patternId = self.__pc.addPattern(patternObj)
 if patternId:
 self.highlightPattern(False)

-for tagKey, tagValue in self.__currentMediaDescriptor.getTags().items():
-self.__tac.updateMediaTag(patternId, tagKey, tagValue)
+for tagKey, tagValue in self.__sourceMediaDescriptor.getTags().items():

-for trackDescriptor in self.__currentMediaDescriptor.getAllTrackDescriptors():
+# Filter tags that make no sense to preserve
+if tagKey not in self.__ignoreGlobalKeys and not tagKey in self.__removeGlobalKeys:
+self.__tac.updateMediaTag(patternId, tagKey, tagValue)

+# for trackDescriptor in self.__sourceMediaDescriptor.getAllTrackDescriptors():
+for trackDescriptor in self.__sourceMediaDescriptor.getTrackDescriptors():
 self.__tc.addTrack(trackDescriptor, patternId = patternId)


 def action_new_pattern(self):
+"""Adding new patterns

-#TODO #427: Fehlermeldung in TUI
-# try:
-# self.__currentMediaDescriptor.checkConfiguration()
-# except ValueError:
-# return
+If the corresponding show does not exists in DB it is added beforehand"""

 selectedShowDescriptor = self.getSelectedShowDescriptor()

@@ -574,90 +633,104 @@ class MediaDetailsScreen(Screen):


 def action_update_pattern(self):
-"""When updating the database the actions must reverse the difference (eq to diff db->file)"""
+"""Updating patterns

+When updating the database the actions must reverse the difference (eq to diff db->file)"""

 if self.__currentPattern is not None:
-patternDescriptor = self.getPatternDescriptorFromInput()
-if (patternDescriptor
-and self.__currentPattern.getPattern() != patternDescriptor['pattern']):
-return self.__pc.updatePattern(self.__currentPattern.getId(), patternDescriptor)
+patternObj = self.getPatternObjFromInput()
+if (patternObj
+and self.__currentPattern.getPattern() != patternObj['pattern']):
+return self.__pc.updatePattern(self.__currentPattern.getId(), patternObj)

 self.loadProperties()

-if MediaDescriptor.TAGS_KEY in self.__mediaDifferences.keys():
+# __mediaChangeSetObj is file vs database
+if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():

-if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
-for addedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
+if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
+for addedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].keys():
+# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} addedTagKey={addedTagKey}")
 self.__tac.deleteMediaTagByKey(self.__currentPattern.getId(), addedTagKey)

-if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
-for removedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
-currentTags = self.__currentMediaDescriptor.getTags()
+if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
+for removedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].keys():
+currentTags = self.__sourceMediaDescriptor.getTags()
+# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} removedTagKey={removedTagKey} currentTags={currentTags[removedTagKey]}")
 self.__tac.updateMediaTag(self.__currentPattern.getId(), removedTagKey, currentTags[removedTagKey])

-if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
-for changedTagKey in self.__mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
-currentTags = self.__currentMediaDescriptor.getTags()
+if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
+for changedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].keys():
+currentTags = self.__sourceMediaDescriptor.getTags()
+# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} changedTagKey={changedTagKey} currentTags={currentTags[changedTagKey]}")
 self.__tac.updateMediaTag(self.__currentPattern.getId(), changedTagKey, currentTags[changedTagKey])

-if MediaDescriptor.TRACKS_KEY in self.__mediaDifferences.keys():
+if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():

-if DIFF_ADDED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
+if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():

-for addedTrackIndex in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
-targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
-if targetTracks:
-self.__tc.deleteTrack(targetTracks[0].getId()) # id
-if DIFF_REMOVED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
-for removedTrackIndex, removedTrack in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY].items():
+for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
+#targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
+# if targetTracks:
+# self.__tc.deleteTrack(targetTracks[0].getId()) # id
+# self.__tc.deleteTrack(targetTracks[0].getId())
+self.__tc.addTrack(trackDescriptor, patternId = self.__currentPattern.getId())

+if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
+trackDescriptor: TrackDescriptor
+for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
 # Track per inspect/update hinzufügen
-self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
+#self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
+self.__tc.deleteTrack(trackDescriptor.getId())

-if DIFF_CHANGED_KEY in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
+if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():

 # [vsTracks[tp].getIndex()] = trackDiff
-for changedTrackIndex, changedTrackDiff in self.__mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].items():
+for trackIndex, trackDiff in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY].items():

-changedTargetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
-changedTargeTrackId = changedTargetTracks[0].getId() if changedTargetTracks else None
-changedTargetTrackIndex = changedTargetTracks[0].getIndex() if changedTargetTracks else None
+targetTracks = [t for t in self.__targetMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
+targetTrackId = targetTracks[0].getId() if targetTracks else None
+targetTrackIndex = targetTracks[0].getIndex() if targetTracks else None

-changedCurrentTracks = [t for t in self.__currentMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == changedTrackIndex]
+changedCurrentTracks = [t for t in self.__sourceMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
 # changedCurrentTrackId #HINT: Undefined as track descriptors do not come from file with track_id

-if TrackDescriptor.TAGS_KEY in changedTrackDiff.keys():
-changedTrackTagsDiff = changedTrackDiff[TrackDescriptor.TAGS_KEY]
+if TrackDescriptor.TAGS_KEY in trackDiff.keys():
+tagsDiff = trackDiff[TrackDescriptor.TAGS_KEY]

-if DIFF_ADDED_KEY in changedTrackTagsDiff.keys():
-for addedTrackTagKey in changedTrackTagsDiff[DIFF_ADDED_KEY]:
+if DIFF_ADDED_KEY in tagsDiff.keys():
+for tagKey, tagValue in tagsDiff[DIFF_ADDED_KEY].items():

-if changedTargetTracks:
-self.__tac.deleteTrackTagByKey(changedTargeTrackId, addedTrackTagKey)
+# if targetTracks:
+# self.__tac.deleteTrackTagByKey(targetTrackId, addedTrackTagKey)
+self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)

-if DIFF_REMOVED_KEY in changedTrackTagsDiff.keys():
-for removedTrackTagKey in changedTrackTagsDiff[DIFF_REMOVED_KEY]:
-if changedCurrentTracks:
-self.__tac.updateTrackTag(changedTargeTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])

-if DIFF_CHANGED_KEY in changedTrackTagsDiff.keys():
-for changedTrackTagKey in changedTrackTagsDiff[DIFF_CHANGED_KEY]:
-if changedCurrentTracks:
-self.__tac.updateTrackTag(changedTargeTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
+if DIFF_REMOVED_KEY in tagsDiff.keys():
+for tagKey, tagValue in tagsDiff[DIFF_REMOVED_KEY].items():
+# if changedCurrentTracks:
+# self.__tac.updateTrackTag(targetTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
+self.__tac.deleteTrackTagByKey(targetTrackId, tagKey)

-if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
-changedTrackDispositionDiff = changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
+if DIFF_CHANGED_KEY in tagsDiff.keys():
+for tagKey, tagValue in tagsDiff[DIFF_CHANGED_KEY].items():
+# if changedCurrentTracks:
+# self.__tac.updateTrackTag(targetTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
+self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)


+if TrackDescriptor.DISPOSITION_SET_KEY in trackDiff.keys():
+changedTrackDispositionDiff = trackDiff[TrackDescriptor.DISPOSITION_SET_KEY]

 if DIFF_ADDED_KEY in changedTrackDispositionDiff.keys():
-for changedTrackAddedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
-if changedTargetTrackIndex is not None:
-self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackAddedDisposition, False)
+for changedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
+if targetTrackIndex is not None:
+self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, True)

 if DIFF_REMOVED_KEY in changedTrackDispositionDiff.keys():
-for changedTrackRemovedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
-if changedTargetTrackIndex is not None:
-self.__tc.setDispositionState(self.__currentPattern.getId(), changedTargetTrackIndex, changedTrackRemovedDisposition, True)
+for changedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
+if targetTrackIndex is not None:
+self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, False)


 self.updateDifferences()
@@ -666,11 +739,11 @@ class MediaDetailsScreen(Screen):

 def action_edit_pattern(self):

-patternDescriptor = self.getPatternDescriptorFromInput()
+patternObj = self.getPatternObjFromInput()

-if patternDescriptor['pattern']:
+if patternObj['pattern']:

-selectedPatternId = self.__pc.findPattern(patternDescriptor)
+selectedPatternId = self.__pc.findPattern(patternObj)

 if selectedPatternId is None:
 raise click.ClickException(f"MediaDetailsScreen.action_edit_pattern(): Pattern to edit has no id")

@@ -1,6 +1,6 @@
 import click

-from sqlalchemy import Column, Integer, String, ForeignKey
+from sqlalchemy import Column, Integer, String, Text, ForeignKey
 from sqlalchemy.orm import relationship

 from .show import Base, Show
@@ -31,9 +31,13 @@ class Pattern(Base):

 tracks = relationship('Track', back_populates='pattern', cascade="all, delete", lazy='joined')


 media_tags = relationship('MediaTag', back_populates='pattern', cascade="all, delete", lazy='joined')

+quality = Column(Integer, default=0)
+
+notes = Column(Text, default='')



 def getId(self):
 return int(self.id)
@@ -11,17 +11,20 @@ class PatternController():
 self.Session = self.context['database']['session'] # convenience


-def addPattern(self, patternDescriptor):
+def addPattern(self, patternObj):
+"""Adds pattern to database from obj
+
+Returns database id or 0 if pattern already exists"""

 try:

 s = self.Session()
-q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']),
-Pattern.pattern == str(patternDescriptor['pattern']))
+q = s.query(Pattern).filter(Pattern.show_id == int(patternObj['show_id']),
+Pattern.pattern == str(patternObj['pattern']))

 if not q.count():
-pattern = Pattern(show_id = int(patternDescriptor['show_id']),
-pattern = str(patternDescriptor['pattern']))
+pattern = Pattern(show_id = int(patternObj['show_id']),
+pattern = str(patternObj['pattern']))
 s.add(pattern)
 s.commit()
 return pattern.getId()
@@ -34,7 +37,7 @@ class PatternController():
 s.close()


-def updatePattern(self, patternId, patternDescriptor):
+def updatePattern(self, patternId, patternObj):

 try:
 s = self.Session()
@@ -42,10 +45,12 @@ class PatternController():

 if q.count():

-pattern = q.first()
+pattern: Pattern = q.first()

-pattern.show_id = int(patternDescriptor['show_id'])
-pattern.pattern = str(patternDescriptor['pattern'])
+pattern.show_id = int(patternObj['show_id'])
+pattern.pattern = str(patternObj['pattern'])
+pattern.quality = str(patternObj['quality'])
+pattern.notes = str(patternObj['notes'])

 s.commit()
 return True
@@ -60,11 +65,11 @@ class PatternController():



-def findPattern(self, patternDescriptor):
+def findPattern(self, patternObj):

 try:
 s = self.Session()
-q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']), Pattern.pattern == str(patternDescriptor['pattern']))
+q = s.query(Pattern).filter(Pattern.show_id == int(patternObj['show_id']), Pattern.pattern == str(patternObj['pattern']))

 if q.count():
 pattern = q.first()
@@ -2,7 +2,7 @@ import click, re
 from typing import List

 from textual.screen import Screen
-from textual.widgets import Header, Footer, Static, Button, Input, DataTable
+from textual.widgets import Header, Footer, Static, Button, Input, DataTable, TextArea
 from textual.containers import Grid

 from ffx.model.pattern import Pattern
@@ -30,6 +30,8 @@ from ffx.file_properties import FileProperties
 from ffx.iso_language import IsoLanguage
 from ffx.audio_layout import AudioLayout

+from ffx.helper import formatRichColor, removeRichColor


 # Screen[dict[int, str, int]]
 class PatternDetailsScreen(Screen):
@@ -37,8 +39,8 @@ class PatternDetailsScreen(Screen):
 CSS = """

 Grid {
-grid-size: 7 13;
-grid-rows: 2 2 2 2 2 8 2 2 8 2 2 2 2;
+grid-size: 7 17;
+grid-rows: 2 2 2 2 2 2 6 2 2 8 2 2 8 2 2 2 2;
 grid-columns: 25 25 25 25 25 25 25;
 height: 100%;
 width: 100%;
@@ -87,6 +89,12 @@ class PatternDetailsScreen(Screen):
 column-span: 7;
 }


+.four_box {
+min-height: 6;
+}


 .box {
 height: 100%;
 border: solid green;
@@ -103,6 +111,20 @@ class PatternDetailsScreen(Screen):
 self.context = self.app.getContext()
 self.Session = self.context['database']['session'] # convenience

+self.__configurationData = self.context['config'].getData()
+
+metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
+
+self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
+self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
+self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
+self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
+if 'streams' in metadataConfiguration.keys()
+and 'remove' in metadataConfiguration['streams'].keys() else [])
+self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
+if 'streams' in metadataConfiguration.keys()
+and 'ignore' in metadataConfiguration['streams'].keys() else [])

 self.__pc = PatternController(context = self.context)
 self.__sc = ShowController(context = self.context)
 self.__tc = TrackController(context = self.context)
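The same nested `metadata` configuration block (keys `metadata`, `signature`, `remove`, `ignore`, `streams`) is read with repeated `in ... .keys()` guards in several screens and in the TrackController below. A minimal sketch of an equivalent lookup with chained `dict.get()` calls; the helper name is hypothetical and not part of this change set:

```python
# Hypothetical helper; only the configuration keys used in the hunks above are assumed.
def readMetadataConfiguration(configurationData: dict) -> dict:
    metadata = (configurationData or {}).get('metadata', {})
    streams = metadata.get('streams', {})
    return {
        'signatureTags':    metadata.get('signature', {}),
        'removeGlobalKeys': metadata.get('remove', []),
        'ignoreGlobalKeys': metadata.get('ignore', []),
        'removeTrackKeys':  streams.get('remove', []),
        'ignoreTrackKeys':  streams.get('ignore', []),
    }
```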
@@ -147,29 +169,31 @@ class PatternDetailsScreen(Screen):

 td : TrackDescriptor = tr.getDescriptor(self.context)

-trackType = td.getType()
-if not trackType in typeCounter.keys():
-typeCounter[trackType] = 0
+if (trackType := td.getType()) != TrackType.ATTACHMENT:

-dispoSet = td.getDispositionSet()
+if not trackType in typeCounter.keys():
+typeCounter[trackType] = 0

-trackLanguage = td.getLanguage()
-audioLayout = td.getAudioLayout()
-row = (td.getIndex(),
-trackType.label(),
-typeCounter[trackType],
-td.getCodec().label(),
-audioLayout.label() if trackType == TrackType.AUDIO
-and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
-trackLanguage.label() if trackLanguage != IsoLanguage.UNDEFINED else ' ',
-td.getTitle(),
-'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
-'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
-td.getSourceIndex())
+dispoSet = td.getDispositionSet()

-self.tracksTable.add_row(*map(str, row))
+trackLanguage = td.getLanguage()
+audioLayout = td.getAudioLayout()

-typeCounter[trackType] += 1
+row = (td.getIndex(),
+trackType.label(),
+typeCounter[trackType],
+td.getCodec().label(),
+audioLayout.label() if trackType == TrackType.AUDIO
+and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
+trackLanguage.label() if trackLanguage != IsoLanguage.UNDEFINED else ' ',
+td.getTitle(),
+'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
+'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
+td.getSourceIndex())

+self.tracksTable.add_row(*map(str, row))

+typeCounter[trackType] += 1


 def swapTracks(self, trackIndex1: int, trackIndex2: int):
@@ -217,7 +241,15 @@ class PatternDetailsScreen(Screen):
 tags = self.__tac.findAllMediaTags(self.__pattern.getId())

 for tagKey, tagValue in tags.items():
-row = (tagKey, tagValue)
+textColor = None
+if tagKey in self.__ignoreGlobalKeys:
+textColor = 'blue'
+if tagKey in self.__removeGlobalKeys:
+textColor = 'red'
+
+# if tagKey not in self.__ignoreTrackKeys:
+row = (formatRichColor(tagKey, textColor), formatRichColor(tagValue, textColor))
 self.tagsTable.add_row(*map(str, row))


@@ -230,6 +262,12 @@ class PatternDetailsScreen(Screen):

 self.query_one("#pattern_input", Input).value = str(self.__pattern.getPattern())

+if self.__pattern and self.__pattern.quality:
+self.query_one("#quality_input", Input).value = str(self.__pattern.quality)
+
+if self.__pattern and self.__pattern.notes:
+self.query_one("#notes_textarea", TextArea).text = str(self.__pattern.notes)

 self.updateTags()
 self.updateTracks()

@@ -276,10 +314,31 @@ class PatternDetailsScreen(Screen):

 # 3
 yield Static(" ", classes="seven")



 # 4
-yield Static(" ", classes="seven")
+yield Static("Quality")
+yield Input(type="integer", id="quality_input")
+yield Static(' ', classes="five")


 # 5
+yield Static(" ", classes="seven")


+# 6
+yield Static("Notes")
+yield Static(" ", classes="six")

+# 7
+yield TextArea(id="notes_textarea", classes="four_box seven")


+# 8
+yield Static(" ", classes="seven")

+# 9
 yield Static("Media Tags")


@@ -296,13 +355,13 @@ class PatternDetailsScreen(Screen):
 yield Static(" ")
 yield Static(" ")

-# 6
+# 10
 yield self.tagsTable

-# 7
+# 11
 yield Static(" ", classes="seven")

-# 8
+# 12
 yield Static("Streams")


@@ -319,21 +378,21 @@ class PatternDetailsScreen(Screen):
 yield Button("Up", id="button_track_up")
 yield Button("Down", id="button_track_down")

-# 9
+# 13
 yield self.tracksTable

-# 10
+# 14
 yield Static(" ", classes="seven")

-# 11
+# 15
 yield Static(" ", classes="seven")

-# 12
+# 16
 yield Button("Save", id="save_button")
 yield Button("Cancel", id="cancel_button")
 yield Static(" ", classes="five")

-# 13
+# 17
 yield Static(" ", classes="seven")

 yield Footer()
@@ -342,6 +401,14 @@ class PatternDetailsScreen(Screen):
 def getPatternFromInput(self):
 return str(self.query_one("#pattern_input", Input).value)

+def getQualityFromInput(self):
+try:
+return int(self.query_one("#quality_input", Input).value)
+except ValueError:
+return 0
+
+def getNotesFromInput(self):
+return str(self.query_one("#notes_textarea", TextArea).text)


 def getSelectedTrackDescriptor(self):
@@ -382,8 +449,8 @@ class PatternDetailsScreen(Screen):
 if row_key is not None:
 selected_tag_data = self.tagsTable.get_row(row_key)

-tagKey = str(selected_tag_data[0])
-tagValue = str(selected_tag_data[1])
+tagKey = removeRichColor(selected_tag_data[0])
+tagValue = removeRichColor(selected_tag_data[1])

 return tagKey, tagValue

@@ -403,6 +470,8 @@ class PatternDetailsScreen(Screen):
 patternDescriptor = {}
 patternDescriptor['show_id'] = self.__showDescriptor.getId()
 patternDescriptor['pattern'] = self.getPatternFromInput()
+patternDescriptor['quality'] = self.getQualityFromInput()
+patternDescriptor['notes'] = self.getNotesFromInput()

 if self.__pattern is not None:

@@ -519,8 +588,10 @@ class PatternDetailsScreen(Screen):

 self.tracksTable.update_cell(row_key, self.column_key_track_language, trackDescriptor.getLanguage().label())
 self.tracksTable.update_cell(row_key, self.column_key_track_title, trackDescriptor.getTitle())
-self.tracksTable.update_cell(row_key, self.column_key_track_default, 'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
-self.tracksTable.update_cell(row_key, self.column_key_track_forced, 'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')
+self.tracksTable.update_cell(row_key, self.column_key_track_default,
+'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
+self.tracksTable.update_cell(row_key, self.column_key_track_forced,
+'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')

 except CellDoesNotExist:
 pass
@@ -545,4 +616,6 @@ class PatternDetailsScreen(Screen):
 raise click.ClickException(f"PatternDetailsScreen.handle_delete_tag: pattern not set")

 if self.__tac.deleteMediaTagByKey(self.__pattern.getId(), tag[0]):
 self.updateTags()
+else:
+raise click.ClickException('tag delete failed')
@@ -15,14 +15,8 @@ def executeProcess(commandSequence: List[str], directory: str = None, context: d

 niceSequence = []

-niceness = (int(context['resource_limits']['niceness'])
-if not context is None
-and 'resource_limits' in context.keys()
-and 'niceness' in context['resource_limits'].keys() else 99)
-cpu_percent = (int(context['resource_limits']['cpu_percent'])
-if not context is None
-and 'resource_limits' in context.keys()
-and 'cpu_percent' in context['resource_limits'].keys() else 0)
+niceness = int((context or {}).get('resource_limits', {}).get('niceness', 99))
+cpu_percent = int((context or {}).get('resource_limits', {}).get('cpu_percent', 0))

 if niceness >= -20 and niceness <= 19:
 niceSequence += ['nice', '-n', str(niceness)]
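The rewritten lookup above replaces the nested `in ... .keys()` guards with chained `dict.get()` calls over the `resource_limits` block. A minimal, self-contained sketch of the same pattern; the `context` layout, defaults, and the `nice` prefix come from the hunk above, while the `cpulimit` wrapping is an assumption (the playbook further down installs cpulimit, but its invocation is not shown in this diff):

```python
# Sketch only; mirrors the defaulting behaviour of the hunk above.
def buildNicePrefix(context: dict) -> list:
    limits = (context or {}).get('resource_limits', {})
    niceness = int(limits.get('niceness', 99))       # 99 is out of range, so no 'nice' prefix
    cpu_percent = int(limits.get('cpu_percent', 0))  # 0 is assumed to mean "no limit"

    prefix = []
    if -20 <= niceness <= 19:
        prefix += ['nice', '-n', str(niceness)]
    if cpu_percent > 0:
        # Assumption: cpu_percent is applied via cpulimit; not confirmed by this diff.
        prefix += ['cpulimit', '-l', str(cpu_percent), '--']
    return prefix

# Example: buildNicePrefix({'resource_limits': {'niceness': 10}}) -> ['nice', '-n', '10']
```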
@@ -18,9 +18,16 @@ class ShiftedSeasonController():
 self.Session = self.context['database']['session'] # convenience

 def checkShiftedSeason(self, showId: int, shiftedSeasonObj: dict, shiftedSeasonId: int = 0):
+"""
+Check if for a particula season

+shiftedSeasonId
+"""

 try:
 s = self.Session()

+originalSeason = shiftedSeasonObj['original_season']
 firstEpisode = int(shiftedSeasonObj['first_episode'])
 lastEpisode = int(shiftedSeasonObj['last_episode'])

@@ -31,11 +38,14 @@ class ShiftedSeasonController():
 siblingShiftedSeason: ShiftedSeason
 for siblingShiftedSeason in q.all():

+siblingOriginalSeason = siblingShiftedSeason.getOriginalSeason
 siblingFirstEpisode = siblingShiftedSeason.getFirstEpisode()
 siblingLastEpisode = siblingShiftedSeason.getLastEpisode()

-if (lastEpisode >= siblingFirstEpisode
+if (originalSeason == siblingOriginalSeason
+and lastEpisode >= siblingFirstEpisode
 and siblingLastEpisode >= firstEpisode):

 return False
 return True

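The new condition restricts the collision test to siblings of the same original season: two episode ranges [first1, last1] and [first2, last2] overlap exactly when last1 >= first2 and last2 >= first1. A hedged, stand-alone sketch of that rule outside the ORM (names are illustrative; note the hunk assigns `getOriginalSeason` without calling it, so the sketch simply works on plain dicts):

```python
# Illustrative stand-alone version of the overlap rule used above.
def rangesOverlap(first1: int, last1: int, first2: int, last2: int) -> bool:
    return last1 >= first2 and last2 >= first1

def shiftedSeasonIsValid(candidate: dict, siblings: list) -> bool:
    """candidate/siblings: dicts with 'original_season', 'first_episode', 'last_episode'."""
    for sibling in siblings:
        if (candidate['original_season'] == sibling['original_season']
                and rangesOverlap(int(candidate['first_episode']), int(candidate['last_episode']),
                                  int(sibling['first_episode']), int(sibling['last_episode']))):
            return False
    return True
```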
@@ -444,7 +444,7 @@ class ShowDetailsScreen(Screen):

 # Event handler for button press
 def on_button_pressed(self, event: Button.Pressed) -> None:
-# Check if the button pressed is the one we are interested in
 if event.button.id == "save_button":

 showDescriptor = self.getShowDescriptorFromInput()

@@ -162,4 +162,7 @@ class ShowsScreen(Screen):

 yield self.table

-yield Footer()
+f = Footer()
+f.description = "yolo"
+
+yield f

@@ -68,7 +68,7 @@ class TagController():
 s = self.Session()

 q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId),
 MediaTag.key == str(tagKey))
 if q.count():
 tag = q.first()
 s.delete(tag)
@@ -164,7 +164,8 @@ def createMediaTestFile(mediaDescriptor: MediaDescriptor,

 subIndexCounter = {}

-for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
+# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
+for trackDescriptor in mediaDescriptor.getTrackDescriptors():

 trackType = trackDescriptor.getType()


@@ -122,7 +122,8 @@ class Scenario2(Scenario):
 resultFileProperties = FileProperties(testContext, resultFile)
 resultMediaDescriptor = resultFileProperties.getMediaDescriptor()

-resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
+# resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
+resultMediaTracks = resultMediaDescriptor.getTrackDescriptors()

 for assertIndex in range(len(assertSelectorList)):


@@ -223,7 +223,8 @@ class Scenario4(Scenario):
 self._logger.debug(f"{variantLabel}: Result file properties: {rfp.getFilename()} season={rfp.getSeason()} episode={rfp.getEpisode()}")

 rmd = rfp.getMediaDescriptor()
-rmt = rmd.getAllTrackDescriptors()
+# rmt = rmd.getAllTrackDescriptors()
+rmt = rmd.getTrackDescriptors()

 for l in rmd.getConfiguration(label = 'resultMediaDescriptor'):
 self._logger.debug(l)
@@ -3,15 +3,26 @@ from enum import Enum

 class TrackCodec(Enum):

 H265 = {'identifier': 'hevc', 'format': 'h265', 'extension': 'h265' ,'label': 'H.265'}
 H264 = {'identifier': 'h264', 'format': 'h264', 'extension': 'h264' ,'label': 'H.264'}
-AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
-AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
-DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
-ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
-PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
+MPEG4 = {'identifier': 'mpeg4', 'format': 'm4v', 'extension': 'm4v' ,'label': 'MPEG-4'}
+MPEG2 = {'identifier': 'mpeg2video', 'format': 'mpeg2video', 'extension': 'mpg' ,'label': 'MPEG-2'}

-UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}
+AAC = {'identifier': 'aac', 'format': None, 'extension': 'aac' , 'label': 'AAC'}
+AC3 = {'identifier': 'ac3', 'format': 'ac3', 'extension': 'ac3' , 'label': 'AC3'}
+EAC3 = {'identifier': 'eac3', 'format': 'eac3', 'extension': 'eac3' , 'label': 'EAC3'}
+DTS = {'identifier': 'dts', 'format': 'dts', 'extension': 'dts' , 'label': 'DTS'}
+MP3 = {'identifier': 'mp3', 'format': 'mp3', 'extension': 'mp3' , 'label': 'MP3'}

+SRT = {'identifier': 'subrip', 'format': 'srt', 'extension': 'srt' , 'label': 'SRT'}
+ASS = {'identifier': 'ass', 'format': 'ass', 'extension': 'ass' , 'label': 'ASS'}
+TTF = {'identifier': 'ttf', 'format': None, 'extension': 'ttf' , 'label': 'TTF'}
+PGS = {'identifier': 'hdmv_pgs_subtitle', 'format': 'sup', 'extension': 'sup' , 'label': 'PGS'}
+VOBSUB = {'identifier': 'dvd_subtitle', 'format': None, 'extension': 'mkv' , 'label': 'VobSub'}

+PNG = {'identifier': 'png', 'format': None, 'extension': 'png' , 'label': 'PNG'}

+UNKNOWN = {'identifier': 'unknown', 'format': None, 'extension': None, 'label': 'UNKNOWN'}


 def identifier(self):
@@ -23,8 +34,8 @@ class TrackCodec(Enum):
 return str(self.value['label'])

 def format(self):
-"""Returns the codec as single letter"""
-return str(self.value['format'])
+"""Returns the codec """
+return self.value['format']

 def extension(self):
 """Returns the corresponding extension"""
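The expanded TrackCodec enum keys each member by the ffprobe `codec_name` stored in its `identifier` field. A hedged sketch of the kind of reverse lookup such a mapping enables; the `fromIdentifier` helper and the trimmed member list are illustrative only and not part of this change set:

```python
from enum import Enum

class Codec(Enum):
    # Trimmed illustration; values mirror the dict shape used in the diff above.
    H264 = {'identifier': 'h264', 'label': 'H.264'}
    EAC3 = {'identifier': 'eac3', 'label': 'EAC3'}
    UNKNOWN = {'identifier': 'unknown', 'label': 'UNKNOWN'}

    @classmethod
    def fromIdentifier(cls, identifier: str) -> 'Codec':
        # ffprobe reports codec_name; fall back to UNKNOWN for anything unmapped.
        for member in cls:
            if member.value['identifier'] == identifier:
                return member
        return cls.UNKNOWN

# Example: Codec.fromIdentifier('eac3') is Codec.EAC3
```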
@@ -19,6 +19,20 @@ class TrackController():
 self.context = context
 self.Session = self.context['database']['session'] # convenience

+self.__configurationData = self.context['config'].getData()
+
+metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
+
+self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
+self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
+self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
+self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
+if 'streams' in metadataConfiguration.keys()
+and 'remove' in metadataConfiguration['streams'].keys() else [])
+self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
+if 'streams' in metadataConfiguration.keys()
+and 'ignore' in metadataConfiguration['streams'].keys() else [])


 def addTrack(self, trackDescriptor : TrackDescriptor, patternId = None):

@@ -40,10 +54,12 @@ class TrackController():

 for k,v in trackDescriptor.getTags().items():

-tag = TrackTag(track_id = track.id,
-key = k,
-value = v)
-s.add(tag)
+# Filter tags that make no sense to preserve
+if k not in self.__ignoreTrackKeys and k not in self.__removeTrackKeys:
+tag = TrackTag(track_id = track.id,
+key = k,
+value = v)
+s.add(tag)
 s.commit()

 except Exception as ex:
@@ -7,7 +7,7 @@ from .audio_layout import AudioLayout
 from .track_disposition import TrackDisposition
 from .track_codec import TrackCodec

-from .helper import dictDiff, setDiff
+# from .helper import dictDiff, setDiff


 class TrackDescriptor:
@@ -34,7 +34,6 @@ class TrackDescriptor:
 FFPROBE_CODEC_TYPE_KEY = "codec_type"
 FFPROBE_CODEC_KEY = "codec_name"

-CODEC_PGS = 'hdmv_pgs_subtitle'

 def __init__(self, **kwargs):

@@ -321,24 +320,24 @@ class TrackDescriptor:
 else:
 self.__dispositionSet.discard(disposition)

-def compare(self, vsTrackDescriptor: Self):
-compareResult = {}
-tagsDiffResult = dictDiff(vsTrackDescriptor.getTags(), self.getTags())
-if tagsDiffResult:
-compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
-vsDispositions = vsTrackDescriptor.getDispositionSet()
-dispositions = self.getDispositionSet()
-dispositionDiffResult = setDiff(vsDispositions, dispositions)
-if dispositionDiffResult:
-compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
-return compareResult
+# def compare(self, vsTrackDescriptor: Self):
+#
+# compareResult = {}
+#
+# tagsDiffResult = dictKeysDiff(vsTrackDescriptor.getTags(), self.getTags())
+#
+# if tagsDiffResult:
+# compareResult[TrackDescriptor.TAGS_KEY] = tagsDiffResult
+#
+# vsDispositions = vsTrackDescriptor.getDispositionSet()
+# dispositions = self.getDispositionSet()
+#
+# dispositionDiffResult = setDiff(vsDispositions, dispositions)
+#
+# if dispositionDiffResult:
+# compareResult[TrackDescriptor.DISPOSITION_SET_KEY] = dispositionDiffResult
+#
+# return compareResult

 def setExternalSourceFilePath(self, filePath: str):
 self.__externalSourceFilePath = str(filePath)
@@ -24,6 +24,8 @@ from .tag_delete_screen import TagDeleteScreen

 from textual.widgets._data_table import CellDoesNotExist

+from ffx.helper import formatRichColor, removeRichColor


 # Screen[dict[int, str, int]]
 class TrackDetailsScreen(Screen):
@@ -101,6 +103,21 @@ class TrackDetailsScreen(Screen):
 self.context = self.app.getContext()
 self.Session = self.context['database']['session'] # convenience

+self.__configurationData = self.context['config'].getData()
+
+metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
+
+self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
+self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
+self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
+self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
+if 'streams' in metadataConfiguration.keys()
+and 'remove' in metadataConfiguration['streams'].keys() else [])
+self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
+if 'streams' in metadataConfiguration.keys()
+and 'ignore' in metadataConfiguration['streams'].keys() else [])


 self.__tc = TrackController(context = self.context)
 self.__pc = PatternController(context = self.context)
 self.__tac = TagController(context = self.context)
@@ -138,7 +155,14 @@ class TrackDetailsScreen(Screen):
 for k,v in trackTags.items():

 if k != 'language' and k != 'title':
-row = (k,v)
+textColor = None
+if k in self.__ignoreTrackKeys:
+textColor = 'blue'
+if k in self.__removeTrackKeys:
+textColor = 'red'
+
+row = (formatRichColor(k, textColor), formatRichColor(v, textColor))
 self.trackTagsTable.add_row(*map(str, row))


@@ -192,7 +216,7 @@

# 2
yield Static("for pattern")
-yield Static("", id="pattern_label", classes="four")
+yield Static("", id="pattern_label", classes="four", markup=False)

# 3
yield Static(" ", classes="five")
@@ -328,8 +352,8 @@
if row_key is not None:
selected_tag_data = self.trackTagsTable.get_row(row_key)

-tagKey = str(selected_tag_data[0])
+tagKey = removeRichColor(selected_tag_data[0])
-tagValue = str(selected_tag_data[1])
+tagValue = removeRichColor(selected_tag_data[1])

return tagKey, tagValue

@@ -5,6 +5,7 @@ class TrackType(Enum):
VIDEO = {'label': 'video', 'index': 1}
AUDIO = {'label': 'audio', 'index': 2}
SUBTITLE = {'label': 'subtitle', 'index': 3}
+ATTACHMENT = {'label': 'attachment', 'index': 4}

UNKNOWN = {'label': 'unknown', 'index': 0}

@@ -4,6 +4,7 @@ class VideoEncoder(Enum):

AV1 = {'label': 'av1', 'index': 1}
VP9 = {'label': 'vp9', 'index': 2}
+H264 = {'label': 'h264', 'index': 3}

UNDEFINED = {'label': 'undefined', 'index': 0}

@@ -1,8 +0,0 @@
-all:
-hosts:
-hawaii:
-ansible_host: refulgent.de
-ansible_user: osgw
-
-ffxSystemUsername: osgw
-ffxHomeDirectory: /var/local/osgw/lib/osgw
@@ -1,8 +0,0 @@
-all:
-hosts:
-peppermint:
-ansible_host: maveno.de
-ansible_user: osgw
-
-ffxSystemUsername: osgw
-ffxHomeDirectory: /var/local/osgw/lib/osgw
@@ -6,13 +6,26 @@

- name: Update system and install packages
become: true
+when: ansible_os_family == 'Debian'
ansible.builtin.apt:
+update_cache: true
name:
- python3-virtualenv
+- cpulimit
+- ffmpeg
+- git
+- screen
+
+- name: Update system and install packages
+become: true
+when: ansible_os_family == 'Archlinux'
+ansible.builtin.pacman:
+update_cache: true
+name:
+- cpulimit
- ffmpeg
- git
- screen
-update_cache: yes

- name: Create sync dir
become: true
@@ -50,16 +63,6 @@
group: "{{ ffxSystemUsername }}"
mode: 0755

-- name: Prepare ffx virtualenv
-become: true
-become_user: "{{ ffxSystemUsername }}"
-ansible.builtin.pip:
-name:
-- click
-- textual
-- sqlalchemy
-- requests
-virtualenv: "{{ ffxHomeDirectory }}/.local/share/ffx.venv"

- name: Clone ffx repository
become: true
@@ -70,6 +73,15 @@
version: dev


+- name: Install FFX package in venv
+become: true
+become_user: "{{ ffxSystemUsername }}"
+ansible.builtin.pip:
+name: .
+chdir: "{{ ffxHomeDirectory }}/.local/src/ffx"
+virtualenv: "{{ ffxHomeDirectory }}/.local/share/ffx.venv"
+
+
- name: Add TMDB API token placeholer to .bashrc
become: true
become_user: "{{ ffxSystemUsername }}"
@@ -77,7 +89,7 @@
path: "{{ ffxHomeDirectory }}/.bashrc"
insertbefore: BOF
line: >-
-export TMDB_API_KEY="<TMDB API token>"
+export TMDB_API_KEY="{{ ffxTmdbApiKey | default('<TMDB API key>') }}"

- name: Add ffx alias to .bashrc
become: true
@@ -86,8 +98,7 @@
path: "{{ ffxHomeDirectory }}/.bashrc"
insertbefore: BOF
line: >-
-alias ffx="{{ ffxHomeDirectory }}/.local/share/ffx.venv/bin/python
+alias ffx="{{ ffxHomeDirectory }}/.local/share/ffx.venv/bin/ffx
-{{ ffxHomeDirectory }}/.local/src/ffx/bin/ffx.py"


- name: Ensure local sync directory
@@ -2,7 +2,7 @@

. ~/.local/share/ffx.venv/bin/activate
pushd ~/.local/src/ffx/
-git checkout main
+git checkout "${1:-main}"
git pull
pip install --editable .
popd
tools/prepare.sh (new executable file, 444 lines added)
@@ -0,0 +1,444 @@
+#!/usr/bin/env bash
+
+set -u
+
+SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
+
+CONFIG_DIR="${FFX_CONFIG_DIR:-${HOME}/.local/etc}"
+CONFIG_FILE="${FFX_CONFIG_FILE:-${CONFIG_DIR}/ffx.json}"
+VAR_DIR="${FFX_VAR_DIR:-${HOME}/.local/var/ffx}"
+LOG_DIR="${FFX_LOG_DIR:-${HOME}/.local/var/log}"
+DATABASE_FILE="${FFX_DATABASE_FILE:-${VAR_DIR}/ffx.db}"
+
+CHECK_ONLY=0
+
+MUTATIONS=0
+INSTALL_FAILURES=0
+READINESS_FAILURES=0
+
+MISSING_REQUIRED_SYSTEM=()
+MISSING_OPTIONAL_SYSTEM=()
+
+COLOR_RESET=""
+COLOR_GREEN=""
+COLOR_YELLOW=""
+COLOR_RED=""
+
+if [ -t 1 ]; then
+COLOR_RESET="$(printf '\033[0m')"
+COLOR_GREEN="$(printf '\033[32m')"
+COLOR_YELLOW="$(printf '\033[33m')"
+COLOR_RED="$(printf '\033[31m')"
+fi
+
+usage() {
+cat <<EOF
+Usage: $(basename "$0") [--check] [--help]
+
+Prepare the local FFX development environment for this repository.
+
+Options:
+--check Report readiness only. Do not create, install, or modify.
+--help Show this help text.
+
+Environment overrides:
+FFX_CONFIG_DIR Override the parent directory for the seeded ffx.json file.
+FFX_CONFIG_FILE Override the seeded config file path directly.
+FFX_VAR_DIR Override the default data directory.
+FFX_LOG_DIR Override the default log directory.
+FFX_DATABASE_FILE Override the database path written into a newly seeded config.
+EOF
+}
+
+status_ok() {
+printf '%sok%s' "${COLOR_GREEN}" "${COLOR_RESET}"
+}
+
+status_warn() {
+printf '%swarn%s' "${COLOR_YELLOW}" "${COLOR_RESET}"
+}
+
+status_fail() {
+printf '%sfailed%s' "${COLOR_RED}" "${COLOR_RESET}"
+}
+
+report_component() {
+local level="$1"
+local label="$2"
+local detail="$3"
+local rendered_status=""
+
+case "${level}" in
+ok)
+rendered_status="$(status_ok)"
+;;
+warn)
+rendered_status="$(status_warn)"
+;;
+*)
+rendered_status="$(status_fail)"
+;;
+esac
+
+printf '[%s] %s%s\n' "${rendered_status}" "${label}" "${detail:+: $detail}"
+}
+
+command_exists() {
+command -v "$1" >/dev/null 2>&1
+}
+
+check_command_component() {
+command_exists "$2"
+}
+
+check_tmdb_key() {
+[ -n "${TMDB_API_KEY:-}" ]
+}
+
+check_seeded_dir() {
+[ -d "$1" ]
+}
+
+check_seeded_file() {
+[ -f "$1" ]
+}
+
+component_detail() {
+case "$1" in
+git|python3|ffmpeg|ffprobe|cpulimit)
+command -v "$1" || printf "command '%s' not found" "$1"
+;;
+tmdb-key)
+if check_tmdb_key; then
+printf 'TMDB_API_KEY is set'
+else
+printf 'TMDB_API_KEY is unset; TMDB-backed flows will be skipped or fail'
+fi
+;;
+config-dir)
+if check_seeded_dir "${CONFIG_DIR}"; then
+printf '%s' "${CONFIG_DIR}"
+else
+printf 'missing; prep can create it'
+fi
+;;
+var-dir)
+if check_seeded_dir "${VAR_DIR}"; then
+printf '%s' "${VAR_DIR}"
+else
+printf 'missing; prep can create it'
+fi
+;;
+log-dir)
+if check_seeded_dir "${LOG_DIR}"; then
+printf '%s' "${LOG_DIR}"
+else
+printf 'missing; prep can create it'
+fi
+;;
+ffx-config)
+if check_seeded_file "${CONFIG_FILE}"; then
+printf '%s' "${CONFIG_FILE}"
+else
+printf 'missing; prep can seed a default non-destructively'
+fi
+;;
+esac
+}
+
+report_toolchain_component() {
+local label="$1"
+local command_name="$2"
+local required="$3"
+
+if check_command_component "${label}" "${command_name}" "${required}"; then
+report_component ok "${label}" "$(component_detail "${command_name}")"
+else
+if [ "${required}" = "required" ]; then
+report_component failed "${label}" "$(component_detail "${command_name}")"
+MISSING_REQUIRED_SYSTEM+=("${command_name}")
+READINESS_FAILURES=$((READINESS_FAILURES + 1))
+else
+report_component warn "${label}" "$(component_detail "${command_name}")"
+MISSING_OPTIONAL_SYSTEM+=("${command_name}")
+fi
+fi
+}
+
+report_tmdb_component() {
+if check_tmdb_key; then
+report_component ok "TMDB API key" "$(component_detail tmdb-key)"
+else
+report_component warn "TMDB API key" "$(component_detail tmdb-key)"
+fi
+}
+
+report_seeded_component() {
+local label="$1"
+local key="$2"
+local required="$3"
+local ok=1
+
+case "${key}" in
+config-dir)
+check_seeded_dir "${CONFIG_DIR}" || ok=0
+;;
+var-dir)
+check_seeded_dir "${VAR_DIR}" || ok=0
+;;
+log-dir)
+check_seeded_dir "${LOG_DIR}" || ok=0
+;;
+ffx-config)
+check_seeded_file "${CONFIG_FILE}" || ok=0
+;;
+esac
+
+if [ "${ok}" -eq 1 ]; then
+report_component ok "${label}" "$(component_detail "${key}")"
+else
+if [ "${required}" = "required" ]; then
+report_component failed "${label}" "$(component_detail "${key}")"
+READINESS_FAILURES=$((READINESS_FAILURES + 1))
+else
+report_component warn "${label}" "$(component_detail "${key}")"
+fi
+fi
+}
+
+print_dependency_status() {
+READINESS_FAILURES=0
+MISSING_REQUIRED_SYSTEM=()
+MISSING_OPTIONAL_SYSTEM=()
+
+echo "Dependency status:"
+report_toolchain_component "git" "git" "required"
+report_toolchain_component "python3" "python3" "required"
+report_toolchain_component "ffmpeg" "ffmpeg" "required"
+report_toolchain_component "ffprobe" "ffprobe" "required"
+report_toolchain_component "cpulimit" "cpulimit" "required"
+report_tmdb_component
+}
+
+print_seeded_file_status() {
+echo "Seeded local files:"
+report_seeded_component "Config dir" "config-dir" "optional"
+report_seeded_component "Var dir" "var-dir" "optional"
+report_seeded_component "Log dir" "log-dir" "optional"
+report_seeded_component "ffx config" "ffx-config" "optional"
+}
+
+detect_package_manager() {
+if command_exists apt-get; then
+printf 'apt-get\n'
+return 0
+fi
+if command_exists pacman; then
+printf 'pacman\n'
+return 0
+fi
+return 1
+}
+
+run_root_command() {
+if [ "${EUID}" -eq 0 ]; then
+"$@"
+elif command_exists sudo; then
+sudo "$@"
+else
+return 1
+fi
+}
+
+install_system_requirements() {
+local package_manager
+
+if ! package_manager="$(detect_package_manager)"; then
+printf 'No supported package manager found for automatic preparation.\n' >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+
+case "${package_manager}" in
+apt-get)
+printf 'Installing missing system dependencies via apt-get...\n'
+if ! run_root_command apt-get update; then
+printf 'apt-get update failed.\n' >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+if ! run_root_command apt-get install -y git python3 ffmpeg cpulimit; then
+printf 'apt-get install failed.\n' >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+;;
+pacman)
+printf 'Installing missing system dependencies via pacman...\n'
+if ! run_root_command pacman -Sy --noconfirm git python ffmpeg cpulimit; then
+printf 'pacman install failed.\n' >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+;;
+esac
+
+MUTATIONS=$((MUTATIONS + 1))
+return 0
+}
+
+seed_default_config() {
+if [ "${CHECK_ONLY}" -eq 1 ]; then
+return 0
+fi
+
+local created_any=0
+
+if [ ! -d "${CONFIG_DIR}" ]; then
+printf 'Creating config dir at %s...\n' "${CONFIG_DIR}"
+if ! mkdir -p "${CONFIG_DIR}"; then
+printf 'Failed to create config dir at %s.\n' "${CONFIG_DIR}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+created_any=1
+fi
+
+if [ ! -d "${VAR_DIR}" ]; then
+printf 'Creating var dir at %s...\n' "${VAR_DIR}"
+if ! mkdir -p "${VAR_DIR}"; then
+printf 'Failed to create var dir at %s.\n' "${VAR_DIR}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+created_any=1
+fi
+
+if [ ! -d "${LOG_DIR}" ]; then
+printf 'Creating log dir at %s...\n' "${LOG_DIR}"
+if ! mkdir -p "${LOG_DIR}"; then
+printf 'Failed to create log dir at %s.\n' "${LOG_DIR}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+created_any=1
+fi
+
+if [ ! -f "${CONFIG_FILE}" ]; then
+printf 'Seeding ffx config at %s...\n' "${CONFIG_FILE}"
+if ! cat >"${CONFIG_FILE}" <<EOF
+{
+"databasePath": "${DATABASE_FILE}",
+"logDirectory": "${LOG_DIR}",
+"metadata": {
+"signature": {
+"RECODED_WITH": "FFX"
+},
+"remove": [
+"VERSION-eng",
+"creation_time",
+"NAME"
+],
+"streams": {
+"remove": [
+"BPS",
+"NUMBER_OF_FRAMES",
+"NUMBER_OF_BYTES",
+"_STATISTICS_WRITING_APP",
+"_STATISTICS_WRITING_DATE_UTC",
+"_STATISTICS_TAGS",
+"BPS-eng",
+"DURATION-eng",
+"NUMBER_OF_FRAMES-eng",
+"NUMBER_OF_BYTES-eng",
+"_STATISTICS_WRITING_APP-eng",
+"_STATISTICS_WRITING_DATE_UTC-eng",
+"_STATISTICS_TAGS-eng"
+]
+}
+}
+}
+EOF
+then
+printf 'Failed to write ffx config at %s.\n' "${CONFIG_FILE}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+created_any=1
+fi
+
+if [ "${created_any}" -eq 1 ]; then
+MUTATIONS=$((MUTATIONS + 1))
+fi
+
+return 0
+}
+
+parse_args() {
+while [ "$#" -gt 0 ]; do
+case "$1" in
+--check)
+CHECK_ONLY=1
+;;
+--help|-h)
+usage
+exit 0
+;;
+*)
+printf 'Unknown option: %s\n\n' "$1" >&2
+usage >&2
+exit 2
+;;
+esac
+shift
+done
+}
+
+main() {
+parse_args "$@"
+
+print_dependency_status
+
+if [ "${CHECK_ONLY}" -eq 0 ] && [ "${#MISSING_REQUIRED_SYSTEM[@]}" -gt 0 ]; then
+install_system_requirements
+
+echo
+print_dependency_status
+fi
+
+echo
+print_seeded_file_status
+
+if [ "${CHECK_ONLY}" -eq 0 ]; then
+seed_default_config
+echo
+print_seeded_file_status
+fi
+
+echo
+if [ "${INSTALL_FAILURES}" -gt 0 ]; then
+echo "One or more install steps failed; see the status checks above." >&2
+return 1
+fi
+
+if [ "${READINESS_FAILURES}" -gt 0 ]; then
+if [ "${CHECK_ONLY}" -eq 1 ]; then
+echo "Required system prerequisites are incomplete." >&2
+else
+echo "Required components are still missing after preparation." >&2
+fi
+return 1
+fi
+
+if [ "${CHECK_ONLY}" -eq 1 ]; then
+echo "The FFX preparation environment is ready."
+elif [ "${MUTATIONS}" -gt 0 ]; then
+echo "The FFX preparation environment is ready."
+else
+echo "The FFX preparation environment is already prepared."
+fi
+
+return 0
+}
+
+main "$@"
tools/setup.sh (new executable file, 350 lines added)
@@ -0,0 +1,350 @@
+#!/usr/bin/env bash
+
+set -u
+
+ROOT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)"
+VENV_DIR="${HOME}/.local/share/ffx.venv"
+VENV_BIN_DIR="${VENV_DIR}/bin"
+VENV_PYTHON="${VENV_BIN_DIR}/python"
+VENV_PIP="${VENV_BIN_DIR}/pip"
+VENV_FFX="${VENV_BIN_DIR}/ffx"
+BASHRC_FILE="${HOME}/.bashrc"
+ALIAS_BLOCK_BEGIN="# >>> ffx alias >>>"
+ALIAS_BLOCK_END="# <<< ffx alias <<<"
+ALIAS_LINE="alias ffx=\"${VENV_FFX}\""
+
+CHECK_ONLY=0
+READINESS_FAILURES=0
+INSTALL_FAILURES=0
+
+COLOR_RESET=""
+COLOR_GREEN=""
+COLOR_YELLOW=""
+COLOR_RED=""
+
+if [ -t 1 ]; then
+COLOR_RESET="$(printf '\033[0m')"
+COLOR_GREEN="$(printf '\033[32m')"
+COLOR_YELLOW="$(printf '\033[33m')"
+COLOR_RED="$(printf '\033[31m')"
+fi
+
+usage() {
+cat <<EOF
+Usage: $(basename "$0") [--check] [--help]
+
+Prepare the persistent FFX bundle virtualenv at:
+${VENV_DIR}
+
+Actions:
+- create or reuse ${VENV_DIR}
+- install this repository into the venv with pip --editable
+- ensure ${BASHRC_FILE} exposes alias ffx -> ${VENV_FFX}
+
+Options:
+--check Report readiness only. Do not create or modify anything.
+--help Show this help text.
+EOF
+}
+
+status_ok() {
+printf '%sok%s' "${COLOR_GREEN}" "${COLOR_RESET}"
+}
+
+status_warn() {
+printf '%swarn%s' "${COLOR_YELLOW}" "${COLOR_RESET}"
+}
+
+status_fail() {
+printf '%sfailed%s' "${COLOR_RED}" "${COLOR_RESET}"
+}
+
+report_component() {
+local level="$1"
+local label="$2"
+local detail="$3"
+local rendered_status=""
+
+case "${level}" in
+ok)
+rendered_status="$(status_ok)"
+;;
+warn)
+rendered_status="$(status_warn)"
+;;
+*)
+rendered_status="$(status_fail)"
+;;
+esac
+
+printf '[%s] %s%s\n' "${rendered_status}" "${label}" "${detail:+: $detail}"
+}
+
+command_exists() {
+command -v "$1" >/dev/null 2>&1
+}
+
+check_python3() {
+command_exists python3
+}
+
+check_venv_dir() {
+[ -x "${VENV_PYTHON}" ]
+}
+
+check_venv_pip() {
+check_venv_dir && "${VENV_PIP}" --version >/dev/null 2>&1
+}
+
+check_venv_ffx() {
+[ -x "${VENV_FFX}" ]
+}
+
+check_bashrc_file() {
+[ -f "${BASHRC_FILE}" ]
+}
+
+check_bashrc_alias() {
+check_bashrc_file && grep -Fqx "${ALIAS_LINE}" "${BASHRC_FILE}"
+}
+
+detail_python3() {
+command -v python3 || printf "command 'python3' not found"
+}
+
+detail_venv_dir() {
+if check_venv_dir; then
+printf '%s' "${VENV_DIR}"
+else
+printf 'missing %s' "${VENV_DIR}"
+fi
+}
+
+detail_venv_pip() {
+if check_venv_pip; then
+"${VENV_PIP}" --version
+else
+printf 'missing pip in %s' "${VENV_DIR}"
+fi
+}
+
+detail_venv_ffx() {
+if check_venv_ffx; then
+printf '%s' "${VENV_FFX}"
+else
+printf 'missing %s' "${VENV_FFX}"
+fi
+}
+
+detail_bashrc_file() {
+if check_bashrc_file; then
+printf '%s' "${BASHRC_FILE}"
+else
+printf 'missing %s; prep can create it' "${BASHRC_FILE}"
+fi
+}
+
+detail_bashrc_alias() {
+if check_bashrc_alias; then
+printf '%s' "${ALIAS_LINE}"
+else
+printf 'missing alias line for %s' "${VENV_FFX}"
+fi
+}
+
+print_status_report() {
+READINESS_FAILURES=0
+
+echo "Dependency status:"
+if check_python3; then
+report_component ok "python3" "$(detail_python3)"
+else
+report_component failed "python3" "$(detail_python3)"
+READINESS_FAILURES=$((READINESS_FAILURES + 1))
+fi
+
+echo
+echo "Bundle venv status:"
+if check_venv_dir; then
+report_component ok "bundle virtualenv" "$(detail_venv_dir)"
+else
+report_component failed "bundle virtualenv" "$(detail_venv_dir)"
+READINESS_FAILURES=$((READINESS_FAILURES + 1))
+fi
+
+if check_venv_pip; then
+report_component ok "bundle pip" "$(detail_venv_pip)"
+else
+report_component failed "bundle pip" "$(detail_venv_pip)"
+READINESS_FAILURES=$((READINESS_FAILURES + 1))
+fi
+
+if check_venv_ffx; then
+report_component ok "bundle ffx" "$(detail_venv_ffx)"
+else
+report_component failed "bundle ffx" "$(detail_venv_ffx)"
+READINESS_FAILURES=$((READINESS_FAILURES + 1))
+fi
+
+echo
+echo "Shell exposure status:"
+if check_bashrc_file; then
+report_component ok ".bashrc" "$(detail_bashrc_file)"
+else
+report_component warn ".bashrc" "$(detail_bashrc_file)"
+fi
+
+if check_bashrc_alias; then
+report_component ok "ffx alias" "$(detail_bashrc_alias)"
+else
+report_component failed "ffx alias" "$(detail_bashrc_alias)"
+READINESS_FAILURES=$((READINESS_FAILURES + 1))
+fi
+}
+
+ensure_bundle_venv() {
+mkdir -p "${HOME}/.local/share"
+
+if ! check_venv_dir; then
+printf 'Creating bundle virtualenv at %s...\n' "${VENV_DIR}"
+if ! python3 -m venv "${VENV_DIR}"; then
+printf 'Failed to create virtualenv at %s.\n' "${VENV_DIR}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+fi
+
+if ! check_venv_pip; then
+printf 'Missing pip in %s.\n' "${VENV_DIR}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+
+printf 'Installing FFX package into %s...\n' "${VENV_DIR}"
+if ! "${VENV_PIP}" install --editable "${ROOT_DIR}"; then
+printf 'Failed to install FFX package into %s.\n' "${VENV_DIR}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+
+return 0
+}
+
+write_alias_block() {
+local bashrc_dir
+bashrc_dir="$(dirname "${BASHRC_FILE}")"
+mkdir -p "${bashrc_dir}"
+touch "${BASHRC_FILE}"
+
+if grep -Fq "${ALIAS_BLOCK_BEGIN}" "${BASHRC_FILE}" || grep -Fq "${ALIAS_BLOCK_END}" "${BASHRC_FILE}"; then
+if ! python3 - "${BASHRC_FILE}" "${ALIAS_BLOCK_BEGIN}" "${ALIAS_BLOCK_END}" "${ALIAS_LINE}" <<'PY'
+import pathlib
+import sys
+
+path = pathlib.Path(sys.argv[1])
+begin = sys.argv[2]
+end = sys.argv[3]
+alias_line = sys.argv[4]
+
+content = path.read_text()
+block = f"{begin}\n{alias_line}\n{end}\n"
+
+start = content.find(begin)
+stop = content.find(end)
+
+if start != -1 and stop != -1 and stop >= start:
+    stop += len(end)
+    if stop < len(content) and content[stop] == "\n":
+        stop += 1
+    content = content[:start] + block + content[stop:]
+else:
+    if content and not content.endswith("\n"):
+        content += "\n"
+    content += block
+
+path.write_text(content)
+PY
+then
+printf 'Failed to update managed alias block in %s.\n' "${BASHRC_FILE}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+fi
+elif check_bashrc_alias; then
+:
+else
+{
+if [ -s "${BASHRC_FILE}" ] && [ "$(tail -c 1 "${BASHRC_FILE}" 2>/dev/null || true)" != "" ]; then
+printf '\n'
+fi
+printf '%s\n' "${ALIAS_BLOCK_BEGIN}"
+printf '%s\n' "${ALIAS_LINE}"
+printf '%s\n' "${ALIAS_BLOCK_END}"
+} >>"${BASHRC_FILE}" || {
+printf 'Failed to append alias block to %s.\n' "${BASHRC_FILE}" >&2
+INSTALL_FAILURES=$((INSTALL_FAILURES + 1))
+return 1
+}
+fi
+
+return 0
+}
+
+ensure_bashrc_alias() {
+printf 'Ensuring ffx alias in %s...\n' "${BASHRC_FILE}"
+write_alias_block
+}
+
+parse_args() {
+while [ "$#" -gt 0 ]; do
+case "$1" in
+--check)
+CHECK_ONLY=1
+;;
+--help|-h)
+usage
+exit 0
+;;
+*)
+printf 'Unknown option: %s\n\n' "$1" >&2
+usage >&2
+exit 2
+;;
+esac
+shift
+done
+}
+
+main() {
+parse_args "$@"
+
+print_status_report
+
+if [ "${CHECK_ONLY}" -eq 0 ]; then
+if ! check_python3; then
+printf '\npython3 is required before the bundle venv can be prepared.\n' >&2
+exit 1
+fi
+
+echo
+ensure_bundle_venv
+ensure_bashrc_alias
+
+echo
+print_status_report
+fi
+
+echo
+if [ "${INSTALL_FAILURES}" -gt 0 ]; then
+echo "One or more bundle preparation steps failed; see the status checks above." >&2
+exit 1
+fi
+
+if [ "${READINESS_FAILURES}" -gt 0 ]; then
+echo "The FFX bundle virtualenv and/or alias setup is incomplete." >&2
+exit 1
+fi
+
+echo "The FFX bundle virtualenv is ready."
+}
+
+main "$@"