Compare commits
43 Commits
0.1.3
...
2f3658de5b
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2f3658de5b | ||
|
|
ec4af18e7a | ||
|
|
58b01f2be7 | ||
|
|
1d4507782b | ||
|
|
8c7eee580d | ||
|
|
303fd4bc80 | ||
|
|
5febb96916 | ||
|
|
b16e76370b | ||
|
|
feb5441251 | ||
|
|
ea182d4ddb | ||
|
|
f853cf0f85 | ||
|
|
b492be227a | ||
|
|
7fe5b66c0c | ||
|
|
07cc0cd95e | ||
|
|
de2d7c0593 | ||
|
|
826677cb03 | ||
|
|
95aeacf694 | ||
|
|
a3bb16e850 | ||
|
|
0ed85fce4a | ||
|
|
1a0a5f4482 | ||
|
|
06f6322d32 | ||
|
|
0cbcf1a702 | ||
|
|
f94310fdb7 | ||
|
|
06b523f3e8 | ||
|
|
efb4fbfc95 | ||
|
|
2abda01fe6 | ||
|
|
f007ada29f | ||
|
|
44916bf062 | ||
|
|
89129ae5c4 | ||
|
|
696b2b56d3 | ||
|
|
b9185b5b07 | ||
|
|
de0f4a57c1 | ||
|
|
207472283b | ||
|
|
22f4b00e76 | ||
|
|
95d858b2c6 | ||
| 3f0efab49b | |||
| 5c47f193d4 | |||
| 324084c845 | |||
|
|
79f088a86a | ||
|
|
9e37ff18c4 | ||
|
|
3647b25b4c | ||
|
|
772c1d8f90 | ||
|
|
ad58ba5ce6 |
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,3 +1,6 @@
|
|||||||
__pycache__
|
__pycache__
|
||||||
junk/
|
junk/
|
||||||
.vscode/launch.json
|
.vscode
|
||||||
|
.ipynb_checkpoints/
|
||||||
|
ansible/inventory/hawaii.yml
|
||||||
|
ansible/inventory/peppermint.yml
|
||||||
|
|||||||
8
ansible/inventory/ffx.yml
Normal file
8
ansible/inventory/ffx.yml
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
all:
|
||||||
|
hosts:
|
||||||
|
ffx:
|
||||||
|
ansible_host: <domain>
|
||||||
|
ansible_user: <system user>
|
||||||
|
|
||||||
|
ffxSystemUsername: <system user>
|
||||||
|
ffxHomeDirectory: <home directory>
|
||||||
135
ansible/setup_node.yml
Normal file
135
ansible/setup_node.yml
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
- name: Setup FFX node
|
||||||
|
hosts: all
|
||||||
|
vars:
|
||||||
|
ffxRepoUrl: https://gitea.maveno.de/Javanaut/ffx.git
|
||||||
|
tasks:
|
||||||
|
|
||||||
|
- name: Update system and install packages
|
||||||
|
become: true
|
||||||
|
ansible.builtin.apt:
|
||||||
|
name:
|
||||||
|
- python3-virtualenv
|
||||||
|
- ffmpeg
|
||||||
|
- git
|
||||||
|
- screen
|
||||||
|
update_cache: yes
|
||||||
|
|
||||||
|
- name: Create sync dir
|
||||||
|
become: true
|
||||||
|
file:
|
||||||
|
path: "{{ ffxHomeDirectory }}/.local/var/sync/ffx"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ ffxSystemUsername }}"
|
||||||
|
group: "{{ ffxSystemUsername }}"
|
||||||
|
mode: 0755
|
||||||
|
|
||||||
|
- name: Ensure local etc directory
|
||||||
|
become: true
|
||||||
|
file:
|
||||||
|
path: "{{ ffxHomeDirectory }}/.local/etc"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ ffxSystemUsername }}"
|
||||||
|
group: "{{ ffxSystemUsername }}"
|
||||||
|
mode: 0755
|
||||||
|
|
||||||
|
- name: Ensure local src directory
|
||||||
|
become: true
|
||||||
|
file:
|
||||||
|
path: "{{ ffxHomeDirectory }}/.local/src"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ ffxSystemUsername }}"
|
||||||
|
group: "{{ ffxSystemUsername }}"
|
||||||
|
mode: 0755
|
||||||
|
|
||||||
|
- name: Ensure local share directory
|
||||||
|
become: true
|
||||||
|
file:
|
||||||
|
path: "{{ ffxHomeDirectory }}/.local/share"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ ffxSystemUsername }}"
|
||||||
|
group: "{{ ffxSystemUsername }}"
|
||||||
|
mode: 0755
|
||||||
|
|
||||||
|
- name: Prepare ffx virtualenv
|
||||||
|
become: true
|
||||||
|
become_user: "{{ ffxSystemUsername }}"
|
||||||
|
ansible.builtin.pip:
|
||||||
|
name:
|
||||||
|
- click
|
||||||
|
- textual
|
||||||
|
- sqlalchemy
|
||||||
|
- requests
|
||||||
|
virtualenv: "{{ ffxHomeDirectory }}/.local/share/ffx.venv"
|
||||||
|
|
||||||
|
- name: Clone ffx repository
|
||||||
|
become: true
|
||||||
|
become_user: "{{ ffxSystemUsername }}"
|
||||||
|
ansible.builtin.git:
|
||||||
|
repo: "{{ ffxRepoUrl }}"
|
||||||
|
dest: "{{ ffxHomeDirectory }}/.local/src/ffx"
|
||||||
|
version: dev
|
||||||
|
|
||||||
|
|
||||||
|
- name: Add TMDB API token placeholer to .bashrc
|
||||||
|
become: true
|
||||||
|
become_user: "{{ ffxSystemUsername }}"
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: "{{ ffxHomeDirectory }}/.bashrc"
|
||||||
|
insertbefore: BOF
|
||||||
|
line: >-
|
||||||
|
export TMDB_API_KEY="<TMDB API token>"
|
||||||
|
|
||||||
|
- name: Add ffx alias to .bashrc
|
||||||
|
become: true
|
||||||
|
become_user: "{{ ffxSystemUsername }}"
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: "{{ ffxHomeDirectory }}/.bashrc"
|
||||||
|
insertbefore: BOF
|
||||||
|
line: >-
|
||||||
|
alias ffx="{{ ffxHomeDirectory }}/.local/share/ffx.venv/bin/python
|
||||||
|
{{ ffxHomeDirectory }}/.local/src/ffx/bin/ffx.py"
|
||||||
|
|
||||||
|
|
||||||
|
- name: Ensure local sync directory
|
||||||
|
become: true
|
||||||
|
file:
|
||||||
|
path: "{{ ffxHomeDirectory }}/.local/var/sync/ffx"
|
||||||
|
state: directory
|
||||||
|
owner: "{{ ffxSystemUsername }}"
|
||||||
|
group: "{{ ffxSystemUsername }}"
|
||||||
|
mode: 0755
|
||||||
|
|
||||||
|
- name: Create ffx config file
|
||||||
|
become: true
|
||||||
|
become_user: "{{ ffxSystemUsername }}"
|
||||||
|
vars:
|
||||||
|
ffxConfiguration:
|
||||||
|
databasePath: "{{ ffxHomeDirectory }}/.local/var/sync/ffx/ffx.db"
|
||||||
|
metadata:
|
||||||
|
signature:
|
||||||
|
RECODED_WITH: FFX
|
||||||
|
remove:
|
||||||
|
- VERSION-eng
|
||||||
|
- creation_time
|
||||||
|
- NAME
|
||||||
|
streams:
|
||||||
|
remove:
|
||||||
|
- BPS
|
||||||
|
- NUMBER_OF_FRAMES
|
||||||
|
- NUMBER_OF_BYTES
|
||||||
|
- _STATISTICS_WRITING_APP
|
||||||
|
- _STATISTICS_WRITING_DATE_UTC
|
||||||
|
- _STATISTICS_TAGS
|
||||||
|
- BPS-eng
|
||||||
|
- DURATION-eng
|
||||||
|
- NUMBER_OF_FRAMES-eng
|
||||||
|
- NUMBER_OF_BYTES-eng
|
||||||
|
- _STATISTICS_WRITING_APP-eng
|
||||||
|
- _STATISTICS_WRITING_DATE_UTC-eng
|
||||||
|
- _STATISTICS_TAGS-eng
|
||||||
|
ansible.builtin.copy:
|
||||||
|
content: "{{ ffxConfiguration | to_json }}"
|
||||||
|
dest: "{{ ffxHomeDirectory }}/.local/etc/ffx.json"
|
||||||
|
owner: "{{ ffxSystemUsername }}"
|
||||||
|
group: "{{ ffxSystemUsername }}"
|
||||||
|
mode: 0644
|
||||||
32
bin/check.py
32
bin/check.py
@@ -1,32 +0,0 @@
|
|||||||
import os
|
|
||||||
|
|
||||||
from ffx.pattern_controller import PatternController
|
|
||||||
|
|
||||||
from ffx.model.show import Base
|
|
||||||
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
|
|
||||||
from sqlalchemy.orm import relationship, sessionmaker, Mapped, backref
|
|
||||||
|
|
||||||
filename = 'Boruto.Naruto.Next.Generations.S01E256.GerEngSub.AAC.1080p.WebDL.x264-Tanuki.mkv'
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Data 'input' variable
|
|
||||||
context = {}
|
|
||||||
|
|
||||||
# Initialize DB
|
|
||||||
homeDir = os.path.expanduser("~")
|
|
||||||
ffxVarDir = os.path.join(homeDir, '.local', 'var', 'ffx')
|
|
||||||
if not os.path.exists(ffxVarDir):
|
|
||||||
os.makedirs(ffxVarDir)
|
|
||||||
|
|
||||||
context['database_url'] = f"sqlite:///{os.path.join(ffxVarDir, 'ffx.db')}"
|
|
||||||
context['database_engine'] = create_engine(context['database_url'])
|
|
||||||
context['database_session'] = sessionmaker(bind=context['database_engine'])
|
|
||||||
|
|
||||||
Base.metadata.create_all(context['database_engine'])
|
|
||||||
|
|
||||||
|
|
||||||
pc = PatternController(context)
|
|
||||||
|
|
||||||
|
|
||||||
print(pc.matchFilename(filename))
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
from ffx.helper import dictDiff
|
|
||||||
|
|
||||||
a = {'name': 'yolo', 'mass': 56}
|
|
||||||
b = {'name': 'zolo', 'mass': 58}
|
|
||||||
|
|
||||||
print(dictDiff(a, b))
|
|
||||||
562
bin/ffx.py
562
bin/ffx.py
@@ -1,25 +1,34 @@
|
|||||||
#! /usr/bin/python3
|
#! /usr/bin/python3
|
||||||
|
|
||||||
import os, sys, subprocess, json, click, time, re
|
import os, click, time, logging
|
||||||
|
|
||||||
|
from ffx.configuration_controller import ConfigurationController
|
||||||
|
|
||||||
from ffx.file_properties import FileProperties
|
from ffx.file_properties import FileProperties
|
||||||
|
|
||||||
from ffx.ffx_app import FfxApp
|
from ffx.ffx_app import FfxApp
|
||||||
from ffx.ffx_controller import FfxController
|
from ffx.ffx_controller import FfxController
|
||||||
from ffx.show_controller import ShowController
|
|
||||||
from ffx.tmdb_controller import TmdbController
|
from ffx.tmdb_controller import TmdbController
|
||||||
|
|
||||||
from ffx.database import databaseContext
|
from ffx.database import databaseContext
|
||||||
|
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.show_descriptor import ShowDescriptor
|
||||||
|
|
||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
from ffx.video_encoder import VideoEncoder
|
from ffx.video_encoder import VideoEncoder
|
||||||
from ffx.track_disposition import TrackDisposition
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from ffx.nlmeans_controller import NlmeansController
|
||||||
|
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
|
from ffx.helper import filterFilename
|
||||||
|
|
||||||
|
from ffx.constants import DEFAULT_QUALITY, DEFAULT_AV1_PRESET
|
||||||
|
from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAULT_DTS_BANDWIDTH, DEFAULT_7_1_BANDWIDTH
|
||||||
|
|
||||||
|
|
||||||
VERSION='0.1.3'
|
VERSION='0.2.2'
|
||||||
|
|
||||||
# 0.1.1
|
# 0.1.1
|
||||||
# Bugfixes, TMBD identify shows
|
# Bugfixes, TMBD identify shows
|
||||||
@@ -27,15 +36,57 @@ VERSION='0.1.3'
|
|||||||
# Bugfixes
|
# Bugfixes
|
||||||
# 0.1.3
|
# 0.1.3
|
||||||
# Subtitle file imports
|
# Subtitle file imports
|
||||||
|
# 0.2.0
|
||||||
|
# Tests, Config-File
|
||||||
|
# 0.2.1
|
||||||
|
# Signature, Tags cleaning, Bugfixes, Refactoring
|
||||||
|
# 0.2.2
|
||||||
|
# CLI-Overrides
|
||||||
|
|
||||||
|
|
||||||
@click.group()
|
@click.group()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def ffx(ctx):
|
@click.option('--database-file', type=str, default='', help='Path to database file')
|
||||||
|
@click.option('-v', '--verbose', type=int, default=0, help='Set verbosity of output')
|
||||||
|
@click.option("--dry-run", is_flag=True, default=False)
|
||||||
|
def ffx(ctx, database_file, verbose, dry_run):
|
||||||
"""FFX"""
|
"""FFX"""
|
||||||
|
|
||||||
ctx.obj = {}
|
ctx.obj = {}
|
||||||
ctx.obj['database'] = databaseContext()
|
|
||||||
|
ctx.obj['config'] = ConfigurationController()
|
||||||
|
|
||||||
|
ctx.obj['database'] = databaseContext(databasePath=database_file
|
||||||
|
if database_file else ctx.obj['config'].getDatabaseFilePath())
|
||||||
|
|
||||||
|
ctx.obj['dry_run'] = dry_run
|
||||||
|
ctx.obj['verbosity'] = verbose
|
||||||
|
|
||||||
|
# Critical 50
|
||||||
|
# Error 40
|
||||||
|
# Warning 30
|
||||||
|
# Info 20
|
||||||
|
# Debug 10
|
||||||
|
fileLogVerbosity = max(40 - verbose * 10, 10)
|
||||||
|
consoleLogVerbosity = max(20 - verbose * 10, 10)
|
||||||
|
|
||||||
|
ctx.obj['logger'] = logging.getLogger('FFX')
|
||||||
|
ctx.obj['logger'].setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
ffxFileHandler = logging.FileHandler(ctx.obj['config'].getLogFilePath())
|
||||||
|
ffxFileHandler.setLevel(fileLogVerbosity)
|
||||||
|
ffxConsoleHandler = logging.StreamHandler()
|
||||||
|
ffxConsoleHandler.setLevel(consoleLogVerbosity)
|
||||||
|
|
||||||
|
fileFormatter = logging.Formatter(
|
||||||
|
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||||
|
ffxFileHandler.setFormatter(fileFormatter)
|
||||||
|
consoleFormatter = logging.Formatter(
|
||||||
|
'%(message)s')
|
||||||
|
ffxConsoleHandler.setFormatter(consoleFormatter)
|
||||||
|
|
||||||
|
ctx.obj['logger'].addHandler(ffxConsoleHandler)
|
||||||
|
ctx.obj['logger'].addHandler(ffxFileHandler)
|
||||||
|
|
||||||
|
|
||||||
# Define a subcommand
|
# Define a subcommand
|
||||||
@@ -51,8 +102,6 @@ def help():
|
|||||||
click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
|
click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
@ffx.command()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
@click.argument('filename', nargs=1)
|
@click.argument('filename', nargs=1)
|
||||||
@@ -106,16 +155,22 @@ def getUnmuxSequence(trackDescriptor: TrackDescriptor, sourcePath, targetPrefix,
|
|||||||
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
|
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
|
||||||
@click.option("-o", "--output-directory", type=str, default='')
|
@click.option("-o", "--output-directory", type=str, default='')
|
||||||
@click.option("-s", "--subtitles-only", is_flag=True, default=False)
|
@click.option("-s", "--subtitles-only", is_flag=True, default=False)
|
||||||
@click.option("--dry-run", is_flag=True, default=False)
|
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
|
||||||
|
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
|
||||||
def unmux(ctx,
|
def unmux(ctx,
|
||||||
paths,
|
paths,
|
||||||
label,
|
label,
|
||||||
output_directory,
|
output_directory,
|
||||||
subtitles_only,
|
subtitles_only,
|
||||||
dry_run):
|
nice,
|
||||||
|
cpu):
|
||||||
|
|
||||||
existingSourcePaths = [p for p in paths if os.path.isfile(p)]
|
existingSourcePaths = [p for p in paths if os.path.isfile(p)]
|
||||||
click.echo(f"\nUnmuxing {len(existingSourcePaths)} files")
|
ctx.obj['logger'].debug(f"\nUnmuxing {len(existingSourcePaths)} files")
|
||||||
|
|
||||||
|
ctx.obj['resource_limits'] = {}
|
||||||
|
ctx.obj['resource_limits']['niceness'] = nice
|
||||||
|
ctx.obj['resource_limits']['cpu_percent'] = cpu
|
||||||
|
|
||||||
for sourcePath in existingSourcePaths:
|
for sourcePath in existingSourcePaths:
|
||||||
|
|
||||||
@@ -133,10 +188,10 @@ def unmux(ctx,
|
|||||||
targetIndicator = f"_S{season}E{episode}" if label and season != -1 and episode != -1 else ''
|
targetIndicator = f"_S{season}E{episode}" if label and season != -1 and episode != -1 else ''
|
||||||
|
|
||||||
if label and not targetIndicator:
|
if label and not targetIndicator:
|
||||||
click.echo(f"Skipping file {fp.getFilename()}: Label set but no indicator recognized")
|
ctx.obj['logger'].warning(f"Skipping file {fp.getFilename()}: Label set but no indicator recognized")
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
click.echo(f"\nUnmuxing file {fp.getFilename()}\n")
|
ctx.obj['logger'].debug(f"\nUnmuxing file {fp.getFilename()}\n")
|
||||||
|
|
||||||
for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
|
for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
|
||||||
|
|
||||||
@@ -148,15 +203,15 @@ def unmux(ctx,
|
|||||||
unmuxSequence = getUnmuxSequence(trackDescriptor, sourcePath, targetPrefix, targetDirectory = output_directory)
|
unmuxSequence = getUnmuxSequence(trackDescriptor, sourcePath, targetPrefix, targetDirectory = output_directory)
|
||||||
|
|
||||||
if unmuxSequence:
|
if unmuxSequence:
|
||||||
if not dry_run:
|
if not ctx.obj['dry_run']:
|
||||||
click.echo(f"Executing unmuxing sequence: {' '.join(unmuxSequence)}")
|
ctx.obj['logger'].debug(f"Executing unmuxing sequence")
|
||||||
out, err, rc = executeProcess(unmuxSequence)
|
out, err, rc = executeProcess(unmuxSequence, context = ctx.obj)
|
||||||
if rc:
|
if rc:
|
||||||
click.echo(f"Unmuxing of stream {trackDescriptor.getIndex()} failed with error ({rc}) {err}")
|
ctx.obj['logger'].error(f"Unmuxing of stream {trackDescriptor.getIndex()} failed with error ({rc}) {err}")
|
||||||
else:
|
else:
|
||||||
click.echo(f"Skipping stream with unknown codec {trackDescriptor.getCodec()}")
|
ctx.obj['logger'].warning(f"Skipping stream with unknown codec {trackDescriptor.getCodec()}")
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
click.echo(f"Skipping File {sourcePath} ({ex})")
|
ctx.obj['logger'].warning(f"Skipping File {sourcePath} ({ex})")
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
@ffx.command()
|
||||||
@@ -170,6 +225,49 @@ def shows(ctx):
|
|||||||
app.run()
|
app.run()
|
||||||
|
|
||||||
|
|
||||||
|
def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):
|
||||||
|
|
||||||
|
# Check for multiple default or forced dispositions if not set by user input or database requirements
|
||||||
|
#
|
||||||
|
# Query user for the correct sub indices, then configure flags in track descriptors associated with media descriptor accordingly.
|
||||||
|
# The correct tokens should then be created by
|
||||||
|
if len([v for v in mediaDescriptor.getVideoTracks() if v.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
||||||
|
if context['no_prompt']:
|
||||||
|
raise click.ClickException('More than one default video stream detected and no prompt set')
|
||||||
|
defaultVideoTrackSubIndex = click.prompt("More than one default video stream detected! Please select stream", type=int)
|
||||||
|
mediaDescriptor.setDefaultSubTrack(TrackType.VIDEO, defaultVideoTrackSubIndex)
|
||||||
|
|
||||||
|
if len([v for v in mediaDescriptor.getVideoTracks() if v.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
||||||
|
if context['no_prompt']:
|
||||||
|
raise click.ClickException('More than one forced video stream detected and no prompt set')
|
||||||
|
forcedVideoTrackSubIndex = click.prompt("More than one forced video stream detected! Please select stream", type=int)
|
||||||
|
mediaDescriptor.setForcedSubTrack(TrackType.VIDEO, forcedVideoTrackSubIndex)
|
||||||
|
|
||||||
|
if len([a for a in mediaDescriptor.getAudioTracks() if a.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
||||||
|
if context['no_prompt']:
|
||||||
|
raise click.ClickException('More than one default audio stream detected and no prompt set')
|
||||||
|
defaultAudioTrackSubIndex = click.prompt("More than one default audio stream detected! Please select stream", type=int)
|
||||||
|
mediaDescriptor.setDefaultSubTrack(TrackType.AUDIO, defaultAudioTrackSubIndex)
|
||||||
|
|
||||||
|
if len([a for a in mediaDescriptor.getAudioTracks() if a.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
||||||
|
if context['no_prompt']:
|
||||||
|
raise click.ClickException('More than one forced audio stream detected and no prompt set')
|
||||||
|
forcedAudioTrackSubIndex = click.prompt("More than one forced audio stream detected! Please select stream", type=int)
|
||||||
|
mediaDescriptor.setForcedSubTrack(TrackType.AUDIO, forcedAudioTrackSubIndex)
|
||||||
|
|
||||||
|
if len([s for s in mediaDescriptor.getSubtitleTracks() if s.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
||||||
|
if context['no_prompt']:
|
||||||
|
raise click.ClickException('More than one default subtitle stream detected and no prompt set')
|
||||||
|
defaultSubtitleTrackSubIndex = click.prompt("More than one default subtitle stream detected! Please select stream", type=int)
|
||||||
|
mediaDescriptor.setDefaultSubTrack(TrackType.SUBTITLE, defaultSubtitleTrackSubIndex)
|
||||||
|
|
||||||
|
if len([s for s in mediaDescriptor.getSubtitleTracks() if s.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
||||||
|
if context['no_prompt']:
|
||||||
|
raise click.ClickException('More than one forced subtitle stream detected and no prompt set')
|
||||||
|
forcedSubtitleTrackSubIndex = click.prompt("More than one forced subtitle stream detected! Please select stream", type=int)
|
||||||
|
mediaDescriptor.setForcedSubTrack(TrackType.SUBTITLE, forcedSubtitleTrackSubIndex)
|
||||||
|
|
||||||
|
|
||||||
@ffx.command()
|
@ffx.command()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
|
|
||||||
@@ -177,45 +275,57 @@ def shows(ctx):
|
|||||||
|
|
||||||
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
|
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
|
||||||
|
|
||||||
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9 or av1) default: {FfxController.DEFAULT_VIDEO_ENCODER}")
|
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help=f"Target video encoder (vp9 or av1)", show_default=True)
|
||||||
|
|
||||||
@click.option('-q', '--quality', type=str, default=FfxController.DEFAULT_QUALITY, help=f"Quality settings to be used with VP9 encoder (default: {FfxController.DEFAULT_QUALITY})")
|
@click.option('-q', '--quality', type=str, default=DEFAULT_QUALITY, help=f"Quality settings to be used with VP9 encoder", show_default=True)
|
||||||
@click.option('-p', '--preset', type=str, default=FfxController.DEFAULT_AV1_PRESET, help=f"Quality preset to be used with AV1 encoder (default: {FfxController.DEFAULT_AV1_PRESET})")
|
@click.option('-p', '--preset', type=str, default=DEFAULT_AV1_PRESET, help=f"Quality preset to be used with AV1 encoder", show_default=True)
|
||||||
|
|
||||||
@click.option('-a', '--stereo-bitrate', type=int, default=FfxController.DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams (default: {FfxController.DEFAULT_STEREO_BANDWIDTH})")
|
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
|
||||||
@click.option('-ac3', '--ac3-bitrate', type=int, default=FfxController.DEFAULT_AC3_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 5.1 audio streams (default: {FfxController.DEFAULT_AC3_BANDWIDTH})")
|
@click.option('--ac3', type=int, default=DEFAULT_AC3_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 5.1 audio streams", show_default=True)
|
||||||
@click.option('-dts', '--dts-bitrate', type=int, default=FfxController.DEFAULT_DTS_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 6.1 audio streams (default: {FfxController.DEFAULT_DTS_BANDWIDTH})")
|
@click.option('--dts', type=int, default=DEFAULT_DTS_BANDWIDTH, help=f"Bitrate in kbit/s to be used to encode 6.1 audio streams", show_default=True)
|
||||||
|
|
||||||
@click.option('-sd', '--subtitle-directory', type=str, default='', help='Load subtitles from here')
|
@click.option('--subtitle-directory', type=str, default='', help='Load subtitles from here')
|
||||||
@click.option('-sp', '--subtitle-prefix', type=str, default='', help='Subtitle filename prefix')
|
@click.option('--subtitle-prefix', type=str, default='', help='Subtitle filename prefix')
|
||||||
|
|
||||||
|
@click.option('--language', type=str, multiple=True, help='Set stream language. Use format <stream index>:<3 letter iso code>')
|
||||||
|
@click.option('--title', type=str, multiple=True, help='Set stream title. Use format <stream index>:<title>')
|
||||||
|
|
||||||
@click.option('-as', '--audio-language', type=str, multiple=True, help='Audio stream language(s)')
|
@click.option('--default-video', type=int, default=-1, help='Index of default video stream')
|
||||||
@click.option('-at', '--audio-title', type=str, multiple=True, help='Audio stream title(s)')
|
@click.option('--forced-video', type=int, default=-1, help='Index of forced video stream')
|
||||||
|
@click.option('--default-audio', type=int, default=-1, help='Index of default audio stream')
|
||||||
@click.option('-da', '--default-audio', type=int, default=-1, help='Index of default audio stream')
|
@click.option('--forced-audio', type=int, default=-1, help='Index of forced audio stream')
|
||||||
@click.option('-da', '--forced-audio', type=int, default=-1, help='Index of forced audio stream')
|
@click.option('--default-subtitle', type=int, default=-1, help='Index of default subtitle stream')
|
||||||
|
@click.option('--forced-subtitle', type=int, default=-1, help='Index of forced subtitle stream')
|
||||||
|
|
||||||
@click.option('-ss', '--subtitle-language', type=str, multiple=True, help='Subtitle stream language(s)')
|
|
||||||
@click.option('-st', '--subtitle-title', type=str, multiple=True, help='Subtitle stream title(s)')
|
|
||||||
|
|
||||||
@click.option('-ds', '--default-subtitle', type=int, default=-1, help='Index of default subtitle stream')
|
|
||||||
@click.option('-fs', '--forced-subtitle', type=int, default=-1, help='Index of forced subtitle stream') # (including default audio stream tag)
|
|
||||||
|
|
||||||
|
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output streams order. Use format comma separated integers')
|
||||||
|
|
||||||
@click.option("--crop", is_flag=False, flag_value="default", default="none")
|
@click.option("--crop", is_flag=False, flag_value="default", default="none")
|
||||||
|
|
||||||
@click.option("-o", "--output-directory", type=str, default='')
|
@click.option("--output-directory", type=str, default='')
|
||||||
|
|
||||||
@click.option("-d", "--denoise", is_flag=True, default=False)
|
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
|
||||||
|
@click.option("--denoise-use-hw", is_flag=True, default=False)
|
||||||
|
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs more details.')
|
||||||
|
@click.option('--denoise-patch-size', type=str, default='', help='Subimage size to apply filtering on luminosity plane. Reduces broader noise patterns but costly.')
|
||||||
|
@click.option('--denoise-chroma-patch-size', type=str, default='', help='Subimage size to apply filtering on chroma planes.')
|
||||||
|
@click.option('--denoise-research-window', type=str, default='', help='Range to search for comparable patches on luminosity plane. Better filtering but costly.')
|
||||||
|
@click.option('--denoise-chroma-research-window', type=str, default='', help='Range to search for comparable patches on chroma planes.')
|
||||||
|
|
||||||
@click.option("-t", "--no-tmdb", is_flag=True, default=False)
|
@click.option('--show', type=int, default=-1, help='Set TMDB show identifier')
|
||||||
@click.option("-j", "--no-jellyfin", is_flag=True, default=False)
|
@click.option('--season', type=int, default=-1, help='Set season of show')
|
||||||
@click.option("-np", "--no-pattern", is_flag=True, default=False)
|
@click.option('--episode', type=int, default=-1, help='Set episode of show')
|
||||||
|
|
||||||
@click.option("--dry-run", is_flag=True, default=False)
|
@click.option("--no-tmdb", is_flag=True, default=False)
|
||||||
|
@click.option("--no-pattern", is_flag=True, default=False)
|
||||||
|
|
||||||
|
@click.option("--dont-pass-dispositions", is_flag=True, default=False)
|
||||||
|
|
||||||
|
@click.option("--no-prompt", is_flag=True, default=False)
|
||||||
|
@click.option("--no-signature", is_flag=True, default=False)
|
||||||
|
@click.option("--keep-mkvmerge-metadata", is_flag=True, default=False)
|
||||||
|
|
||||||
|
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
|
||||||
|
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
|
||||||
|
|
||||||
def convert(ctx,
|
def convert(ctx,
|
||||||
paths,
|
paths,
|
||||||
@@ -224,29 +334,49 @@ def convert(ctx,
|
|||||||
quality,
|
quality,
|
||||||
preset,
|
preset,
|
||||||
stereo_bitrate,
|
stereo_bitrate,
|
||||||
ac3_bitrate,
|
ac3,
|
||||||
dts_bitrate,
|
dts,
|
||||||
|
|
||||||
subtitle_directory,
|
subtitle_directory,
|
||||||
subtitle_prefix,
|
subtitle_prefix,
|
||||||
|
|
||||||
audio_language,
|
language,
|
||||||
audio_title,
|
title,
|
||||||
|
|
||||||
|
default_video,
|
||||||
|
forced_video,
|
||||||
default_audio,
|
default_audio,
|
||||||
forced_audio,
|
forced_audio,
|
||||||
|
|
||||||
subtitle_language,
|
|
||||||
subtitle_title,
|
|
||||||
default_subtitle,
|
default_subtitle,
|
||||||
forced_subtitle,
|
forced_subtitle,
|
||||||
|
|
||||||
|
rearrange_streams,
|
||||||
|
|
||||||
crop,
|
crop,
|
||||||
output_directory,
|
output_directory,
|
||||||
|
|
||||||
denoise,
|
denoise,
|
||||||
|
denoise_use_hw,
|
||||||
|
denoise_strength,
|
||||||
|
denoise_patch_size,
|
||||||
|
denoise_chroma_patch_size,
|
||||||
|
denoise_research_window,
|
||||||
|
denoise_chroma_research_window,
|
||||||
|
|
||||||
|
show,
|
||||||
|
season,
|
||||||
|
episode,
|
||||||
|
|
||||||
no_tmdb,
|
no_tmdb,
|
||||||
no_jellyfin,
|
# no_jellyfin,
|
||||||
no_pattern,
|
no_pattern,
|
||||||
dry_run):
|
dont_pass_dispositions,
|
||||||
|
no_prompt,
|
||||||
|
no_signature,
|
||||||
|
keep_mkvmerge_metadata,
|
||||||
|
|
||||||
|
nice,
|
||||||
|
cpu):
|
||||||
"""Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin
|
"""Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin
|
||||||
|
|
||||||
Files found under PATHS will be converted according to parameters.
|
Files found under PATHS will be converted according to parameters.
|
||||||
@@ -258,34 +388,123 @@ def convert(ctx,
|
|||||||
|
|
||||||
context = ctx.obj
|
context = ctx.obj
|
||||||
|
|
||||||
context['dry_run'] = dry_run
|
|
||||||
|
|
||||||
context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)
|
context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)
|
||||||
|
|
||||||
context['use_jellyfin'] = not no_jellyfin
|
targetFormat = FfxController.DEFAULT_FILE_FORMAT
|
||||||
|
targetExtension = FfxController.DEFAULT_FILE_EXTENSION
|
||||||
|
|
||||||
|
|
||||||
|
#TODO: #407 Without effect -> remove
|
||||||
|
context['use_jellyfin'] = False
|
||||||
|
|
||||||
context['use_tmdb'] = not no_tmdb
|
context['use_tmdb'] = not no_tmdb
|
||||||
context['use_pattern'] = not no_pattern
|
context['use_pattern'] = not no_pattern
|
||||||
|
context['no_prompt'] = no_prompt
|
||||||
|
context['no_signature'] = no_signature
|
||||||
|
context['keep_mkvmerge_metadata'] = keep_mkvmerge_metadata
|
||||||
|
|
||||||
|
|
||||||
|
context['resource_limits'] = {}
|
||||||
|
context['resource_limits']['niceness'] = nice
|
||||||
|
context['resource_limits']['cpu_percent'] = cpu
|
||||||
|
|
||||||
|
|
||||||
|
context['denoiser'] = NlmeansController(parameters = denoise,
|
||||||
|
strength = denoise_strength,
|
||||||
|
patchSize = denoise_patch_size,
|
||||||
|
chromaPatchSize = denoise_chroma_patch_size,
|
||||||
|
researchWindow = denoise_research_window,
|
||||||
|
chromaResearchWindow = denoise_chroma_research_window,
|
||||||
|
useHardware = denoise_use_hw)
|
||||||
|
|
||||||
context['import_subtitles'] = (subtitle_directory and subtitle_prefix)
|
context['import_subtitles'] = (subtitle_directory and subtitle_prefix)
|
||||||
if context['import_subtitles']:
|
if context['import_subtitles']:
|
||||||
context['subtitle_directory'] = subtitle_directory
|
context['subtitle_directory'] = subtitle_directory
|
||||||
context['subtitle_prefix'] = subtitle_prefix
|
context['subtitle_prefix'] = subtitle_prefix
|
||||||
|
|
||||||
# click.echo(f"\nVideo encoder: {video_encoder}")
|
|
||||||
|
existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]
|
||||||
|
|
||||||
|
|
||||||
|
# CLI Overrides
|
||||||
|
|
||||||
|
cliOverrides = {}
|
||||||
|
|
||||||
|
if language:
|
||||||
|
cliOverrides['languages'] = {}
|
||||||
|
for overLang in language:
|
||||||
|
olTokens = overLang.split(':')
|
||||||
|
if len(olTokens) == 2:
|
||||||
|
try:
|
||||||
|
cliOverrides['languages'][int(olTokens[0])] = olTokens[1]
|
||||||
|
except ValueError:
|
||||||
|
ctx.obj['logger'].warning(f"Ignoring non-integer language index {olTokens[0]}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if title:
|
||||||
|
cliOverrides['titles'] = {}
|
||||||
|
for overTitle in title:
|
||||||
|
otTokens = overTitle.split(':')
|
||||||
|
if len(otTokens) == 2:
|
||||||
|
try:
|
||||||
|
cliOverrides['titles'][int(otTokens[0])] = otTokens[1]
|
||||||
|
except ValueError:
|
||||||
|
ctx.obj['logger'].warning(f"Ignoring non-integer title index {otTokens[0]}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if default_video != -1:
|
||||||
|
cliOverrides['default_video'] = default_video
|
||||||
|
if forced_video != -1:
|
||||||
|
cliOverrides['forced_video'] = forced_video
|
||||||
|
if default_audio != -1:
|
||||||
|
cliOverrides['default_audio'] = default_audio
|
||||||
|
if forced_audio != -1:
|
||||||
|
cliOverrides['forced_audio'] = forced_audio
|
||||||
|
if default_subtitle != -1:
|
||||||
|
cliOverrides['default_subtitle'] = default_subtitle
|
||||||
|
if forced_subtitle != -1:
|
||||||
|
cliOverrides['forced_subtitle'] = forced_subtitle
|
||||||
|
|
||||||
|
if show != -1 or season != -1 or episode != -1:
|
||||||
|
if len(existingSourcePaths) > 1:
|
||||||
|
context['logger'].warning(f"Ignoring TMDB show, season, episode overrides, not supported for multiple source files")
|
||||||
|
else:
|
||||||
|
cliOverrides['tmdb'] = {}
|
||||||
|
if show != -1:
|
||||||
|
cliOverrides['tmdb']['show'] = show
|
||||||
|
if season != -1:
|
||||||
|
cliOverrides['tmdb']['season'] = season
|
||||||
|
if episode != -1:
|
||||||
|
cliOverrides['tmdb']['episode'] = episode
|
||||||
|
|
||||||
|
if cliOverrides:
|
||||||
|
context['overrides'] = cliOverrides
|
||||||
|
|
||||||
|
|
||||||
|
if rearrange_streams:
|
||||||
|
try:
|
||||||
|
cliOverrides['stream_order'] = [int(si) for si in rearrange_streams.split(",")]
|
||||||
|
except ValueError as ve:
|
||||||
|
errorMessage = "Non-integer in rearrange stream parameter"
|
||||||
|
ctx.obj['logger'].error(errorMessage)
|
||||||
|
raise click.Abort()
|
||||||
|
|
||||||
|
|
||||||
|
ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")
|
||||||
|
|
||||||
qualityTokens = quality.split(',')
|
qualityTokens = quality.split(',')
|
||||||
q_list = [q for q in qualityTokens if q.isnumeric()]
|
q_list = [q for q in qualityTokens if q.isnumeric()]
|
||||||
|
|
||||||
click.echo(f"Qualities: {q_list}")
|
ctx.obj['logger'].debug(f"Qualities: {q_list}")
|
||||||
|
|
||||||
context['bitrates'] = {}
|
context['bitrates'] = {}
|
||||||
context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
|
context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
|
||||||
context['bitrates']['ac3'] = str(ac3_bitrate) if str(ac3_bitrate).endswith('k') else f"{ac3_bitrate}k"
|
context['bitrates']['ac3'] = str(ac3) if str(ac3).endswith('k') else f"{ac3}k"
|
||||||
context['bitrates']['dts'] = str(dts_bitrate) if str(dts_bitrate).endswith('k') else f"{dts_bitrate}k"
|
context['bitrates']['dts'] = str(dts) if str(dts).endswith('k') else f"{dts}k"
|
||||||
|
|
||||||
click.echo(f"Stereo bitrate: {context['bitrates']['stereo']}")
|
ctx.obj['logger'].debug(f"Stereo bitrate: {context['bitrates']['stereo']}")
|
||||||
click.echo(f"AC3 bitrate: {context['bitrates']['ac3']}")
|
ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
|
||||||
click.echo(f"DTS bitrate: {context['bitrates']['dts']}")
|
ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")
|
||||||
|
|
||||||
|
|
||||||
# Process crop parameters
|
# Process crop parameters
|
||||||
@@ -295,13 +514,14 @@ def convert(ctx,
|
|||||||
if cTokens and len(cTokens) == 2:
|
if cTokens and len(cTokens) == 2:
|
||||||
context['crop_start'] = int(cTokens[0])
|
context['crop_start'] = int(cTokens[0])
|
||||||
context['crop_length'] = int(cTokens[1])
|
context['crop_length'] = int(cTokens[1])
|
||||||
click.echo(f"Crop start={context['crop_start']} length={context['crop_length']}")
|
ctx.obj['logger'].debug(f"Crop start={context['crop_start']} length={context['crop_length']}")
|
||||||
|
|
||||||
|
|
||||||
tc = TmdbController() if context['use_tmdb'] else None
|
tc = TmdbController() if context['use_tmdb'] else None
|
||||||
|
|
||||||
existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]
|
|
||||||
click.echo(f"\nRunning {len(existingSourcePaths) * len(q_list)} jobs")
|
ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(q_list)} jobs")
|
||||||
|
|
||||||
jobIndex = 0
|
jobIndex = 0
|
||||||
|
|
||||||
for sourcePath in existingSourcePaths:
|
for sourcePath in existingSourcePaths:
|
||||||
@@ -314,150 +534,174 @@ def convert(ctx,
|
|||||||
sourceFileBasename = '.'.join(sourcePathTokens[:-1])
|
sourceFileBasename = '.'.join(sourcePathTokens[:-1])
|
||||||
sourceFilenameExtension = sourcePathTokens[-1]
|
sourceFilenameExtension = sourcePathTokens[-1]
|
||||||
|
|
||||||
click.echo(f"\nProcessing file {sourcePath}")
|
ctx.obj['logger'].info(f"\nProcessing file {sourcePath}")
|
||||||
|
|
||||||
|
targetSuffices = {}
|
||||||
|
|
||||||
|
|
||||||
mediaFileProperties = FileProperties(context, sourceFilename)
|
mediaFileProperties = FileProperties(context, sourceFilename)
|
||||||
|
|
||||||
|
|
||||||
|
#HINT: -1 if not set
|
||||||
|
if 'tmdb' in cliOverrides.keys() and 'season' in cliOverrides['tmdb']:
|
||||||
|
showSeason = cliOverrides['tmdb']['season']
|
||||||
|
else:
|
||||||
|
showSeason = mediaFileProperties.getSeason()
|
||||||
|
|
||||||
|
if 'tmdb' in cliOverrides.keys() and 'episode' in cliOverrides['tmdb']:
|
||||||
|
showEpisode = cliOverrides['tmdb']['episode']
|
||||||
|
else:
|
||||||
|
showEpisode = mediaFileProperties.getEpisode()
|
||||||
|
|
||||||
|
|
||||||
|
ctx.obj['logger'].debug(f"Season={showSeason} Episode={showEpisode}")
|
||||||
|
|
||||||
|
|
||||||
sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()
|
sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()
|
||||||
|
|
||||||
#HINT: This is None if the filename did not match anything in database
|
#HINT: This is None if the filename did not match anything in database
|
||||||
currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None
|
currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None
|
||||||
|
|
||||||
click.echo(f"Pattern matching: {'No' if currentPattern is None else 'Yes'}")
|
ctx.obj['logger'].debug(f"Pattern matching: {'No' if currentPattern is None else 'Yes'}")
|
||||||
|
|
||||||
fileBasename = ''
|
|
||||||
|
|
||||||
|
# Setup FfxController accordingly depending on pattern matching is enabled and a pattern was matched
|
||||||
if currentPattern is None:
|
if currentPattern is None:
|
||||||
|
|
||||||
# Case no pattern matching
|
checkUniqueDispositions(context, sourceMediaDescriptor)
|
||||||
|
currentShowDescriptor = None
|
||||||
# Check for multiple default or forced dispositions if not set by user input or database requirements
|
|
||||||
#
|
|
||||||
# Query user for the correct sub indices, then configure flags in track descriptors associated with media descriptor accordingly.
|
|
||||||
# The correct tokens should then be created by
|
|
||||||
if len([v for v in sourceMediaDescriptor.getVideoTracks() if v.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
|
||||||
defaultVideoTrackSubIndex = click.prompt("More than one default video stream detected! Please select stream", type=int)
|
|
||||||
sourceMediaDescriptor.setDefaultSubTrack(TrackType.VIDEO, defaultVideoTrackSubIndex)
|
|
||||||
|
|
||||||
if len([v for v in sourceMediaDescriptor.getVideoTracks() if v.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
|
||||||
forcedVideoTrackSubIndex = click.prompt("More than one forced video stream detected! Please select stream", type=int)
|
|
||||||
sourceMediaDescriptor.setForcedSubTrack(TrackType.VIDEO, forcedVideoTrackSubIndex)
|
|
||||||
|
|
||||||
if len([a for a in sourceMediaDescriptor.getAudioTracks() if a.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
|
||||||
defaultAudioTrackSubIndex = click.prompt("More than one default audio stream detected! Please select stream", type=int)
|
|
||||||
sourceMediaDescriptor.setDefaultSubTrack(TrackType.AUDIO, defaultAudioTrackSubIndex)
|
|
||||||
|
|
||||||
if len([a for a in sourceMediaDescriptor.getAudioTracks() if a.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
|
||||||
forcedAudioTrackSubIndex = click.prompt("More than one forced audio stream detected! Please select stream", type=int)
|
|
||||||
sourceMediaDescriptor.setForcedSubTrack(TrackType.AUDIO, forcedAudioTrackSubIndex)
|
|
||||||
|
|
||||||
if len([s for s in sourceMediaDescriptor.getSubtitleTracks() if s.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
|
||||||
defaultSubtitleTrackSubIndex = click.prompt("More than one default subtitle stream detected! Please select stream", type=int)
|
|
||||||
sourceMediaDescriptor.setDefaultSubTrack(TrackType.SUBTITLE, defaultSubtitleTrackSubIndex)
|
|
||||||
|
|
||||||
if len([s for s in sourceMediaDescriptor.getSubtitleTracks() if s.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
|
||||||
forcedSubtitleTrackSubIndex = click.prompt("More than one forced subtitle stream detected! Please select stream", type=int)
|
|
||||||
sourceMediaDescriptor.setForcedSubTrack(TrackType.SUBTITLE, forcedSubtitleTrackSubIndex)
|
|
||||||
|
|
||||||
|
|
||||||
if context['import_subtitles']:
|
if context['import_subtitles']:
|
||||||
sourceMediaDescriptor.importSubtitles(context['subtitle_directory'],
|
sourceMediaDescriptor.importSubtitles(context['subtitle_directory'],
|
||||||
context['subtitle_prefix'],
|
context['subtitle_prefix'],
|
||||||
mediaFileProperties.getSeason(),
|
showSeason,
|
||||||
mediaFileProperties.getEpisode())
|
showEpisode)
|
||||||
|
|
||||||
|
if cliOverrides:
|
||||||
|
sourceMediaDescriptor.applyOverrides(cliOverrides)
|
||||||
|
|
||||||
fc = FfxController(context, sourceMediaDescriptor)
|
fc = FfxController(context, sourceMediaDescriptor)
|
||||||
|
|
||||||
dispositionTokens = fc.generateDispositionTokens()
|
|
||||||
click.echo(f"Disposition Tokens: {dispositionTokens}")
|
|
||||||
|
|
||||||
audioTokens = fc.generateAudioEncodingTokens()
|
|
||||||
click.echo(f"Audio Tokens: {audioTokens}")
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
targetMediaDescriptor = currentPattern.getMediaDescriptor(ctx.obj)
|
||||||
# Case pattern matching
|
checkUniqueDispositions(context, targetMediaDescriptor)
|
||||||
|
currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)
|
||||||
targetMediaDescriptor = currentPattern.getMediaDescriptor()
|
|
||||||
|
|
||||||
currentShowDescriptor = currentPattern.getShowDescriptor()
|
|
||||||
|
|
||||||
|
|
||||||
if context['use_tmdb']:
|
|
||||||
|
|
||||||
tmdbEpisodeResult = tc.queryEpisode(currentShowDescriptor.getId(), mediaFileProperties.getSeason(), mediaFileProperties.getEpisode())
|
|
||||||
|
|
||||||
if tmdbEpisodeResult:
|
|
||||||
fileBasename = tc.getEpisodeFileBasename(currentShowDescriptor.getFilenamePrefix(),
|
|
||||||
tmdbEpisodeResult['name'],
|
|
||||||
mediaFileProperties.getSeason(),
|
|
||||||
mediaFileProperties.getEpisode(),
|
|
||||||
currentShowDescriptor.getIndexSeasonDigits(),
|
|
||||||
currentShowDescriptor.getIndexEpisodeDigits(),
|
|
||||||
currentShowDescriptor.getIndicatorSeasonDigits(),
|
|
||||||
currentShowDescriptor.getIndicatorEpisodeDigits())
|
|
||||||
else:
|
|
||||||
fileBasename = currentShowDescriptor.getFilenamePrefix()
|
|
||||||
|
|
||||||
click.echo(f"fileBasename={fileBasename}")
|
|
||||||
|
|
||||||
if context['import_subtitles']:
|
if context['import_subtitles']:
|
||||||
targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
|
targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
|
||||||
context['subtitle_prefix'],
|
context['subtitle_prefix'],
|
||||||
mediaFileProperties.getSeason(),
|
showSeason,
|
||||||
mediaFileProperties.getEpisode())
|
showEpisode)
|
||||||
|
|
||||||
# raise click.ClickException(f"tmd subindices: {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
||||||
# click.echo(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
|
||||||
|
|
||||||
if context['use_jellyfin']:
|
if cliOverrides:
|
||||||
# Reorder subtracks in types with default the last, then make subindices flat again
|
targetMediaDescriptor.applyOverrides(cliOverrides)
|
||||||
targetMediaDescriptor.applyJellyfinOrder()
|
|
||||||
|
|
||||||
# click.echo(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
||||||
# raise click.Abort
|
|
||||||
|
|
||||||
click.echo(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")
|
ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")
|
||||||
|
|
||||||
fc = FfxController(context, targetMediaDescriptor, sourceMediaDescriptor)
|
fc = FfxController(context, targetMediaDescriptor, sourceMediaDescriptor)
|
||||||
|
|
||||||
mappingTokens = fc.generateMetadataTokens()
|
|
||||||
click.echo(f"Metadata Tokens: {mappingTokens}")
|
|
||||||
|
|
||||||
dispositionTokens = fc.generateDispositionTokens()
|
indexSeasonDigits = currentShowDescriptor.getIndexSeasonDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
|
||||||
click.echo(f"Disposition Tokens: {dispositionTokens}")
|
indexEpisodeDigits = currentShowDescriptor.getIndexEpisodeDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
|
||||||
|
indicatorSeasonDigits = currentShowDescriptor.getIndicatorSeasonDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
|
||||||
|
indicatorEpisodeDigits = currentShowDescriptor.getIndicatorEpisodeDigits() if not currentPattern is None else ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS
|
||||||
|
|
||||||
audioTokens = fc.generateAudioEncodingTokens()
|
|
||||||
click.echo(f"Audio Tokens: {audioTokens}")
|
|
||||||
|
|
||||||
click.echo(f"Season={mediaFileProperties.getSeason()} Episode={mediaFileProperties.getEpisode()}")
|
# Assemble target filename accordingly depending on TMDB lookup is enabled
|
||||||
|
#HINT: -1 if not set
|
||||||
|
showId = cliOverrides['tmdb']['show'] if 'tmdb' in cliOverrides.keys() and 'show' in cliOverrides['tmdb'] else (-1 if currentShowDescriptor is None else currentShowDescriptor.getId())
|
||||||
|
|
||||||
|
if context['use_tmdb'] and showId != -1 and showSeason != -1 and showEpisode != -1:
|
||||||
|
|
||||||
|
ctx.obj['logger'].debug(f"Querying TMDB for show_id={showId} season={showSeason} episode{showEpisode}")
|
||||||
|
|
||||||
|
if currentPattern is None:
|
||||||
|
sName, showYear = tc.getShowNameAndYear(showId)
|
||||||
|
showName = filterFilename(sName)
|
||||||
|
showFilenamePrefix = f"{showName} ({str(showYear)})"
|
||||||
|
else:
|
||||||
|
showFilenamePrefix = currentShowDescriptor.getFilenamePrefix()
|
||||||
|
|
||||||
|
tmdbEpisodeResult = tc.queryEpisode(showId, showSeason, showEpisode)
|
||||||
|
|
||||||
|
ctx.obj['logger'].debug(f"tmdbEpisodeResult={tmdbEpisodeResult}")
|
||||||
|
|
||||||
|
if tmdbEpisodeResult:
|
||||||
|
filteredEpisodeName = filterFilename(tmdbEpisodeResult['name'])
|
||||||
|
sourceFileBasename = TmdbController.getEpisodeFileBasename(showFilenamePrefix,
|
||||||
|
filteredEpisodeName,
|
||||||
|
showSeason,
|
||||||
|
showEpisode,
|
||||||
|
indexSeasonDigits,
|
||||||
|
indexEpisodeDigits,
|
||||||
|
indicatorSeasonDigits,
|
||||||
|
indicatorEpisodeDigits)
|
||||||
|
|
||||||
|
|
||||||
|
if label:
|
||||||
|
if showSeason > -1 and showEpisode > -1:
|
||||||
|
targetSuffices['se'] = f"S{showSeason:0{indicatorSeasonDigits}d}E{showEpisode:0{indicatorEpisodeDigits}d}"
|
||||||
|
elif showEpisode > -1:
|
||||||
|
targetSuffices['se'] = f"E{showEpisode:0{indicatorEpisodeDigits}d}"
|
||||||
|
else:
|
||||||
|
if 'se' in targetSuffices.keys():
|
||||||
|
del targetSuffices['se']
|
||||||
|
|
||||||
|
|
||||||
|
ctx.obj['logger'].debug(f"fileBasename={sourceFileBasename}")
|
||||||
|
|
||||||
|
|
||||||
for q in q_list:
|
for q in q_list:
|
||||||
|
|
||||||
click.echo(f"\nRunning job {jobIndex} file={sourcePath} q={q}")
|
if len(q_list) > 1:
|
||||||
|
targetSuffices['q'] = f"q{q}"
|
||||||
|
|
||||||
|
ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} q={q}")
|
||||||
jobIndex += 1
|
jobIndex += 1
|
||||||
|
|
||||||
extra = ['ffx'] if sourceFilenameExtension == FfxController.DEFAULT_FILE_EXTENSION else []
|
ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
|
||||||
|
ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")
|
||||||
|
|
||||||
targetFilename = (fileBasename if context['use_tmdb']
|
# targetFileBasename = mediaFileProperties.assembleTargetFileBasename(label,
|
||||||
else mediaFileProperties.assembleTargetFileBasename(label if label else fileBasename,
|
# q if len(q_list) > 1 else -1,
|
||||||
q if len(q_list) > 1 else -1,
|
#
|
||||||
extraTokens = extra))
|
targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label
|
||||||
|
|
||||||
|
|
||||||
|
targetFilenameTokens = [targetFileBasename]
|
||||||
|
|
||||||
|
if 'se' in targetSuffices.keys():
|
||||||
|
targetFilenameTokens += [targetSuffices['se']]
|
||||||
|
|
||||||
|
if 'q' in targetSuffices.keys():
|
||||||
|
targetFilenameTokens += [targetSuffices['q']]
|
||||||
|
|
||||||
|
|
||||||
|
#TODO #387
|
||||||
|
# targetFilename = ((f"{sourceFileBasename}_q{q}" if len(q_list) > 1 else sourceFileBasename)
|
||||||
|
# if context['use_tmdb'] else targetFileBasename)
|
||||||
|
|
||||||
|
targetFilename = f"{'_'.join(targetFilenameTokens)}.{targetExtension}"
|
||||||
|
|
||||||
targetPath = os.path.join(output_directory if output_directory else sourceDirectory, targetFilename)
|
targetPath = os.path.join(output_directory if output_directory else sourceDirectory, targetFilename)
|
||||||
|
|
||||||
|
#TODO: target extension anpassen
|
||||||
|
ctx.obj['logger'].info(f"Creating file {targetFilename}")
|
||||||
|
|
||||||
fc.runJob(sourcePath,
|
fc.runJob(sourcePath,
|
||||||
targetPath,
|
targetPath,
|
||||||
|
targetFormat,
|
||||||
context['video_encoder'],
|
context['video_encoder'],
|
||||||
q,
|
q,
|
||||||
preset,
|
preset)
|
||||||
denoise)
|
|
||||||
|
|
||||||
#TODO: click.confirm('Warning! This file is not compliant to the defined source schema! Do you want to continue?', abort=True)
|
#TODO: click.confirm('Warning! This file is not compliant to the defined source schema! Do you want to continue?', abort=True)
|
||||||
|
|
||||||
endTime = time.perf_counter()
|
endTime = time.perf_counter()
|
||||||
click.echo(f"\nDONE\nTime elapsed {endTime - startTime}")
|
ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
@@ -28,7 +28,6 @@ class AudioLayout(Enum):
|
|||||||
|
|
||||||
return [a for a in AudioLayout if a.value['label'] == str(label)][0]
|
return [a for a in AudioLayout if a.value['label'] == str(label)][0]
|
||||||
except:
|
except:
|
||||||
raise click.ClickException('fromLabel failed')
|
|
||||||
return AudioLayout.LAYOUT_UNDEFINED
|
return AudioLayout.LAYOUT_UNDEFINED
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -36,7 +35,6 @@ class AudioLayout(Enum):
|
|||||||
try:
|
try:
|
||||||
return [a for a in AudioLayout if a.value['index'] == int(index)][0]
|
return [a for a in AudioLayout if a.value['index'] == int(index)][0]
|
||||||
except:
|
except:
|
||||||
raise click.ClickException('fromIndex failed')
|
|
||||||
return AudioLayout.LAYOUT_UNDEFINED
|
return AudioLayout.LAYOUT_UNDEFINED
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
|
|||||||
52
bin/ffx/configuration_controller.py
Normal file
52
bin/ffx/configuration_controller.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
import os, json
|
||||||
|
|
||||||
|
class ConfigurationController():
|
||||||
|
|
||||||
|
CONFIG_FILENAME = 'ffx.json'
|
||||||
|
DATABASE_FILENAME = 'ffx.db'
|
||||||
|
LOG_FILENAME = 'ffx.log'
|
||||||
|
|
||||||
|
DATABASE_PATH_CONFIG_KEY = 'databasePath'
|
||||||
|
LOG_DIRECTORY_CONFIG_KEY = 'logDirectory'
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
|
||||||
|
self.__homeDir = os.path.expanduser("~")
|
||||||
|
self.__localVarDir = os.path.join(self.__homeDir, '.local', 'var')
|
||||||
|
self.__localEtcDir = os.path.join(self.__homeDir, '.local', 'etc')
|
||||||
|
|
||||||
|
self.__configurationData = {}
|
||||||
|
|
||||||
|
# .local/etc/ffx.json
|
||||||
|
self.__configFilePath = os.path.join(self.__localEtcDir, ConfigurationController.CONFIG_FILENAME)
|
||||||
|
if os.path.isfile(self.__configFilePath):
|
||||||
|
with open(self.__configFilePath, 'r') as configurationFile:
|
||||||
|
self.__configurationData = json.load(configurationFile)
|
||||||
|
|
||||||
|
if ConfigurationController.DATABASE_PATH_CONFIG_KEY in self.__configurationData.keys():
|
||||||
|
self.__databaseFilePath = self.__configurationData[ConfigurationController.DATABASE_PATH_CONFIG_KEY]
|
||||||
|
os.makedirs(os.path.dirname(self.__databaseFilePath), exist_ok=True)
|
||||||
|
else:
|
||||||
|
ffxVarDir = os.path.join(self.__localVarDir, 'ffx')
|
||||||
|
os.makedirs(ffxVarDir, exist_ok=True)
|
||||||
|
self.__databaseFilePath = os.path.join(ffxVarDir, ConfigurationController.DATABASE_FILENAME)
|
||||||
|
|
||||||
|
if ConfigurationController.LOG_DIRECTORY_CONFIG_KEY in self.__configurationData.keys():
|
||||||
|
self.__logDir = self.__configurationData[ConfigurationController.LOG_DIRECTORY_CONFIG_KEY]
|
||||||
|
else:
|
||||||
|
self.__logDir = os.path.join(self.__localVarDir, 'log')
|
||||||
|
os.makedirs(self.__logDir, exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
def getHomeDirectory(self):
|
||||||
|
return self.__homeDir
|
||||||
|
|
||||||
|
def getLogFilePath(self):
|
||||||
|
return os.path.join(self.__logDir, ConfigurationController.LOG_FILENAME)
|
||||||
|
|
||||||
|
def getDatabaseFilePath(self):
|
||||||
|
return self.__databaseFilePath
|
||||||
|
|
||||||
|
|
||||||
|
def getData(self):
|
||||||
|
return self.__configurationData
|
||||||
10
bin/ffx/constants.py
Normal file
10
bin/ffx/constants.py
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
DEFAULT_QUALITY = 32
|
||||||
|
DEFAULT_AV1_PRESET = 5
|
||||||
|
|
||||||
|
DEFAULT_STEREO_BANDWIDTH = "112"
|
||||||
|
DEFAULT_AC3_BANDWIDTH = "256"
|
||||||
|
DEFAULT_DTS_BANDWIDTH = "320"
|
||||||
|
DEFAULT_7_1_BANDWIDTH = "384"
|
||||||
|
|
||||||
|
DEFAULT_CROP_START = 60
|
||||||
|
DEFAULT_CROP_LENGTH = 180
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
from textual.app import App, ComposeResult
|
|
||||||
from textual.screen import Screen
|
|
||||||
from textual.widgets import Header, Footer, Placeholder, Label
|
|
||||||
|
|
||||||
class DashboardScreen(Screen):
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__()
|
|
||||||
|
|
||||||
context = self.app.getContext()
|
|
||||||
context['dashboard'] = 'dashboard'
|
|
||||||
|
|
||||||
def compose(self) -> ComposeResult:
|
|
||||||
yield Header(show_clock=True)
|
|
||||||
yield Placeholder("Dashboard Screen")
|
|
||||||
yield Footer()
|
|
||||||
@@ -11,17 +11,21 @@ from ffx.model.media_tag import MediaTag
|
|||||||
from ffx.model.track_tag import TrackTag
|
from ffx.model.track_tag import TrackTag
|
||||||
|
|
||||||
|
|
||||||
def databaseContext():
|
def databaseContext(databasePath: str = ''):
|
||||||
|
|
||||||
databaseContext = {}
|
databaseContext = {}
|
||||||
|
|
||||||
# Initialize DB
|
if databasePath is None:
|
||||||
|
# sqlite:///:memory:
|
||||||
|
databasePath = ':memory:'
|
||||||
|
elif not databasePath:
|
||||||
homeDir = os.path.expanduser("~")
|
homeDir = os.path.expanduser("~")
|
||||||
ffxVarDir = os.path.join(homeDir, '.local', 'var', 'ffx')
|
ffxVarDir = os.path.join(homeDir, '.local', 'var', 'ffx')
|
||||||
if not os.path.exists(ffxVarDir):
|
if not os.path.exists(ffxVarDir):
|
||||||
os.makedirs(ffxVarDir)
|
os.makedirs(ffxVarDir)
|
||||||
|
databasePath = os.path.join(ffxVarDir, 'ffx.db')
|
||||||
|
|
||||||
databaseContext['url'] = f"sqlite:///{os.path.join(ffxVarDir, 'ffx.db')}"
|
databaseContext['url'] = f"sqlite:///{databasePath}"
|
||||||
databaseContext['engine'] = create_engine(databaseContext['url'])
|
databaseContext['engine'] = create_engine(databaseContext['url'])
|
||||||
databaseContext['session'] = sessionmaker(bind=databaseContext['engine'])
|
databaseContext['session'] = sessionmaker(bind=databaseContext['engine'])
|
||||||
|
|
||||||
|
|||||||
@@ -8,6 +8,10 @@ from ffx.audio_layout import AudioLayout
|
|||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
from ffx.video_encoder import VideoEncoder
|
from ffx.video_encoder import VideoEncoder
|
||||||
from ffx.process import executeProcess
|
from ffx.process import executeProcess
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
|
from ffx.constants import DEFAULT_QUALITY, DEFAULT_AV1_PRESET
|
||||||
|
from ffx.constants import DEFAULT_CROP_START, DEFAULT_CROP_LENGTH
|
||||||
|
|
||||||
|
|
||||||
class FfxController():
|
class FfxController():
|
||||||
@@ -19,30 +23,14 @@ class FfxController():
|
|||||||
|
|
||||||
DEFAULT_VIDEO_ENCODER = VideoEncoder.VP9.label()
|
DEFAULT_VIDEO_ENCODER = VideoEncoder.VP9.label()
|
||||||
|
|
||||||
DEFAULT_QUALITY = 23
|
|
||||||
DEFAULT_AV1_PRESET = 5
|
|
||||||
|
|
||||||
DEFAULT_FILE_FORMAT = 'webm'
|
DEFAULT_FILE_FORMAT = 'webm'
|
||||||
DEFAULT_FILE_EXTENSION = 'webm'
|
DEFAULT_FILE_EXTENSION = 'webm'
|
||||||
|
|
||||||
DEFAULT_STEREO_BANDWIDTH = "128"
|
|
||||||
DEFAULT_AC3_BANDWIDTH = "256"
|
|
||||||
DEFAULT_DTS_BANDWIDTH = "320"
|
|
||||||
|
|
||||||
DEFAULT_CROP_START = 60
|
|
||||||
DEFAULT_CROP_LENGTH = 180
|
|
||||||
|
|
||||||
MKVMERGE_METADATA_KEYS = ['BPS',
|
|
||||||
'NUMBER_OF_FRAMES',
|
|
||||||
'NUMBER_OF_BYTES',
|
|
||||||
'_STATISTICS_WRITING_APP',
|
|
||||||
'_STATISTICS_WRITING_DATE_UTC',
|
|
||||||
'_STATISTICS_TAGS']
|
|
||||||
|
|
||||||
INPUT_FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
INPUT_FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
||||||
|
|
||||||
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
|
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
|
||||||
|
|
||||||
|
SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self,
|
||||||
context : dict,
|
context : dict,
|
||||||
@@ -53,6 +41,14 @@ class FfxController():
|
|||||||
self.__sourceMediaDescriptor = sourceMediaDescriptor
|
self.__sourceMediaDescriptor = sourceMediaDescriptor
|
||||||
self.__targetMediaDescriptor = targetMediaDescriptor
|
self.__targetMediaDescriptor = targetMediaDescriptor
|
||||||
|
|
||||||
|
self.__configurationData = self.__context['config'].getData()
|
||||||
|
|
||||||
|
# Convenience
|
||||||
|
self.__niceness = self.__context['resource_limits']['niceness'] if 'resource_limits' in self.__context.keys() and 'niceness' in self.__context['resource_limits'].keys() else 99
|
||||||
|
self.__cpuPercent = self.__context['resource_limits']['cpu_percent'] if 'resource_limits' in self.__context.keys() and 'cpu_percent' in self.__context['resource_limits'].keys() else 0
|
||||||
|
|
||||||
|
self.__logger = context['logger']
|
||||||
|
|
||||||
|
|
||||||
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
|
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
|
||||||
|
|
||||||
@@ -95,20 +91,18 @@ class FfxController():
|
|||||||
cropStart = int(self.__context['crop_start'])
|
cropStart = int(self.__context['crop_start'])
|
||||||
cropLength = int(self.__context['crop_length'])
|
cropLength = int(self.__context['crop_length'])
|
||||||
else:
|
else:
|
||||||
cropStart = FfxController.DEFAULT_CROP_START
|
cropStart = DEFAULT_CROP_START
|
||||||
cropLength = FfxController.DEFAULT_CROP_LENGTH
|
cropLength = DEFAULT_CROP_LENGTH
|
||||||
|
|
||||||
return ['-ss', str(cropStart), '-t', str(cropLength)]
|
return ['-ss', str(cropStart), '-t', str(cropLength)]
|
||||||
|
|
||||||
|
|
||||||
def generateDenoiseTokens(self, spatial=5, patch=7, research=7, hw=False):
|
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
|
||||||
filterName = 'nlmeans_opencl' if hw else 'nlmeans'
|
outputFilePath = f"{filePathBase}{'.'+str(ext) if ext else ''}"
|
||||||
return ['-vf', f"{filterName}=s={spatial}:p={patch}:r={research}"]
|
if format:
|
||||||
|
|
||||||
|
|
||||||
def generateOutputTokens(self, filepath, format, ext):
|
|
||||||
outputFilePath = f"{filepath}.{ext}"
|
|
||||||
return ['-f', format, outputFilePath]
|
return ['-f', format, outputFilePath]
|
||||||
|
else:
|
||||||
|
return [outputFilePath]
|
||||||
|
|
||||||
|
|
||||||
def generateAudioEncodingTokens(self):
|
def generateAudioEncodingTokens(self):
|
||||||
@@ -116,24 +110,13 @@ class FfxController():
|
|||||||
|
|
||||||
audioTokens = []
|
audioTokens = []
|
||||||
|
|
||||||
#sourceAudioTrackDescriptors = [smd for smd in self.__sourceMediaDescriptor.getAllTrackDescriptors() if smd.getType() == TrackType.AUDIO]
|
|
||||||
# targetAudioTrackDescriptors = [rtd for rtd in self.__targetMediaDescriptor.getReorderedTrackDescriptors() if rtd.getType() == TrackType.AUDIO]
|
|
||||||
targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
|
targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
|
||||||
|
|
||||||
trackSubIndex = 0
|
trackSubIndex = 0
|
||||||
for trackDescriptor in targetAudioTrackDescriptors:
|
for trackDescriptor in targetAudioTrackDescriptors:
|
||||||
|
|
||||||
# Calculate source sub index
|
|
||||||
#changedTargetTrackDescriptor : TrackDescriptor = targetAudioTrackDescriptors[trackDescriptor.getIndex()]
|
|
||||||
#changedTargetTrackSourceIndex = changedTargetTrackDescriptor.getSourceIndex()
|
|
||||||
#sourceSubIndex = sourceAudioTrackDescriptors[changedTargetTrackSourceIndex].getSubIndex()
|
|
||||||
|
|
||||||
trackAudioLayout = trackDescriptor.getAudioLayout()
|
trackAudioLayout = trackDescriptor.getAudioLayout()
|
||||||
|
|
||||||
#TODO: Sollte nicht die sub index unverändert bleiben wenn jellyfin reordering angewendet wurde?
|
|
||||||
# siehe auch: MediaDescriptor.getInputMappingTokens()
|
|
||||||
#trackSubIndex = trackDescriptor.getSubIndex()
|
|
||||||
|
|
||||||
if trackAudioLayout == AudioLayout.LAYOUT_6_1:
|
if trackAudioLayout == AudioLayout.LAYOUT_6_1:
|
||||||
audioTokens += [f"-c:a:{trackSubIndex}",
|
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||||
'libopus',
|
'libopus',
|
||||||
@@ -170,24 +153,31 @@ class FfxController():
|
|||||||
# -disposition:s:0 default -disposition:s:1 0
|
# -disposition:s:0 default -disposition:s:1 0
|
||||||
def generateDispositionTokens(self):
|
def generateDispositionTokens(self):
|
||||||
|
|
||||||
# sourceTrackDescriptors = [] if self.__sourceMediaDescriptor is None else self.__sourceMediaDescriptor.getAllTrackDescriptors()
|
|
||||||
targetTrackDescriptors = self.__targetMediaDescriptor.getAllTrackDescriptors()
|
targetTrackDescriptors = self.__targetMediaDescriptor.getAllTrackDescriptors()
|
||||||
|
|
||||||
|
sourceTrackDescriptors = ([] if self.__sourceMediaDescriptor is None
|
||||||
|
else self.__sourceMediaDescriptor.getAllTrackDescriptors())
|
||||||
|
|
||||||
dispositionTokens = []
|
dispositionTokens = []
|
||||||
|
|
||||||
# raise click.ClickException(f"ttd subindices: {[t.getSubIndex() for t in targetTrackDescriptors]}")
|
for trackIndex in range(len(targetTrackDescriptors)):
|
||||||
|
|
||||||
#TODO: Sorting here is for the sole purpose to let the tokens appear with ascending subindices. Why necessary? Jellyfin order?
|
td = targetTrackDescriptors[trackIndex]
|
||||||
# for trackDescriptor in sorted(targetTrackDescriptors.copy(), key=lambda d: d.getSubIndex()):
|
|
||||||
for trackDescriptor in targetTrackDescriptors:
|
|
||||||
|
|
||||||
#HINT: No dispositions for pgs subtitle tracks that have no external file source
|
#HINT: No dispositions for pgs subtitle tracks that have no external file source
|
||||||
if (trackDescriptor.getExternalSourceFilePath()
|
if (td.getExternalSourceFilePath()
|
||||||
or trackDescriptor.getCodec() != TrackDescriptor.CODEC_PGS):
|
or td.getCodec() != TrackDescriptor.CODEC_PGS):
|
||||||
|
|
||||||
subIndex = trackDescriptor.getSubIndex()
|
subIndex = td.getSubIndex()
|
||||||
streamIndicator = trackDescriptor.getType().indicator()
|
streamIndicator = td.getType().indicator()
|
||||||
dispositionSet = trackDescriptor.getDispositionSet()
|
|
||||||
|
|
||||||
|
sourceDispositionSet = sourceTrackDescriptors[td.getSourceIndex()].getDispositionSet() if sourceTrackDescriptors else set()
|
||||||
|
|
||||||
|
#TODO: Alles discarden was im targetDescriptor vorhanden ist (?)
|
||||||
|
sourceDispositionSet.discard(TrackDisposition.DEFAULT)
|
||||||
|
|
||||||
|
dispositionSet = td.getDispositionSet() | sourceDispositionSet
|
||||||
|
|
||||||
if dispositionSet:
|
if dispositionSet:
|
||||||
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in dispositionSet])]
|
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in dispositionSet])]
|
||||||
@@ -197,185 +187,63 @@ class FfxController():
|
|||||||
return dispositionTokens
|
return dispositionTokens
|
||||||
|
|
||||||
|
|
||||||
# def generateMetadataTokens(self):
|
|
||||||
# """Source media descriptor is mandatory"""
|
|
||||||
#
|
|
||||||
# metadataTokens = []
|
|
||||||
#
|
|
||||||
# # click.echo(f"source media descriptor: track indices={[d.getIndex() for d in sourceMediaDescriptor.getAllTrackDescriptors()]}")
|
|
||||||
# # click.echo(f"target media descriptor: track indices={[d.getIndex() for d in targetMediaDescriptor.getAllTrackDescriptors()]}")
|
|
||||||
#
|
|
||||||
# # +jellyfin -jellyfin
|
|
||||||
# mediaDifferences = self.__targetMediaDescriptor.compare(self.__sourceMediaDescriptor)
|
|
||||||
#
|
|
||||||
# # media diff {'tracks': {'changed': {4: {'tags': {'added': {'Yolo'}}}}}}
|
|
||||||
#
|
|
||||||
# click.echo(f"media diff {mediaDifferences}")
|
|
||||||
#
|
|
||||||
# if MediaDescriptor.TAGS_KEY in mediaDifferences.keys():
|
|
||||||
#
|
|
||||||
# sourceTags = self.__sourceMediaDescriptor.getTags()
|
|
||||||
# targetTags = self.__targetMediaDescriptor.getTags()
|
|
||||||
#
|
|
||||||
# #TODO: Warum erscheint nur -1 im output?
|
|
||||||
# if DIFF_REMOVED_KEY in mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
|
||||||
# # for removedTagKey in mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
|
|
||||||
# # row = (f"removed media tag: key='{removedTagKey}' value='{sourceTags[removedTagKey]}'",)
|
|
||||||
# # self.differencesTable.add_row(*map(str, row))
|
|
||||||
# pass
|
|
||||||
# #metadataTokens += [f"-map_metadata:g", "-1"]
|
|
||||||
#
|
|
||||||
# #for targetMediaTagKey in targetTags:
|
|
||||||
# #metadataTokens += [f"-metadata:g", f"{targetMediaTagKey}={targetTags[targetMediaTagKey]}"]
|
|
||||||
#
|
|
||||||
# else:
|
|
||||||
#
|
|
||||||
# if DIFF_ADDED_KEY in mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
|
||||||
# for addedTagKey in mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
|
|
||||||
# # row = (f"added media tag: key='{addedTagKey}' value='{targetTags[addedTagKey]}'",)
|
|
||||||
# click.echo(f"added metadata key='{addedTagKey}' value='{targetTags[addedTagKey]}'->'{targetTags[addedTagKey]}'")
|
|
||||||
# # self.differencesTable.add_row(*map(str, row))
|
|
||||||
# #pass
|
|
||||||
# metadataTokens += [f"-metadata:g", f"{addedTagKey}={targetTags[addedTagKey]}"]
|
|
||||||
#
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# if DIFF_CHANGED_KEY in mediaDifferences[MediaDescriptor.TAGS_KEY].keys():
|
|
||||||
# for changedTagKey in mediaDifferences[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
|
|
||||||
# #row = (f"changed media tag: key='{changedTagKey}' value='{sourceTags[changedTagKey]}'->'{targetTags[changedTagKey]}'",)
|
|
||||||
# click.echo(f"changed metadata key='{changedTagKey}' value='{sourceTags[changedTagKey]}'->'{targetTags[changedTagKey]}'")
|
|
||||||
# # self.differencesTable.add_row(*map(str, row))
|
|
||||||
# #pass
|
|
||||||
# metadataTokens += [f"-metadata:g", f"{changedTagKey}={targetTags[changedTagKey]}"]
|
|
||||||
#
|
|
||||||
# if MediaDescriptor.TRACKS_KEY in mediaDifferences.keys():
|
|
||||||
#
|
|
||||||
# sourceTrackDescriptors = self.__sourceMediaDescriptor.getAllTrackDescriptors()
|
|
||||||
# targetTrackDescriptors = self.__targetMediaDescriptor.getReorderedTrackDescriptors()
|
|
||||||
#
|
|
||||||
# if DIFF_ADDED_KEY in mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
|
||||||
# addedTracksIndices = mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]
|
|
||||||
# raise click.ClickException(f"FfxController.generateMetadataTokens(): Adding tracks is not supported. Track indices {addedTracksIndices}")
|
|
||||||
#
|
|
||||||
# #raise click.ClickException(f"add track {mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]}")
|
|
||||||
# #for addedTrackIndex in mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY]:
|
|
||||||
# #addedTrack : Track = targetTrackDescriptors[addedTrackIndex]
|
|
||||||
# # row = (f"added {addedTrack.getType().label()} track: index={addedTrackIndex} lang={addedTrack.getLanguage().threeLetter()}",)
|
|
||||||
# # self.differencesTable.add_row(*map(str, row))
|
|
||||||
#
|
|
||||||
# if DIFF_REMOVED_KEY in mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
|
||||||
# removedTracksIndices = mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_ADDED_KEY].keys()
|
|
||||||
# raise click.ClickException(f"FfxController.generateMetadataTokens(): Removing tracks is not supported. Track indices {removedTracksIndices}")
|
|
||||||
# #for removedTrackIndex in mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_REMOVED_KEY]:
|
|
||||||
# # row = (f"removed track: index={removedTrackIndex}",)
|
|
||||||
# # self.differencesTable.add_row(*map(str, row))
|
|
||||||
#
|
|
||||||
# # media diff {'tracks': {'changed': {4: {'tags': {'added': {'Yolo'}}}}}}
|
|
||||||
# if DIFF_CHANGED_KEY in mediaDifferences[MediaDescriptor.TRACKS_KEY].keys():
|
|
||||||
# for changedTrackIndex in mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY].keys():
|
|
||||||
#
|
|
||||||
# changedTargetTrackDescriptor : TrackDescriptor = targetTrackDescriptors[changedTrackIndex]
|
|
||||||
# changedTargetTrackSourceIndex = changedTargetTrackDescriptor.getSourceIndex()
|
|
||||||
# changedTargetSourceSubIndex = sourceTrackDescriptors[changedTargetTrackSourceIndex].getSubIndex()
|
|
||||||
# # changedSourceTrackDescriptor : TrackDescriptor = sourceTrackDescriptors[changedTargetTrackSourceIndex]
|
|
||||||
# # changedSourceTrackSubIndex = changedSourceTrackDescriptor.getSubIndex()
|
|
||||||
#
|
|
||||||
# changedTrackDiff : dict = mediaDifferences[MediaDescriptor.TRACKS_KEY][DIFF_CHANGED_KEY][changedTrackIndex]
|
|
||||||
#
|
|
||||||
# if MediaDescriptor.TAGS_KEY in changedTrackDiff.keys():
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# if DIFF_REMOVED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
|
|
||||||
# #for removedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_REMOVED_KEY]:
|
|
||||||
# # row = (f"changed {changedTargetTrackDescriptor.getType().label()} track index={changedTrackIndex} removed key={removedTagKey}",)
|
|
||||||
# # self.differencesTable.add_row(*map(str, row))
|
|
||||||
#
|
|
||||||
# #addedTagValue = targetTrackDescriptors[changedTargetTrackSourceIndex].getTags()[addedTagKey]
|
|
||||||
#
|
|
||||||
# metadataTokens += [f"-map_metadata:s:{changedTargetTrackDescriptor.getType().indicator()}:{changedTargetSourceSubIndex}", "-1"]
|
|
||||||
#
|
|
||||||
# for targetTrackTagKey, targetTrackTagValue in changedTargetTrackDescriptor.getTags():
|
|
||||||
# metadataTokens += [f"-metadata:s:{changedTargetTrackDescriptor.getType().indicator()}:{changedTargetSourceSubIndex}",
|
|
||||||
# f"{targetTrackTagKey}={targetTrackTagValue}"]
|
|
||||||
#
|
|
||||||
# else:
|
|
||||||
#
|
|
||||||
# # media diff {'tracks': {'changed': {4: {'tags': {'added': {'Yolo'}}}}}}
|
|
||||||
# if DIFF_ADDED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
|
|
||||||
# for addedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY]:
|
|
||||||
#
|
|
||||||
# addedTagValue = targetTrackDescriptors[changedTargetTrackSourceIndex].getTags()[addedTagKey]
|
|
||||||
#
|
|
||||||
# # addedTagValue = changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY][addedTagKey]
|
|
||||||
#
|
|
||||||
# # click.echo(f"addedTagValue={addedTagValue}")
|
|
||||||
# # click.echo(f"sourceTrackDescriptors: subindex={[s.getSubIndex() for s in sourceTrackDescriptors]} sourceindex={[s.getSourceIndex() for s in sourceTrackDescriptors]} tags={[s.getTags() for s in sourceTrackDescriptors]}")
|
|
||||||
# # click.echo(f"targetTrackDescriptors: subindex={[t.getSubIndex() for t in targetTrackDescriptors]} sourceindex={[t.getSourceIndex() for t in targetTrackDescriptors]} tags={[t.getTags() for t in targetTrackDescriptors]}")
|
|
||||||
# # click.echo(f"changed track_index={changedTrackIndex} indicator={changedTargetTrackDescriptor.getType().indicator()} key={addedTagKey} value={addedTagValue} source_index={changedSourceTrackIndex}")
|
|
||||||
#
|
|
||||||
# metadataTokens += [f"-metadata:s:{changedTargetTrackDescriptor.getType().indicator()}:{changedTargetSourceSubIndex}",
|
|
||||||
# f"{addedTagKey}={addedTagValue}"]
|
|
||||||
#
|
|
||||||
# # media diff {'tracks': {'changed': {4: {'tags': {'added': {'Yolo'}}}}}}
|
|
||||||
# if DIFF_CHANGED_KEY in changedTrackDiff[MediaDescriptor.TAGS_KEY]:
|
|
||||||
# for changedTagKey in changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_CHANGED_KEY]:
|
|
||||||
#
|
|
||||||
# changedTagValue = targetTrackDescriptors[changedTargetTrackSourceIndex].getTags()[changedTagKey]
|
|
||||||
# # sourceSubIndex = sourceTrackDescriptors[changedTargetTrackSourceIndex].getSubIndex()
|
|
||||||
# # addedTagValue = changedTrackDiff[MediaDescriptor.TAGS_KEY][DIFF_ADDED_KEY][addedTagKey]
|
|
||||||
#
|
|
||||||
# # click.echo(f"addedTagValue={addedTagValue}")
|
|
||||||
# # click.echo(f"sourceTrackDescriptors: subindex={[s.getSubIndex() for s in sourceTrackDescriptors]} sourceindex={[s.getSourceIndex() for s in sourceTrackDescriptors]} tags={[s.getTags() for s in sourceTrackDescriptors]}")
|
|
||||||
# # click.echo(f"targetTrackDescriptors: subindex={[t.getSubIndex() for t in targetTrackDescriptors]} sourceindex={[t.getSourceIndex() for t in targetTrackDescriptors]} tags={[t.getTags() for t in targetTrackDescriptors]}")
|
|
||||||
# # click.echo(f"changed track_index={changedTrackIndex} indicator={changedTargetTrackDescriptor.getType().indicator()} key={addedTagKey} value={addedTagValue} source_index={changedSourceTrackIndex}")
|
|
||||||
#
|
|
||||||
# metadataTokens += [f"-metadata:s:{changedTargetTrackDescriptor.getType().indicator()}:{changedTargetSourceSubIndex}",
|
|
||||||
# f"{changedTagKey}={changedTagValue}"]
|
|
||||||
#
|
|
||||||
# # if TrackDescriptor.DISPOSITION_SET_KEY in changedTrackDiff.keys():
|
|
||||||
#
|
|
||||||
# # if DIFF_ADDED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
|
|
||||||
# # for addedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]:
|
|
||||||
# # # row = (f"changed {changedTargetTrackDescriptor.getType().label()} track index={changedTrackIndex} added disposition={addedDisposition.label()}",)
|
|
||||||
# # # self.differencesTable.add_row(*map(str, row))
|
|
||||||
# # pass
|
|
||||||
#
|
|
||||||
# # if DIFF_REMOVED_KEY in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY]:
|
|
||||||
# # for removedDisposition in changedTrackDiff[TrackDescriptor.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]:
|
|
||||||
# # # row = (f"changed {changedTargetTrackDescriptor.getType().label()} track index={changedTrackIndex} removed disposition={removedDisposition.label()}",)
|
|
||||||
# # # self.differencesTable.add_row(*map(str, row))
|
|
||||||
# # pass
|
|
||||||
|
|
||||||
# return metadataTokens
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def generateMetadataTokens(self):
|
def generateMetadataTokens(self):
|
||||||
|
|
||||||
metadataTokens = []
|
metadataTokens = []
|
||||||
|
|
||||||
for tagKey, tagValue in self.__targetMediaDescriptor.getTags().items():
|
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
|
||||||
|
|
||||||
|
signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
|
||||||
|
removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
|
||||||
|
removeTrackKeys = metadataConfiguration['streams']['remove'] if 'streams' in metadataConfiguration.keys() and 'remove' in metadataConfiguration['streams'].keys() else []
|
||||||
|
|
||||||
|
mediaTags = {k:v for k,v in self.__targetMediaDescriptor.getTags().items() if not k in removeGlobalKeys}
|
||||||
|
|
||||||
|
if (not 'no_signature' in self.__context.keys()
|
||||||
|
or not self.__context['no_signature']):
|
||||||
|
outputMediaTags = mediaTags | signatureTags
|
||||||
|
else:
|
||||||
|
outputMediaTags = mediaTags
|
||||||
|
|
||||||
|
for tagKey, tagValue in outputMediaTags.items():
|
||||||
metadataTokens += [f"-metadata:g",
|
metadataTokens += [f"-metadata:g",
|
||||||
f"{tagKey}={tagValue}"]
|
f"{tagKey}={tagValue}"]
|
||||||
|
|
||||||
|
for removeKey in removeGlobalKeys:
|
||||||
|
metadataTokens += [f"-metadata:g",
|
||||||
|
f"{removeKey}="]
|
||||||
|
|
||||||
|
|
||||||
|
removeMkvmergeMetadata = (not 'keep_mkvmerge_metadata' in self.__context.keys()
|
||||||
|
or not self.__context['keep_mkvmerge_metadata'])
|
||||||
|
|
||||||
#HINT: With current ffmpeg version track metadata tags are not passed to the outfile
|
#HINT: With current ffmpeg version track metadata tags are not passed to the outfile
|
||||||
for td in self.__targetMediaDescriptor.getAllTrackDescriptors():
|
for td in self.__targetMediaDescriptor.getAllTrackDescriptors():
|
||||||
|
|
||||||
|
typeIndicator = td.getType().indicator()
|
||||||
|
subIndex = td.getSubIndex()
|
||||||
|
|
||||||
for tagKey, tagValue in td.getTags().items():
|
for tagKey, tagValue in td.getTags().items():
|
||||||
|
|
||||||
metadataTokens += [f"-metadata:s:{td.getType().indicator()}:{td.getSubIndex()}",
|
if not tagKey in removeTrackKeys:
|
||||||
|
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
|
||||||
f"{tagKey}={tagValue}"]
|
f"{tagKey}={tagValue}"]
|
||||||
|
|
||||||
|
for removeKey in removeTrackKeys:
|
||||||
|
metadataTokens += [f"-metadata:s:{typeIndicator}:{subIndex}",
|
||||||
|
f"{removeKey}="]
|
||||||
|
|
||||||
|
|
||||||
return metadataTokens
|
return metadataTokens
|
||||||
|
|
||||||
|
|
||||||
def runJob(self,
|
def runJob(self,
|
||||||
sourcePath,
|
sourcePath,
|
||||||
targetPath,
|
targetPath,
|
||||||
|
targetFormat: str = '',
|
||||||
videoEncoder: VideoEncoder = VideoEncoder.VP9,
|
videoEncoder: VideoEncoder = VideoEncoder.VP9,
|
||||||
quality: int = DEFAULT_QUALITY,
|
quality: int = DEFAULT_QUALITY,
|
||||||
preset: int = DEFAULT_AV1_PRESET,
|
preset: int = DEFAULT_AV1_PRESET):
|
||||||
denoise: bool = False):
|
|
||||||
|
|
||||||
|
|
||||||
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
||||||
@@ -387,11 +255,9 @@ class FfxController():
|
|||||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
||||||
+ self.generateDispositionTokens())
|
+ self.generateDispositionTokens())
|
||||||
|
|
||||||
if not self.__sourceMediaDescriptor is None:
|
# Optional tokens
|
||||||
commandSequence += self.generateMetadataTokens()
|
commandSequence += self.generateMetadataTokens()
|
||||||
|
commandSequence += self.__context['denoiser'].generateDenoiseTokens()
|
||||||
if denoise:
|
|
||||||
commandSequence += self.generateDenoiseTokens()
|
|
||||||
|
|
||||||
commandSequence += (self.generateAudioEncodingTokens()
|
commandSequence += (self.generateAudioEncodingTokens()
|
||||||
+ self.generateAV1Tokens(int(quality), int(preset))
|
+ self.generateAV1Tokens(int(quality), int(preset))
|
||||||
@@ -401,44 +267,49 @@ class FfxController():
|
|||||||
commandSequence += FfxController.generateCropTokens()
|
commandSequence += FfxController.generateCropTokens()
|
||||||
|
|
||||||
commandSequence += self.generateOutputTokens(targetPath,
|
commandSequence += self.generateOutputTokens(targetPath,
|
||||||
FfxController.DEFAULT_FILE_FORMAT,
|
targetFormat)
|
||||||
FfxController.DEFAULT_FILE_EXTENSION)
|
|
||||||
|
|
||||||
click.echo(f"Command: {' '.join(commandSequence)}")
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
|
||||||
|
|
||||||
if not self.__context['dry_run']:
|
if not self.__context['dry_run']:
|
||||||
executeProcess(commandSequence)
|
executeProcess(commandSequence, context = self.__context)
|
||||||
|
|
||||||
|
|
||||||
if videoEncoder == VideoEncoder.VP9:
|
if videoEncoder == VideoEncoder.VP9:
|
||||||
|
|
||||||
commandSequence1 = (commandTokens
|
commandSequence1 = (commandTokens
|
||||||
+ self.__targetMediaDescriptor.getInputMappingTokens(only_video=True)
|
+ self.__targetMediaDescriptor.getInputMappingTokens(only_video=True))
|
||||||
+ self.generateVP9Pass1Tokens(int(quality)))
|
|
||||||
|
# Optional tokens
|
||||||
|
#NOTE: Filters and so needs to run on the first pass as well, as here
|
||||||
|
# the required bitrate for the second run is determined and recorded
|
||||||
|
# TODO: Results seems to be slightly better with first pass omitted,
|
||||||
|
# Confirm or find better filter settings for 2-pass
|
||||||
|
# commandSequence1 += self.__context['denoiser'].generateDenoiseTokens()
|
||||||
|
|
||||||
|
commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
|
||||||
|
|
||||||
if self.__context['perform_crop']:
|
if self.__context['perform_crop']:
|
||||||
commandSequence1 += self.generateCropTokens()
|
commandSequence1 += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence1 += FfxController.NULL_TOKENS
|
commandSequence1 += FfxController.NULL_TOKENS
|
||||||
|
|
||||||
click.echo(f"Command 1: {' '.join(commandSequence1)}")
|
|
||||||
|
|
||||||
if os.path.exists(FfxController.TEMP_FILE_NAME):
|
if os.path.exists(FfxController.TEMP_FILE_NAME):
|
||||||
os.remove(FfxController.TEMP_FILE_NAME)
|
os.remove(FfxController.TEMP_FILE_NAME)
|
||||||
|
|
||||||
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence 1")
|
||||||
|
|
||||||
if not self.__context['dry_run']:
|
if not self.__context['dry_run']:
|
||||||
executeProcess(commandSequence1)
|
executeProcess(commandSequence1, context = self.__context)
|
||||||
|
|
||||||
commandSequence2 = (commandTokens
|
commandSequence2 = (commandTokens
|
||||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
||||||
+ self.generateDispositionTokens())
|
+ self.generateDispositionTokens())
|
||||||
|
|
||||||
if not self.__sourceMediaDescriptor is None:
|
# Optional tokens
|
||||||
commandSequence2 += self.generateMetadataTokens()
|
commandSequence2 += self.generateMetadataTokens()
|
||||||
|
commandSequence2 += self.__context['denoiser'].generateDenoiseTokens()
|
||||||
if denoise:
|
|
||||||
commandSequence2 += self.generateDenoiseTokens()
|
|
||||||
|
|
||||||
commandSequence2 += self.generateVP9Pass2Tokens(int(quality)) + self.generateAudioEncodingTokens()
|
commandSequence2 += self.generateVP9Pass2Tokens(int(quality)) + self.generateAudioEncodingTokens()
|
||||||
|
|
||||||
@@ -446,13 +317,36 @@ class FfxController():
|
|||||||
commandSequence2 += self.generateCropTokens()
|
commandSequence2 += self.generateCropTokens()
|
||||||
|
|
||||||
commandSequence2 += self.generateOutputTokens(targetPath,
|
commandSequence2 += self.generateOutputTokens(targetPath,
|
||||||
FfxController.DEFAULT_FILE_FORMAT,
|
targetFormat)
|
||||||
FfxController.DEFAULT_FILE_EXTENSION)
|
|
||||||
|
|
||||||
click.echo(f"Command 2: {' '.join(commandSequence2)}")
|
self.__logger.debug(f"FfxController.runJob(): Running command sequence 2")
|
||||||
|
|
||||||
if not self.__context['dry_run']:
|
if not self.__context['dry_run']:
|
||||||
out, err, rc = executeProcess(commandSequence2)
|
out, err, rc = executeProcess(commandSequence2, context = self.__context)
|
||||||
if rc:
|
if rc:
|
||||||
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
|
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def createEmptyFile(self,
|
||||||
|
path: str = 'empty.mkv',
|
||||||
|
sizeX: int = 1280,
|
||||||
|
sizeY: int = 720,
|
||||||
|
rate: int = 25,
|
||||||
|
length: int = 10):
|
||||||
|
|
||||||
|
commandTokens = FfxController.COMMAND_TOKENS
|
||||||
|
|
||||||
|
commandTokens += ['-f',
|
||||||
|
'lavfi',
|
||||||
|
'-i',
|
||||||
|
f"color=size={sizeX}x{sizeY}:rate={rate}:color=black",
|
||||||
|
'-f',
|
||||||
|
'lavfi',
|
||||||
|
'-i',
|
||||||
|
'anullsrc=channel_layout=stereo:sample_rate=44100',
|
||||||
|
'-t',
|
||||||
|
str(length),
|
||||||
|
path]
|
||||||
|
|
||||||
|
out, err, rc = executeProcess(commandTokens, context = self.__context)
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ class FileProperties():
|
|||||||
|
|
||||||
FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
||||||
|
|
||||||
|
SE_INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
|
||||||
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
|
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
|
||||||
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
|
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
|
||||||
|
|
||||||
@@ -23,6 +24,8 @@ class FileProperties():
|
|||||||
|
|
||||||
self.context = context
|
self.context = context
|
||||||
|
|
||||||
|
self.__logger = context['logger']
|
||||||
|
|
||||||
# Separate basedir, basename and extension for current source file
|
# Separate basedir, basename and extension for current source file
|
||||||
self.__sourcePath = sourcePath
|
self.__sourcePath = sourcePath
|
||||||
|
|
||||||
@@ -38,27 +41,39 @@ class FileProperties():
|
|||||||
self.__sourceFileBasename = self.__sourceFilename
|
self.__sourceFileBasename = self.__sourceFilename
|
||||||
self.__sourceFilenameExtension = ''
|
self.__sourceFilenameExtension = ''
|
||||||
|
|
||||||
|
|
||||||
self.__pc = PatternController(context)
|
self.__pc = PatternController(context)
|
||||||
|
|
||||||
|
# Checking if database contains matching pattern
|
||||||
matchResult = self.__pc.matchFilename(self.__sourceFilename)
|
matchResult = self.__pc.matchFilename(self.__sourceFilename)
|
||||||
|
|
||||||
|
self.__logger.debug(f"FileProperties.__init__(): Match result: {matchResult}")
|
||||||
|
|
||||||
self.__pattern: Pattern = matchResult['pattern'] if matchResult else None
|
self.__pattern: Pattern = matchResult['pattern'] if matchResult else None
|
||||||
|
|
||||||
matchedGroups = matchResult['match'].groups() if matchResult else {}
|
if matchResult:
|
||||||
seIndicator = matchedGroups[0] if matchedGroups else self.__sourceFilename
|
databaseMatchedGroups = matchResult['match'].groups()
|
||||||
|
self.__logger.debug(f"FileProperties.__init__(): Matched groups: {databaseMatchedGroups}")
|
||||||
|
|
||||||
|
seIndicator = databaseMatchedGroups[0]
|
||||||
|
|
||||||
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, seIndicator)
|
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, seIndicator)
|
||||||
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, seIndicator)
|
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, seIndicator)
|
||||||
|
|
||||||
self.__season = -1
|
else:
|
||||||
self.__episode = -1
|
self.__logger.debug(f"FileProperties.__init__(): Checking file name for indicator {self.__sourceFilename}")
|
||||||
|
|
||||||
|
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, self.__sourceFilename)
|
||||||
|
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, self.__sourceFilename)
|
||||||
|
|
||||||
if se_match is not None:
|
if se_match is not None:
|
||||||
self.__season = int(se_match.group(1))
|
self.__season = int(se_match.group(1))
|
||||||
self.__episode = int(se_match.group(2))
|
self.__episode = int(se_match.group(2))
|
||||||
elif e_match is not None:
|
elif e_match is not None:
|
||||||
|
self.__season = -1
|
||||||
self.__episode = int(e_match.group(1))
|
self.__episode = int(e_match.group(1))
|
||||||
|
else:
|
||||||
|
self.__season = -1
|
||||||
|
self.__episode = -1
|
||||||
|
|
||||||
|
|
||||||
def getFormatData(self):
|
def getFormatData(self):
|
||||||
@@ -86,24 +101,17 @@ class FileProperties():
|
|||||||
"-hide_banner",
|
"-hide_banner",
|
||||||
"-show_format",
|
"-show_format",
|
||||||
"-of", "json",
|
"-of", "json",
|
||||||
self.__sourcePath])
|
self.__sourcePath],
|
||||||
|
context = self.context)
|
||||||
|
|
||||||
if 'Invalid data found when processing input' in ffprobeError:
|
if 'Invalid data found when processing input' in ffprobeError:
|
||||||
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
||||||
|
|
||||||
|
|
||||||
if returnCode != 0:
|
if returnCode != 0:
|
||||||
raise Exception(f"ffprobe returned with error {returnCode}")
|
raise Exception(f"ffprobe returned with error {returnCode}")
|
||||||
|
|
||||||
|
|
||||||
return json.loads(ffprobeOutput)['format']
|
return json.loads(ffprobeOutput)['format']
|
||||||
|
|
||||||
#[{'index': 0, 'codec_name': 'vp9', 'codec_long_name': 'Google VP9', 'profile': 'Profile 0', 'codec_type': 'video', 'codec_tag_string': '[0][0][0][0]', 'codec_tag': '0x0000', 'width': 1920, 'height': 1080, 'coded_width': 1920, 'coded_height': 1080, 'closed_captions': 0, 'film_grain': 0, 'has_b_frames': 0, 'sample_aspect_ratio': '1:1', 'display_aspect_ratio': '16:9', 'pix_fmt': 'yuv420p', 'level': -99, 'color_range': 'tv', 'chroma_location': 'left', 'field_order': 'progressive', 'refs': 1, 'r_frame_rate': '24000/1001', 'avg_frame_rate': '24000/1001', 'time_base': '1/1000', 'start_pts': 0, 'start_time': '0.000000', 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0, 'non_diegetic': 0, 'captions': 0, 'descriptions': 0, 'metadata': 0, 'dependent': 0, 'still_image': 0}, 'tags': {'BPS': '7974017', 'NUMBER_OF_FRAMES': '34382', 'NUMBER_OF_BYTES': '1429358655', '_STATISTICS_WRITING_APP': "mkvmerge v63.0.0 ('Everything') 64-bit", '_STATISTICS_WRITING_DATE_UTC': '2023-10-07 13:59:46', '_STATISTICS_TAGS': 'BPS DURATION NUMBER_OF_FRAMES NUMBER_OF_BYTES', 'ENCODER': 'Lavc61.3.100 libvpx-vp9', 'DURATION': '00:23:54.016000000'}}]
|
|
||||||
#[{'index': 1, 'codec_name': 'opus', 'codec_long_name': 'Opus (Opus Interactive Audio Codec)', 'codec_type': 'audio', 'codec_tag_string': '[0][0][0][0]', 'codec_tag': '0x0000', 'sample_fmt': 'fltp', 'sample_rate': '48000', 'channels': 2, 'channel_layout': 'stereo', 'bits_per_sample': 0, 'initial_padding': 312, 'r_frame_rate': '0/0', 'avg_frame_rate': '0/0', 'time_base': '1/1000', 'start_pts': -7, 'start_time': '-0.007000', 'extradata_size': 19, 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0, 'non_diegetic': 0, 'captions': 0, 'descriptions': 0, 'metadata': 0, 'dependent': 0, 'still_image': 0}, 'tags': {'language': 'jpn', 'title': 'Japanisch', 'BPS': '128000', 'NUMBER_OF_FRAMES': '61763', 'NUMBER_OF_BYTES': '22946145', '_STATISTICS_WRITING_APP': "mkvmerge v63.0.0 ('Everything') 64-bit", '_STATISTICS_WRITING_DATE_UTC': '2023-10-07 13:59:46', '_STATISTICS_TAGS': 'BPS DURATION NUMBER_OF_FRAMES NUMBER_OF_BYTES', 'ENCODER': 'Lavc61.3.100 libopus', 'DURATION': '00:23:54.141000000'}}]
|
|
||||||
|
|
||||||
#[{'index': 2, 'codec_name': 'webvtt', 'codec_long_name': 'WebVTT subtitle', 'codec_type': 'subtitle', 'codec_tag_string': '[0][0][0][0]', 'codec_tag': '0x0000', 'r_frame_rate': '0/0', 'avg_frame_rate': '0/0', 'time_base': '1/1000', 'start_pts': -7, 'start_time': '-0.007000', 'duration_ts': 1434141, 'duration': '1434.141000', 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0, 'non_diegetic': 0, 'captions': 0, 'descriptions': 0, 'metadata': 0, 'dependent': 0, 'still_image': 0}, 'tags': {'language': 'ger', 'title': 'Deutsch [Full]', 'BPS': '118', 'NUMBER_OF_FRAMES': '300', 'NUMBER_OF_BYTES': '21128', '_STATISTICS_WRITING_APP': "mkvmerge v63.0.0 ('Everything') 64-bit", '_STATISTICS_WRITING_DATE_UTC': '2023-10-07 13:59:46', '_STATISTICS_TAGS': 'BPS DURATION NUMBER_OF_FRAMES NUMBER_OF_BYTES', 'ENCODER': 'Lavc61.3.100 webvtt', 'DURATION': '00:23:54.010000000'}}, {'index': 3, 'codec_name': 'webvtt', 'codec_long_name': 'WebVTT subtitle', 'codec_type': 'subtitle', 'codec_tag_string': '[0][0][0][0]', 'codec_tag': '0x0000', 'r_frame_rate': '0/0', 'avg_frame_rate': '0/0', 'time_base': '1/1000', 'start_pts': -7, 'start_time': '-0.007000', 'duration_ts': 1434141, 'duration': '1434.141000', 'disposition': {'default': 0, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0, 'non_diegetic': 0, 'captions': 0, 'descriptions': 0, 'metadata': 0, 'dependent': 0, 'still_image': 0}, 'tags': {'language': 'eng', 'title': 'Englisch [Full]', 'BPS': '101', 'NUMBER_OF_FRAMES': '276', 'NUMBER_OF_BYTES': '16980', '_STATISTICS_WRITING_APP': "mkvmerge v63.0.0 ('Everything') 64-bit", '_STATISTICS_WRITING_DATE_UTC': '2023-10-07 13:59:46', '_STATISTICS_TAGS': 'BPS DURATION NUMBER_OF_FRAMES NUMBER_OF_BYTES', 
'ENCODER': 'Lavc61.3.100 webvtt', 'DURATION': '00:23:53.230000000'}}]
|
|
||||||
|
|
||||||
|
|
||||||
def getStreamData(self):
|
def getStreamData(self):
|
||||||
"""Returns ffprobe stream data as array with elements according to the following example
|
"""Returns ffprobe stream data as array with elements according to the following example
|
||||||
@@ -153,7 +161,8 @@ class FileProperties():
|
|||||||
"-hide_banner",
|
"-hide_banner",
|
||||||
"-show_streams",
|
"-show_streams",
|
||||||
"-of", "json",
|
"-of", "json",
|
||||||
self.__sourcePath])
|
self.__sourcePath],
|
||||||
|
context = self.context)
|
||||||
|
|
||||||
if 'Invalid data found when processing input' in ffprobeError:
|
if 'Invalid data found when processing input' in ffprobeError:
|
||||||
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
||||||
@@ -167,7 +176,7 @@ class FileProperties():
|
|||||||
|
|
||||||
|
|
||||||
def getMediaDescriptor(self):
|
def getMediaDescriptor(self):
|
||||||
return MediaDescriptor.fromFfprobe(self.getFormatData(), self.getStreamData())
|
return MediaDescriptor.fromFfprobe(self.context, self.getFormatData(), self.getStreamData())
|
||||||
|
|
||||||
|
|
||||||
def getShowId(self) -> int:
|
def getShowId(self) -> int:
|
||||||
@@ -179,10 +188,10 @@ class FileProperties():
|
|||||||
return self.__pattern
|
return self.__pattern
|
||||||
|
|
||||||
|
|
||||||
def getSeason(self):
|
def getSeason(self) -> int:
|
||||||
return int(self.__season)
|
return int(self.__season)
|
||||||
|
|
||||||
def getEpisode(self):
|
def getEpisode(self) -> int:
|
||||||
return int(self.__episode)
|
return int(self.__episode)
|
||||||
|
|
||||||
|
|
||||||
@@ -191,48 +200,3 @@ class FileProperties():
|
|||||||
|
|
||||||
def getFileBasename(self):
|
def getFileBasename(self):
|
||||||
return self.__sourceFileBasename
|
return self.__sourceFileBasename
|
||||||
|
|
||||||
|
|
||||||
def assembleTargetFileBasename(self,
|
|
||||||
label: str = "",
|
|
||||||
quality: int = -1,
|
|
||||||
fileIndex: int = -1,
|
|
||||||
indexDigits: int = DEFAULT_INDEX_DIGITS,
|
|
||||||
extraTokens: list = []):
|
|
||||||
|
|
||||||
if 'show_descriptor' in self.context.keys():
|
|
||||||
season_digits = self.context['show_descriptor'][ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
|
||||||
episode_digits = self.context['show_descriptor'][ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
|
||||||
else:
|
|
||||||
season_digits = ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
|
|
||||||
episode_digits = ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS
|
|
||||||
|
|
||||||
targetFilenameTokens = []
|
|
||||||
|
|
||||||
# targetFilenameExtension = FfxController.DEFAULT_FILE_EXTENSION if extension is None else str(extension)
|
|
||||||
|
|
||||||
if not label:
|
|
||||||
targetFilenameTokens = [self.__sourceFileBasename]
|
|
||||||
else:
|
|
||||||
targetFilenameTokens = [label]
|
|
||||||
|
|
||||||
if fileIndex > -1:
|
|
||||||
targetFilenameTokens += [f"{fileIndex:0{indexDigits}d}"]
|
|
||||||
elif self.__season > -1 and self.__episode > -1:
|
|
||||||
targetFilenameTokens += [f"S{self.__season:0{season_digits}d}E{self.__episode:0{episode_digits}d}"]
|
|
||||||
elif self.__episode > -1:
|
|
||||||
targetFilenameTokens += [f"E{self.__episode:0{episode_digits}d}"]
|
|
||||||
|
|
||||||
if quality != -1:
|
|
||||||
targetFilenameTokens += [f"q{quality}"]
|
|
||||||
|
|
||||||
# In case source and target filenames are the same add an extension to distinct output from input
|
|
||||||
#if not label and self.__sourceFilenameExtension == targetFilenameExtension:
|
|
||||||
# targetFilenameTokens += ['ffx']
|
|
||||||
targetFilenameTokens += extraTokens
|
|
||||||
|
|
||||||
targetFilename = '_'.join(targetFilenameTokens)
|
|
||||||
|
|
||||||
click.echo(f"Target filename: {targetFilename}")
|
|
||||||
|
|
||||||
return targetFilename
|
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
import click
|
||||||
|
|
||||||
DIFF_ADDED_KEY = 'added'
|
DIFF_ADDED_KEY = 'added'
|
||||||
DIFF_REMOVED_KEY = 'removed'
|
DIFF_REMOVED_KEY = 'removed'
|
||||||
DIFF_CHANGED_KEY = 'changed'
|
DIFF_CHANGED_KEY = 'changed'
|
||||||
@@ -27,6 +29,14 @@ def dictDiff(a : dict, b : dict):
|
|||||||
|
|
||||||
return diffResult
|
return diffResult
|
||||||
|
|
||||||
|
def dictCache(element: dict, cache: list = []):
|
||||||
|
for index in range(len(cache)):
|
||||||
|
diff = dictDiff(cache[index], element)
|
||||||
|
if not diff:
|
||||||
|
return index, cache
|
||||||
|
cache.append(element)
|
||||||
|
return -1, cache
|
||||||
|
|
||||||
|
|
||||||
def setDiff(a : set, b : set) -> set:
|
def setDiff(a : set, b : set) -> set:
|
||||||
|
|
||||||
@@ -42,6 +52,27 @@ def setDiff(a : set, b : set) -> set:
|
|||||||
|
|
||||||
return diffResult
|
return diffResult
|
||||||
|
|
||||||
|
|
||||||
|
def permutateList(inputList: list, permutation: list):
|
||||||
|
|
||||||
|
# 0,1,2: ABC
|
||||||
|
# 0,2,1: ACB
|
||||||
|
# 1,2,0: BCA
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def filterFilename(fileName: str) -> str:
|
def filterFilename(fileName: str) -> str:
|
||||||
|
"""This filter replaces charactes from TMDB responses with characters
|
||||||
|
less problemating when using in filenames or removes them"""
|
||||||
|
|
||||||
|
# This appears in TMDB episode names
|
||||||
|
fileName = str(fileName).replace(' (*)', '')
|
||||||
|
fileName = str(fileName).replace('(*)', '')
|
||||||
|
|
||||||
fileName = str(fileName).replace(':', ';')
|
fileName = str(fileName).replace(':', ';')
|
||||||
return fileName
|
fileName = str(fileName).replace('*', '')
|
||||||
|
fileName = str(fileName).replace("'", '')
|
||||||
|
|
||||||
|
return fileName.strip()
|
||||||
|
|||||||
47
bin/ffx/media_controller.py
Normal file
47
bin/ffx/media_controller.py
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
import click, re
|
||||||
|
|
||||||
|
from ffx.model.pattern import Pattern
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from ffx.tag_controller import TagController
|
||||||
|
from ffx.track_controller import TrackController
|
||||||
|
|
||||||
|
class MediaController():
|
||||||
|
|
||||||
|
def __init__(self, context):
|
||||||
|
|
||||||
|
self.context = context
|
||||||
|
self.Session = self.context['database']['session'] # convenience
|
||||||
|
|
||||||
|
self.__logger = context['logger']
|
||||||
|
|
||||||
|
self.__tc = TrackController(context = context)
|
||||||
|
self.__tac = TagController(context = context)
|
||||||
|
|
||||||
|
def setPatternMediaDescriptor(self, mediaDescriptor: MediaDescriptor, patternId: int):
|
||||||
|
|
||||||
|
try:
|
||||||
|
|
||||||
|
pid = int(patternId)
|
||||||
|
|
||||||
|
s = self.Session()
|
||||||
|
q = s.query(Pattern).filter(Pattern.id == pid)
|
||||||
|
|
||||||
|
if q.count():
|
||||||
|
pattern = q.first
|
||||||
|
|
||||||
|
for mediaTagKey, mediaTagValue in mediaDescriptor.getTags():
|
||||||
|
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
|
||||||
|
for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
|
||||||
|
self.__tc.addTrack(trackDescriptor, patternId = pid)
|
||||||
|
|
||||||
|
s.commit()
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
except Exception as ex:
|
||||||
|
self.__logger.error(f"MediaController.setPatternMediaDescriptor(): {repr(ex)}")
|
||||||
|
raise click.ClickException(f"MediaController.setPatternMediaDescriptor(): {repr(ex)}")
|
||||||
|
finally:
|
||||||
|
s.close()
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
import os
|
import os, re, click, logging
|
||||||
import re
|
|
||||||
import click
|
|
||||||
|
|
||||||
from typing import List, Self
|
from typing import List, Self
|
||||||
|
|
||||||
from ffx.track_type import TrackType
|
from ffx.track_type import TrackType
|
||||||
|
from ffx.iso_language import IsoLanguage
|
||||||
|
|
||||||
from ffx.track_disposition import TrackDisposition
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
@@ -27,7 +27,7 @@ class MediaDescriptor:
|
|||||||
FFPROBE_TAGS_KEY = "tags"
|
FFPROBE_TAGS_KEY = "tags"
|
||||||
FFPROBE_CODEC_TYPE_KEY = "codec_type"
|
FFPROBE_CODEC_TYPE_KEY = "codec_type"
|
||||||
|
|
||||||
JELLYFIN_ORDER_FLAG_KEY = "jellyfin_order"
|
# JELLYFIN_ORDER_FLAG_KEY = "jellyfin_order"
|
||||||
|
|
||||||
EXCLUDED_MEDIA_TAGS = ["creation_time"]
|
EXCLUDED_MEDIA_TAGS = ["creation_time"]
|
||||||
|
|
||||||
@@ -36,6 +36,17 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
if MediaDescriptor.CONTEXT_KEY in kwargs.keys():
|
||||||
|
if type(kwargs[MediaDescriptor.CONTEXT_KEY]) is not dict:
|
||||||
|
raise TypeError(
|
||||||
|
f"MediaDescriptor.__init__(): Argument {MediaDescriptor.CONTEXT_KEY} is required to be of type dict"
|
||||||
|
)
|
||||||
|
self.__context = kwargs[MediaDescriptor.CONTEXT_KEY]
|
||||||
|
self.__logger = self.__context['logger']
|
||||||
|
else:
|
||||||
|
self.__context = {}
|
||||||
|
self.__logger = logging.getLogger('FFX').addHandler(logging.NullHandler())
|
||||||
|
|
||||||
if MediaDescriptor.TAGS_KEY in kwargs.keys():
|
if MediaDescriptor.TAGS_KEY in kwargs.keys():
|
||||||
if type(kwargs[MediaDescriptor.TAGS_KEY]) is not dict:
|
if type(kwargs[MediaDescriptor.TAGS_KEY]) is not dict:
|
||||||
raise TypeError(
|
raise TypeError(
|
||||||
@@ -61,15 +72,41 @@ class MediaDescriptor:
|
|||||||
else:
|
else:
|
||||||
self.__trackDescriptors = []
|
self.__trackDescriptors = []
|
||||||
|
|
||||||
if MediaDescriptor.JELLYFIN_ORDER_FLAG_KEY in kwargs.keys():
|
#TODO: to be removed
|
||||||
if type(kwargs[MediaDescriptor.JELLYFIN_ORDER_FLAG_KEY]) is not bool:
|
|
||||||
raise TypeError(
|
|
||||||
f"MediaDescriptor.__init__(): Argument {MediaDescriptor.JELLYFIN_ORDER_FLAG_KEY} is required to be of type bool"
|
|
||||||
)
|
|
||||||
self.__jellyfinOrder = kwargs[MediaDescriptor.JELLYFIN_ORDER_FLAG_KEY]
|
|
||||||
else:
|
|
||||||
self.__jellyfinOrder = False
|
self.__jellyfinOrder = False
|
||||||
|
|
||||||
|
def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
|
||||||
|
|
||||||
|
trackLanguage = IsoLanguage.findThreeLetter(language)
|
||||||
|
if trackLanguage == IsoLanguage.UNDEFINED:
|
||||||
|
self.__logger.warning('MediaDescriptor.setTrackLanguage(): Parameter language does not contain a registered '
|
||||||
|
+ f"ISO 639 3-letter language code, skipping to set language for"
|
||||||
|
+ str('' if trackType is None else trackType.label()) + f"track {index}")
|
||||||
|
|
||||||
|
trackList = self.getTrackDescriptors(trackType=trackType)
|
||||||
|
|
||||||
|
if index < 0 or index > len(trackList) - 1:
|
||||||
|
self.__logger.warning(f"MediaDescriptor.setTrackLanguage(): Parameter index ({index}) is "
|
||||||
|
+ f"out of range of {'' if trackType is None else trackType.label()}track list")
|
||||||
|
|
||||||
|
td: TrackDescriptor = trackList[index]
|
||||||
|
td.setLanguage(trackLanguage)
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
def setTrackTitle(self, title: str, index: int, trackType: TrackType = None):
|
||||||
|
|
||||||
|
trackList = self.getTrackDescriptors(trackType=trackType)
|
||||||
|
|
||||||
|
if index < 0 or index > len(trackList) - 1:
|
||||||
|
self.__logger.error(f"MediaDescriptor.setTrackTitle(): Parameter index ({index}) is "
|
||||||
|
+ f"out of range of {'' if trackType is None else trackType.label()}track list")
|
||||||
|
raise click.Abort()
|
||||||
|
|
||||||
|
td: TrackDescriptor = trackList[index]
|
||||||
|
td.setTitle(title)
|
||||||
|
|
||||||
|
|
||||||
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
|
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
|
||||||
for t in self.getAllTrackDescriptors():
|
for t in self.getAllTrackDescriptors():
|
||||||
@@ -85,7 +122,6 @@ class MediaDescriptor:
|
|||||||
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
|
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def checkConfiguration(self):
|
def checkConfiguration(self):
|
||||||
|
|
||||||
videoTracks = self.getVideoTracks()
|
videoTracks = self.getVideoTracks()
|
||||||
@@ -114,43 +150,76 @@ class MediaDescriptor:
|
|||||||
raise ValueError('Multiple streams originating from the same source stream')
|
raise ValueError('Multiple streams originating from the same source stream')
|
||||||
|
|
||||||
|
|
||||||
def applyJellyfinOrder(self):
|
def applyOverrides(self, overrides: dict):
|
||||||
"""Reorder subtracks in types with default the last, then make subindices flat again"""
|
|
||||||
|
|
||||||
# videoTracks = self.sortSubIndices(self.getVideoTracks())
|
if 'languages' in overrides.keys():
|
||||||
# audioTracks = self.sortSubIndices(self.getAudioTracks())
|
for trackIndex in overrides['languages'].keys():
|
||||||
# subtitleTracks = self.sortSubIndices(self.getSubtitleTracks())
|
self.setTrackLanguage(overrides['languages'][trackIndex], trackIndex)
|
||||||
|
|
||||||
self.checkConfiguration()
|
if 'titles' in overrides.keys():
|
||||||
|
for trackIndex in overrides['titles'].keys():
|
||||||
|
self.setTrackTitle(overrides['titles'][trackIndex], trackIndex)
|
||||||
|
|
||||||
# from self.__trackDescriptors
|
if 'forced_video' in overrides.keys():
|
||||||
videoTracks = self.getVideoTracks()
|
sti = int(overrides['forced_video'])
|
||||||
audioTracks = self.getAudioTracks()
|
self.setForcedSubTrack(TrackType.VIDEO, sti)
|
||||||
subtitleTracks = self.getSubtitleTracks()
|
self.setDefaultSubTrack(TrackType.VIDEO, sti)
|
||||||
|
|
||||||
defaultVideoTracks = [v for v in videoTracks if v.getDispositionFlag(TrackDisposition.DEFAULT)]
|
elif 'default_video' in overrides.keys():
|
||||||
defaultAudioTracks = [a for a in audioTracks if a.getDispositionFlag(TrackDisposition.DEFAULT)]
|
sti = int(overrides['default_video'])
|
||||||
defaultSubtitleTracks = [s for s in subtitleTracks if s.getDispositionFlag(TrackDisposition.DEFAULT)]
|
self.setDefaultSubTrack(TrackType.VIDEO, sti)
|
||||||
|
|
||||||
if defaultVideoTracks:
|
if 'forced_audio' in overrides.keys():
|
||||||
videoTracks.append(videoTracks.pop(videoTracks.index(defaultVideoTracks[0])))
|
sti = int(overrides['forced_audio'])
|
||||||
self.sortSubIndices(videoTracks)
|
self.setForcedSubTrack(TrackType.AUDIO, sti)
|
||||||
if defaultAudioTracks:
|
self.setDefaultSubTrack(TrackType.AUDIO, sti)
|
||||||
audioTracks.append(audioTracks.pop(audioTracks.index(defaultAudioTracks[0])))
|
|
||||||
self.sortSubIndices(audioTracks)
|
|
||||||
if defaultSubtitleTracks:
|
|
||||||
subtitleTracks.append(subtitleTracks.pop(subtitleTracks.index(defaultSubtitleTracks[0])))
|
|
||||||
self.sortSubIndices(subtitleTracks)
|
|
||||||
|
|
||||||
self.__trackDescriptors = videoTracks + audioTracks + subtitleTracks
|
elif 'default_audio' in overrides.keys():
|
||||||
self.sortIndices(self.__trackDescriptors)
|
sti = int(overrides['default_audio'])
|
||||||
|
self.setDefaultSubTrack(TrackType.AUDIO, sti)
|
||||||
|
|
||||||
|
if 'forced_subtitle' in overrides.keys():
|
||||||
|
sti = int(overrides['forced_subtitle'])
|
||||||
|
self.setForcedSubTrack(TrackType.SUBTITLE, sti)
|
||||||
|
self.setDefaultSubTrack(TrackType.SUBTITLE, sti)
|
||||||
|
|
||||||
|
elif 'default_subtitle' in overrides.keys():
|
||||||
|
sti = int(overrides['default_subtitle'])
|
||||||
|
self.setDefaultSubTrack(TrackType.SUBTITLE, sti)
|
||||||
|
|
||||||
|
if 'stream_order' in overrides.keys():
|
||||||
|
self.rearrangeTrackDescriptors(overrides['stream_order'])
|
||||||
|
|
||||||
|
|
||||||
|
def applySourceIndices(self, sourceMediaDescriptor: Self):
|
||||||
|
sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
|
||||||
|
|
||||||
|
numTrackDescriptors = len(self.__trackDescriptors)
|
||||||
|
if len(sourceTrackDescriptors) != numTrackDescriptors:
|
||||||
|
raise ValueError('MediaDescriptor.applySourceIndices (): Number of track descriptors does not match')
|
||||||
|
|
||||||
|
for trackIndex in range(numTrackDescriptors):
|
||||||
|
self.__trackDescriptors[trackIndex].setSourceIndex(sourceTrackDescriptors[trackIndex].getSourceIndex())
|
||||||
|
|
||||||
|
|
||||||
|
def rearrangeTrackDescriptors(self, newOrder: List[int]):
|
||||||
|
if len(newOrder) != len(self.__trackDescriptors):
|
||||||
|
raise ValueError('Length of list with reordered indices does not match number of track descriptors')
|
||||||
|
reorderedTrackDescriptors = {}
|
||||||
|
for oldIndex in newOrder:
|
||||||
|
reorderedTrackDescriptors.append(self.__trackDescriptors[oldIndex])
|
||||||
|
self.__trackDescriptors = reorderedTrackDescriptors
|
||||||
|
self.reindexSubIndices()
|
||||||
|
self.reindexIndices()
|
||||||
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def fromFfprobe(cls, formatData, streamData):
|
def fromFfprobe(cls, context, formatData, streamData):
|
||||||
|
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
|
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = context
|
||||||
|
|
||||||
if MediaDescriptor.FFPROBE_TAGS_KEY in formatData.keys():
|
if MediaDescriptor.FFPROBE_TAGS_KEY in formatData.keys():
|
||||||
kwargs[MediaDescriptor.TAGS_KEY] = formatData[
|
kwargs[MediaDescriptor.TAGS_KEY] = formatData[
|
||||||
MediaDescriptor.FFPROBE_TAGS_KEY
|
MediaDescriptor.FFPROBE_TAGS_KEY
|
||||||
@@ -193,6 +262,16 @@ class MediaDescriptor:
|
|||||||
subIndex += 1
|
subIndex += 1
|
||||||
return descriptors
|
return descriptors
|
||||||
|
|
||||||
|
def reindexSubIndices(self, trackDescriptors: list = []):
|
||||||
|
tdList = trackDescriptors if trackDescriptors else self.__trackDescriptors
|
||||||
|
subIndexCounter = {}
|
||||||
|
for td in tdList:
|
||||||
|
trackType = td.getType()
|
||||||
|
if trackType not in subIndexCounter.keys():
|
||||||
|
subIndexCounter[trackType] = 0
|
||||||
|
td.setSubIndex(subIndexCounter[trackType])
|
||||||
|
subIndexCounter[trackType] += 1
|
||||||
|
|
||||||
def sortIndices(
|
def sortIndices(
|
||||||
self, descriptors: List[TrackDescriptor]
|
self, descriptors: List[TrackDescriptor]
|
||||||
) -> List[TrackDescriptor]:
|
) -> List[TrackDescriptor]:
|
||||||
@@ -202,19 +281,36 @@ class MediaDescriptor:
|
|||||||
index += 1
|
index += 1
|
||||||
return descriptors
|
return descriptors
|
||||||
|
|
||||||
|
def reindexIndices(self, trackDescriptors: list = []):
|
||||||
|
tdList = trackDescriptors if trackDescriptors else self.__trackDescriptors
|
||||||
|
for trackIndex in range(len(tdList)):
|
||||||
|
tdList[trackIndex].setIndex(trackIndex)
|
||||||
|
|
||||||
def getAllTrackDescriptors(self) -> List[TrackDescriptor]:
|
|
||||||
|
def getAllTrackDescriptors(self):
|
||||||
|
"""Returns all track descriptors sorted by type: video, audio then subtitles"""
|
||||||
return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
|
return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
|
||||||
|
|
||||||
|
|
||||||
|
def getTrackDescriptors(self,
|
||||||
|
trackType: TrackType = None) -> List[TrackDescriptor]:
|
||||||
|
|
||||||
|
if trackType is None:
|
||||||
|
return self.__trackDescriptors
|
||||||
|
|
||||||
|
descriptorList = []
|
||||||
|
for td in self.__trackDescriptors:
|
||||||
|
if td.getType() == trackType:
|
||||||
|
descriptorList.append(td)
|
||||||
|
|
||||||
|
return descriptorList
|
||||||
|
|
||||||
|
|
||||||
def getVideoTracks(self) -> List[TrackDescriptor]:
|
def getVideoTracks(self) -> List[TrackDescriptor]:
|
||||||
return [
|
return [v for v in self.__trackDescriptors if v.getType() == TrackType.VIDEO]
|
||||||
v for v in self.__trackDescriptors if v.getType() == TrackType.VIDEO
|
|
||||||
]
|
|
||||||
|
|
||||||
def getAudioTracks(self) -> List[TrackDescriptor]:
|
def getAudioTracks(self) -> List[TrackDescriptor]:
|
||||||
return [
|
return [a for a in self.__trackDescriptors if a.getType() == TrackType.AUDIO]
|
||||||
a for a in self.__trackDescriptors if a.getType() == TrackType.AUDIO
|
|
||||||
]
|
|
||||||
|
|
||||||
def getSubtitleTracks(self) -> List[TrackDescriptor]:
|
def getSubtitleTracks(self) -> List[TrackDescriptor]:
|
||||||
return [
|
return [
|
||||||
@@ -227,17 +323,12 @@ class MediaDescriptor:
|
|||||||
def compare(self, vsMediaDescriptor: Self):
|
def compare(self, vsMediaDescriptor: Self):
|
||||||
|
|
||||||
if not isinstance(vsMediaDescriptor, self.__class__):
|
if not isinstance(vsMediaDescriptor, self.__class__):
|
||||||
raise click.ClickException(
|
self.__logger.error(f"MediaDescriptor.compare(): Argument is required to be of type {self.__class__}")
|
||||||
f"MediaDescriptor.compare(): Argument is required to be of type {self.__class__}"
|
raise click.Abort()
|
||||||
)
|
|
||||||
|
|
||||||
vsTags = vsMediaDescriptor.getTags()
|
vsTags = vsMediaDescriptor.getTags()
|
||||||
tags = self.getTags()
|
tags = self.getTags()
|
||||||
|
|
||||||
# tags ist leer
|
|
||||||
# click.echo(f"tags={tags} vsTags={vsTags}")
|
|
||||||
# raise click.Abort
|
|
||||||
|
|
||||||
# HINT: Some tags differ per file, for example creation_time, so these are removed before diff
|
# HINT: Some tags differ per file, for example creation_time, so these are removed before diff
|
||||||
for emt in MediaDescriptor.EXCLUDED_MEDIA_TAGS:
|
for emt in MediaDescriptor.EXCLUDED_MEDIA_TAGS:
|
||||||
if emt in tags.keys():
|
if emt in tags.keys():
|
||||||
@@ -306,12 +397,11 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
return compareResult
|
return compareResult
|
||||||
|
|
||||||
|
|
||||||
def getImportFileTokens(self, use_sub_index: bool = True):
|
def getImportFileTokens(self, use_sub_index: bool = True):
|
||||||
|
|
||||||
# reorderedTrackDescriptors = self.getReorderedTrackDescriptors()
|
|
||||||
importFileTokens = []
|
importFileTokens = []
|
||||||
|
|
||||||
#for rtd in reorderedTrackDescriptors:
|
|
||||||
for td in self.__trackDescriptors:
|
for td in self.__trackDescriptors:
|
||||||
|
|
||||||
importedFilePath = td.getExternalSourceFilePath()
|
importedFilePath = td.getExternalSourceFilePath()
|
||||||
@@ -326,13 +416,23 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
|
|
||||||
def getInputMappingTokens(self, use_sub_index: bool = True, only_video: bool = False):
|
def getInputMappingTokens(self, use_sub_index: bool = True, only_video: bool = False):
|
||||||
|
"""Tracks must be reordered for source index order"""
|
||||||
|
|
||||||
# reorderedTrackDescriptors = self.getReorderedTrackDescriptors()
|
|
||||||
inputMappingTokens = []
|
inputMappingTokens = []
|
||||||
|
|
||||||
filePointer = 1
|
filePointer = 1
|
||||||
#for rtd in reorderedTrackDescriptors:
|
for trackIndex in range(len(self.__trackDescriptors)):
|
||||||
for td in self.__trackDescriptors:
|
|
||||||
|
td = self.__trackDescriptors[trackIndex]
|
||||||
|
|
||||||
|
stdi = self.__trackDescriptors[td.getSourceIndex()].getIndex()
|
||||||
|
stdsi = self.__trackDescriptors[td.getSourceIndex()].getSubIndex()
|
||||||
|
|
||||||
|
# sti = self.__trackDescriptors[trackIndex].getSourceIndex()
|
||||||
|
# sotd = sourceOrderTrackDescriptors[sti]
|
||||||
|
|
||||||
|
# appearently this negates applyJellyfinOrder
|
||||||
|
#for rtd in sorted(self.__trackDescriptors.copy(), key=lambda d: d.getSourceIndex()):
|
||||||
|
|
||||||
trackType = td.getType()
|
trackType = td.getType()
|
||||||
|
|
||||||
@@ -355,15 +455,16 @@ class MediaDescriptor:
|
|||||||
if td.getCodec() != TrackDescriptor.CODEC_PGS:
|
if td.getCodec() != TrackDescriptor.CODEC_PGS:
|
||||||
inputMappingTokens += [
|
inputMappingTokens += [
|
||||||
"-map",
|
"-map",
|
||||||
f"0:{trackType.indicator()}:{td.getSubIndex()}",
|
f"0:{trackType.indicator()}:{stdsi}",
|
||||||
]
|
]
|
||||||
|
|
||||||
else:
|
else:
|
||||||
if td.getCodec() != TrackDescriptor.CODEC_PGS:
|
if td.getCodec() != TrackDescriptor.CODEC_PGS:
|
||||||
inputMappingTokens += ["-map", f"0:{td.getIndex()}"]
|
inputMappingTokens += ["-map", f"0:{stdi}"]
|
||||||
|
|
||||||
return inputMappingTokens
|
return inputMappingTokens
|
||||||
|
|
||||||
|
|
||||||
def searchSubtitleFiles(self, searchDirectory, prefix):
|
def searchSubtitleFiles(self, searchDirectory, prefix):
|
||||||
|
|
||||||
sesl_match = re.compile(MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_MATCH)
|
sesl_match = re.compile(MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_MATCH)
|
||||||
@@ -387,25 +488,23 @@ class MediaDescriptor:
|
|||||||
|
|
||||||
subtitleFileDescriptors.append(subtitleFileDescriptor)
|
subtitleFileDescriptors.append(subtitleFileDescriptor)
|
||||||
|
|
||||||
click.echo(f"Available subtitle files {subtitleFileDescriptors}\n")
|
self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")
|
||||||
|
|
||||||
return subtitleFileDescriptors
|
return subtitleFileDescriptors
|
||||||
|
|
||||||
|
|
||||||
def importSubtitles(self, searchDirectory, prefix, season: int = -1, episode: int = -1):
|
def importSubtitles(self, searchDirectory, prefix, season: int = -1, episode: int = -1):
|
||||||
|
|
||||||
click.echo(f"Season: {season} Episode: {episode}")
|
# click.echo(f"Season: {season} Episode: {episode}")
|
||||||
|
self.__logger.debug(f"importSubtitles(): Season: {season} Episode: {episode}")
|
||||||
|
|
||||||
availableFileSubtitleDescriptors = self.searchSubtitleFiles(searchDirectory, prefix)
|
availableFileSubtitleDescriptors = self.searchSubtitleFiles(searchDirectory, prefix)
|
||||||
|
|
||||||
click.echo(f"availableFileSubtitleDescriptors: {availableFileSubtitleDescriptors}")
|
self.__logger.debug(f"importSubtitles(): availableFileSubtitleDescriptors: {availableFileSubtitleDescriptors}")
|
||||||
|
|
||||||
subtitleTracks = self.getSubtitleTracks()
|
subtitleTracks = self.getSubtitleTracks()
|
||||||
|
|
||||||
click.echo(f"subtitleTracks: {[s.getIndex() for s in subtitleTracks]}")
|
self.__logger.debug(f"importSubtitles(): subtitleTracks: {[s.getIndex() for s in subtitleTracks]}")
|
||||||
|
|
||||||
# if len(availableFileSubtitleDescriptors) != len(subtitleTracks):
|
|
||||||
# raise click.ClickException(f"MediaDescriptor.importSubtitles(): Number if subtitle files not matching number of subtitle tracks")
|
|
||||||
|
|
||||||
matchingSubtitleFileDescriptors = (
|
matchingSubtitleFileDescriptors = (
|
||||||
sorted(
|
sorted(
|
||||||
@@ -420,10 +519,19 @@ class MediaDescriptor:
|
|||||||
else []
|
else []
|
||||||
)
|
)
|
||||||
|
|
||||||
click.echo(f"matchingSubtitleFileDescriptors: {matchingSubtitleFileDescriptors}")
|
self.__logger.debug(f"importSubtitles(): matchingSubtitleFileDescriptors: {matchingSubtitleFileDescriptors}")
|
||||||
|
|
||||||
for msfd in matchingSubtitleFileDescriptors:
|
for msfd in matchingSubtitleFileDescriptors:
|
||||||
matchingSubtitleTrackDescriptor = [s for s in subtitleTracks if s.getIndex() == msfd["index"]]
|
matchingSubtitleTrackDescriptor = [s for s in subtitleTracks if s.getIndex() == msfd["index"]]
|
||||||
if matchingSubtitleTrackDescriptor:
|
if matchingSubtitleTrackDescriptor:
|
||||||
click.echo(f"Found matching subtitle file {msfd["path"]}\n")
|
# click.echo(f"Found matching subtitle file {msfd["path"]}\n")
|
||||||
|
self.__logger.debug(f"importSubtitles(): Found matching subtitle file {msfd['path']}")
|
||||||
matchingSubtitleTrackDescriptor[0].setExternalSourceFilePath(msfd["path"])
|
matchingSubtitleTrackDescriptor[0].setExternalSourceFilePath(msfd["path"])
|
||||||
|
|
||||||
|
|
||||||
|
def getConfiguration(self, label: str = ''):
|
||||||
|
yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
|
||||||
|
for td in self.getAllTrackDescriptors():
|
||||||
|
yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
|
||||||
|
+ '|'.join([d.indicator() for d in td.getDispositionSet()])
|
||||||
|
+ ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ from textual.containers import Grid
|
|||||||
from ffx.model.show import Show
|
from ffx.model.show import Show
|
||||||
from ffx.model.pattern import Pattern
|
from ffx.model.pattern import Pattern
|
||||||
|
|
||||||
|
from ffx.audio_layout import AudioLayout
|
||||||
|
|
||||||
from .pattern_controller import PatternController
|
from .pattern_controller import PatternController
|
||||||
from .show_controller import ShowController
|
from .show_controller import ShowController
|
||||||
from .track_controller import TrackController
|
from .track_controller import TrackController
|
||||||
@@ -42,7 +44,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
Grid {
|
Grid {
|
||||||
grid-size: 5 8;
|
grid-size: 5 8;
|
||||||
grid-rows: 8 2 2 2 8 2 2 8;
|
grid-rows: 8 2 2 2 8 2 2 8;
|
||||||
grid-columns: 25 25 100 10 75;
|
grid-columns: 25 25 120 10 75;
|
||||||
height: 100%;
|
height: 100%;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
padding: 1;
|
padding: 1;
|
||||||
@@ -157,7 +159,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
self.__currentPattern = self.__mediaFileProperties.getPattern()
|
self.__currentPattern = self.__mediaFileProperties.getPattern()
|
||||||
|
|
||||||
# keine tags vorhanden
|
# keine tags vorhanden
|
||||||
self.__targetMediaDescriptor = self.__currentPattern.getMediaDescriptor() if self.__currentPattern is not None else None
|
self.__targetMediaDescriptor = self.__currentPattern.getMediaDescriptor(self.context) if self.__currentPattern is not None else None
|
||||||
|
|
||||||
# Enumerating differences between media descriptors
|
# Enumerating differences between media descriptors
|
||||||
# from file (=current) vs from stored in database (=target)
|
# from file (=current) vs from stored in database (=target)
|
||||||
@@ -299,11 +301,13 @@ class MediaDetailsScreen(Screen):
|
|||||||
typeCounter[trackType] = 0
|
typeCounter[trackType] = 0
|
||||||
|
|
||||||
dispoSet = td.getDispositionSet()
|
dispoSet = td.getDispositionSet()
|
||||||
|
audioLayout = td.getAudioLayout()
|
||||||
row = (td.getIndex(),
|
row = (td.getIndex(),
|
||||||
trackType.label(),
|
trackType.label(),
|
||||||
typeCounter[trackType],
|
typeCounter[trackType],
|
||||||
td.getAudioLayout().label() if trackType == TrackType.AUDIO else ' ',
|
td.getCodec(),
|
||||||
|
audioLayout.label() if trackType == TrackType.AUDIO
|
||||||
|
and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
|
||||||
td.getLanguage().label(),
|
td.getLanguage().label(),
|
||||||
td.getTitle(),
|
td.getTitle(),
|
||||||
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
|
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
|
||||||
@@ -341,7 +345,8 @@ class MediaDetailsScreen(Screen):
|
|||||||
# Define the columns with headers
|
# Define the columns with headers
|
||||||
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
|
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
|
||||||
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
|
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
|
||||||
self.column_key_track_sub_index = self.tracksTable.add_column("Subindex", width=5)
|
self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
|
||||||
|
self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
|
||||||
self.column_key_track_layout = self.tracksTable.add_column("Layout", width=10)
|
self.column_key_track_layout = self.tracksTable.add_column("Layout", width=10)
|
||||||
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
|
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
|
||||||
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
|
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
|
||||||
@@ -419,14 +424,12 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
if event.button.id == "pattern_button":
|
if event.button.id == "pattern_button":
|
||||||
|
|
||||||
INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
|
|
||||||
|
|
||||||
pattern = self.query_one("#pattern_input", Input).value
|
pattern = self.query_one("#pattern_input", Input).value
|
||||||
|
|
||||||
patternMatch = re.search(INDICATOR_PATTERN, pattern)
|
patternMatch = re.search(FileProperties.SE_INDICATOR_PATTERN, pattern)
|
||||||
|
|
||||||
if patternMatch:
|
if patternMatch:
|
||||||
self.query_one("#pattern_input", Input).value = pattern.replace(patternMatch.group(1), INDICATOR_PATTERN)
|
self.query_one("#pattern_input", Input).value = pattern.replace(patternMatch.group(1), FileProperties.SE_INDICATOR_PATTERN)
|
||||||
|
|
||||||
|
|
||||||
if event.button.id == "select_default_button":
|
if event.button.id == "select_default_button":
|
||||||
@@ -452,10 +455,12 @@ class MediaDetailsScreen(Screen):
|
|||||||
selected_track_data = self.tracksTable.get_row(row_key)
|
selected_track_data = self.tracksTable.get_row(row_key)
|
||||||
|
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
|
||||||
kwargs[TrackDescriptor.INDEX_KEY] = int(selected_track_data[0])
|
kwargs[TrackDescriptor.INDEX_KEY] = int(selected_track_data[0])
|
||||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(selected_track_data[1])
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(selected_track_data[1])
|
||||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(selected_track_data[2])
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(selected_track_data[2])
|
||||||
|
kwargs[TrackDescriptor.CODEC_NAME_KEY] = int(selected_track_data[3])
|
||||||
|
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(selected_track_data[4])
|
||||||
|
|
||||||
return TrackDescriptor(**kwargs)
|
return TrackDescriptor(**kwargs)
|
||||||
else:
|
else:
|
||||||
@@ -504,7 +509,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
if patternDescriptor:
|
if patternDescriptor:
|
||||||
patternId = self.__pc.addPattern(patternDescriptor)
|
patternId = self.__pc.addPattern(patternDescriptor)
|
||||||
|
if patternId:
|
||||||
self.highlightPattern(False)
|
self.highlightPattern(False)
|
||||||
|
|
||||||
for tagKey, tagValue in self.__currentMediaDescriptor.getTags().items():
|
for tagKey, tagValue in self.__currentMediaDescriptor.getTags().items():
|
||||||
@@ -624,9 +629,7 @@ class MediaDetailsScreen(Screen):
|
|||||||
|
|
||||||
def action_edit_pattern(self):
|
def action_edit_pattern(self):
|
||||||
|
|
||||||
patternDescriptor = {}
|
patternDescriptor = self.getPatternDescriptorFromInput()
|
||||||
patternDescriptor['show_id'] = self.getSelectedShowDescriptor().getId()
|
|
||||||
patternDescriptor['pattern'] = self.getPatternFromInput()
|
|
||||||
|
|
||||||
if patternDescriptor['pattern']:
|
if patternDescriptor['pattern']:
|
||||||
|
|
||||||
|
|||||||
@@ -41,9 +41,9 @@ class Pattern(Base):
|
|||||||
def getShowId(self):
|
def getShowId(self):
|
||||||
return int(self.show_id)
|
return int(self.show_id)
|
||||||
|
|
||||||
def getShowDescriptor(self) -> ShowDescriptor:
|
def getShowDescriptor(self, context) -> ShowDescriptor:
|
||||||
click.echo(f"self.show {self.show} id={self.show_id}")
|
# click.echo(f"self.show {self.show} id={self.show_id}")
|
||||||
return self.show.getDescriptor()
|
return self.show.getDescriptor(context)
|
||||||
|
|
||||||
def getId(self):
|
def getId(self):
|
||||||
return int(self.id)
|
return int(self.id)
|
||||||
@@ -55,11 +55,13 @@ class Pattern(Base):
|
|||||||
return {str(t.key):str(t.value) for t in self.media_tags}
|
return {str(t.key):str(t.value) for t in self.media_tags}
|
||||||
|
|
||||||
|
|
||||||
def getMediaDescriptor(self):
|
def getMediaDescriptor(self, context):
|
||||||
|
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
kwargs[MediaDescriptor.TAGS_KEY] = self.getTags()
|
|
||||||
|
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = context
|
||||||
|
|
||||||
|
kwargs[MediaDescriptor.TAGS_KEY] = self.getTags()
|
||||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = []
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = []
|
||||||
|
|
||||||
# Set ordered subindices
|
# Set ordered subindices
|
||||||
@@ -68,7 +70,7 @@ class Pattern(Base):
|
|||||||
trackType = track.getType()
|
trackType = track.getType()
|
||||||
if not trackType in subIndexCounter.keys():
|
if not trackType in subIndexCounter.keys():
|
||||||
subIndexCounter[trackType] = 0
|
subIndexCounter[trackType] = 0
|
||||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY].append(track.getDescriptor(subIndex = subIndexCounter[trackType]))
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY].append(track.getDescriptor(context, subIndex = subIndexCounter[trackType]))
|
||||||
subIndexCounter[trackType] += 1
|
subIndexCounter[trackType] += 1
|
||||||
|
|
||||||
return MediaDescriptor(**kwargs)
|
return MediaDescriptor(**kwargs)
|
||||||
|
|||||||
@@ -44,10 +44,10 @@ class Show(Base):
|
|||||||
indicator_episode_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS)
|
indicator_episode_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS)
|
||||||
|
|
||||||
|
|
||||||
def getDescriptor(self):
|
def getDescriptor(self, context):
|
||||||
|
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
|
kwargs[ShowDescriptor.CONTEXT_KEY] = context
|
||||||
kwargs[ShowDescriptor.ID_KEY] = int(self.id)
|
kwargs[ShowDescriptor.ID_KEY] = int(self.id)
|
||||||
kwargs[ShowDescriptor.NAME_KEY] = str(self.name)
|
kwargs[ShowDescriptor.NAME_KEY] = str(self.name)
|
||||||
kwargs[ShowDescriptor.YEAR_KEY] = int(self.year)
|
kwargs[ShowDescriptor.YEAR_KEY] = int(self.year)
|
||||||
|
|||||||
@@ -153,7 +153,7 @@ class Track(Base):
|
|||||||
return TrackType.fromIndex(self.track_type)
|
return TrackType.fromIndex(self.track_type)
|
||||||
|
|
||||||
def getCodec(self):
|
def getCodec(self):
|
||||||
return str(self.codec_name)
|
return self.codec_name
|
||||||
|
|
||||||
def getIndex(self):
|
def getIndex(self):
|
||||||
return int(self.index) if self.index is not None else -1
|
return int(self.index) if self.index is not None else -1
|
||||||
@@ -189,10 +189,13 @@ class Track(Base):
|
|||||||
return bool(self.disposition_flags & 2**disposition.index())
|
return bool(self.disposition_flags & 2**disposition.index())
|
||||||
|
|
||||||
|
|
||||||
def getDescriptor(self, subIndex : int = -1) -> TrackDescriptor:
|
def getDescriptor(self, context = None, subIndex : int = -1) -> TrackDescriptor:
|
||||||
|
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
|
|
||||||
|
if not context is None:
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = context
|
||||||
|
|
||||||
kwargs[TrackDescriptor.ID_KEY] = self.getId()
|
kwargs[TrackDescriptor.ID_KEY] = self.getId()
|
||||||
kwargs[TrackDescriptor.PATTERN_ID_KEY] = self.getPatternId()
|
kwargs[TrackDescriptor.PATTERN_ID_KEY] = self.getPatternId()
|
||||||
|
|
||||||
|
|||||||
142
bin/ffx/nlmeans_controller.py
Normal file
142
bin/ffx/nlmeans_controller.py
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
class NlmeansController():
|
||||||
|
"""
|
||||||
|
s: double
|
||||||
|
|
||||||
|
Denoising strength (from 1 to 30) (default 1)
|
||||||
|
Trade-off between noise removal and detail retention. Comparable to gaussian sigma.
|
||||||
|
|
||||||
|
p: int patch size (from 0 to 99) (default 7)
|
||||||
|
|
||||||
|
Catches larger areas reducing broader noise patterns, but costly
|
||||||
|
|
||||||
|
pc: int patch size for chroma planes (from 0 to 99) (default 0)
|
||||||
|
|
||||||
|
r: int research window (from 0 to 99) (default 15)
|
||||||
|
|
||||||
|
Range to search for comparable patches.
|
||||||
|
Better filtering but costly
|
||||||
|
|
||||||
|
rc: int research window for chroma planes (from 0 to 99) (default 0)
|
||||||
|
|
||||||
|
Good values to denoise film grain that was subobtimally encoded:
|
||||||
|
strength: float = 2.8
|
||||||
|
patchSize: int = 12
|
||||||
|
chromaPatchSize: int = 8
|
||||||
|
researchWindow: int = 22
|
||||||
|
chromaResearchWindow: int= 16
|
||||||
|
"""
|
||||||
|
|
||||||
|
DEFAULT_STRENGTH: float = 2.8
|
||||||
|
DEFAULT_PATCH_SIZE: int = 13
|
||||||
|
DEFAULT_CHROMA_PATCH_SIZE: int = 9
|
||||||
|
DEFAULT_RESEARCH_WINDOW: int = 23
|
||||||
|
DEFAULT_CHROMA_RESEARCH_WINDOW: int= 17
|
||||||
|
|
||||||
|
def __init__(self,
|
||||||
|
parameters: str = "none",
|
||||||
|
strength: str = "",
|
||||||
|
patchSize: str = "",
|
||||||
|
chromaPatchSize: str = "",
|
||||||
|
researchWindow: str = "",
|
||||||
|
chromaResearchWindow: str = "",
|
||||||
|
useHardware: bool = False):
|
||||||
|
|
||||||
|
self.__isActive = (parameters != "none"
|
||||||
|
or strength
|
||||||
|
or patchSize
|
||||||
|
or chromaPatchSize
|
||||||
|
or researchWindow
|
||||||
|
or chromaResearchWindow)
|
||||||
|
self.__useHardware = useHardware
|
||||||
|
|
||||||
|
parameterTokens = parameters.split(',')
|
||||||
|
|
||||||
|
self.__strengthList = []
|
||||||
|
if strength:
|
||||||
|
strengthTokens = strength.split(',')
|
||||||
|
for st in strengthTokens:
|
||||||
|
try:
|
||||||
|
strengthValue = float(st)
|
||||||
|
except:
|
||||||
|
raise ValueError('NlmeansController: Strength value has to be of type float')
|
||||||
|
if strengthValue < 1.0 or strengthValue > 30.0:
|
||||||
|
raise ValueError('NlmeansController: Strength value has to be between 1.0 and 30.0')
|
||||||
|
self.__strengthList.append(strengthValue)
|
||||||
|
else:
|
||||||
|
self.__strengthList = [NlmeansController.DEFAULT_STRENGTH]
|
||||||
|
|
||||||
|
self.__patchSizeList = []
|
||||||
|
if patchSize:
|
||||||
|
patchSizeTokens = patchSize.split(',')
|
||||||
|
for pst in patchSizeTokens:
|
||||||
|
try:
|
||||||
|
patchSizeValue = int(pst)
|
||||||
|
except:
|
||||||
|
raise ValueError('NlmeansController: Patch size value has to be of type int')
|
||||||
|
if patchSizeValue < 0 or patchSizeValue > 99:
|
||||||
|
raise ValueError('NlmeansController: Patch size value has to be between 0 and 99')
|
||||||
|
if patchSizeValue % 2 == 0:
|
||||||
|
raise ValueError('NlmeansController: Patch size value has to an odd number')
|
||||||
|
self.__patchSizeList.append(patchSizeValue)
|
||||||
|
else:
|
||||||
|
self.__patchSizeList = [NlmeansController.DEFAULT_PATCH_SIZE]
|
||||||
|
|
||||||
|
self.__chromaPatchSizeList = []
|
||||||
|
if chromaPatchSize:
|
||||||
|
chromaPatchSizeTokens = chromaPatchSize.split(',')
|
||||||
|
for cpst in chromaPatchSizeTokens:
|
||||||
|
try:
|
||||||
|
chromaPatchSizeValue = int(pst)
|
||||||
|
except:
|
||||||
|
raise ValueError('NlmeansController: Chroma patch size value has to be of type int')
|
||||||
|
if chromaPatchSizeValue < 0 or chromaPatchSizeValue > 99:
|
||||||
|
raise ValueError('NlmeansController: Chroma patch value has to be between 0 and 99')
|
||||||
|
if chromaPatchSizeValue % 2 == 0:
|
||||||
|
raise ValueError('NlmeansController: Chroma patch value has to an odd number')
|
||||||
|
self.__chromaPatchSizeList.append(chromaPatchSizeValue)
|
||||||
|
else:
|
||||||
|
self.__chromaPatchSizeList = [NlmeansController.DEFAULT_CHROMA_PATCH_SIZE]
|
||||||
|
|
||||||
|
self.__researchWindowList = []
|
||||||
|
if researchWindow:
|
||||||
|
researchWindowTokens = researchWindow.split(',')
|
||||||
|
for rwt in researchWindowTokens:
|
||||||
|
try:
|
||||||
|
researchWindowValue = int(rwt)
|
||||||
|
except:
|
||||||
|
raise ValueError('NlmeansController: Research window value has to be of type int')
|
||||||
|
if researchWindowValue < 0 or researchWindowValue > 99:
|
||||||
|
raise ValueError('NlmeansController: Research window value has to be between 0 and 99')
|
||||||
|
if researchWindowValue % 2 == 0:
|
||||||
|
raise ValueError('NlmeansController: Research window value has to an odd number')
|
||||||
|
self.__researchWindowList.append(researchWindowValue)
|
||||||
|
else:
|
||||||
|
self.__researchWindowList = [NlmeansController.DEFAULT_RESEARCH_WINDOW]
|
||||||
|
|
||||||
|
self.__chromaResearchWindowList = []
|
||||||
|
if chromaResearchWindow:
|
||||||
|
chromaResearchWindowTokens = chromaResearchWindow.split(',')
|
||||||
|
for crwt in chromaResearchWindowTokens:
|
||||||
|
try:
|
||||||
|
chromaResearchWindowValue = int(crwt)
|
||||||
|
except:
|
||||||
|
raise ValueError('NlmeansController: Chroma research window value has to be of type int')
|
||||||
|
if chromaResearchWindowValue < 0 or chromaResearchWindowValue > 99:
|
||||||
|
raise ValueError('NlmeansController: Chroma research window value has to be between 0 and 99')
|
||||||
|
if chromaResearchWindowValue % 2 == 0:
|
||||||
|
raise ValueError('NlmeansController: Chroma research window value has to an odd number')
|
||||||
|
self.__chromaResearchWindowList.append(chromaResearchWindowValue)
|
||||||
|
else:
|
||||||
|
self.__chromaResearchWindowList = [NlmeansController.DEFAULT_CHROMA_RESEARCH_WINDOW]
|
||||||
|
|
||||||
|
def isActive(self):
|
||||||
|
return self.__isActive
|
||||||
|
|
||||||
|
def generateDenoiseTokens(self):
|
||||||
|
filterName = 'nlmeans_opencl' if self.__useHardware else 'nlmeans'
|
||||||
|
return ['-vf', f"{filterName}=s={self.__strengthList[0]}"
|
||||||
|
+ f":p={self.__patchSizeList[0]}"
|
||||||
|
+ f":pc={self.__chromaPatchSizeList[0]}"
|
||||||
|
+ f":r={self.__researchWindowList[0]}"
|
||||||
|
+ f":rc={self.__chromaResearchWindowList[0]}"] if self.__isActive else []
|
||||||
|
|
||||||
@@ -16,7 +16,8 @@ class PatternController():
|
|||||||
try:
|
try:
|
||||||
|
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']))
|
q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']),
|
||||||
|
Pattern.pattern == str(patternDescriptor['pattern']))
|
||||||
|
|
||||||
if not q.count():
|
if not q.count():
|
||||||
pattern = Pattern(show_id = int(patternDescriptor['show_id']),
|
pattern = Pattern(show_id = int(patternDescriptor['show_id']),
|
||||||
@@ -25,7 +26,7 @@ class PatternController():
|
|||||||
s.commit()
|
s.commit()
|
||||||
return pattern.getId()
|
return pattern.getId()
|
||||||
else:
|
else:
|
||||||
return None
|
return 0
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.addPattern(): {repr(ex)}")
|
raise click.ClickException(f"PatternController.addPattern(): {repr(ex)}")
|
||||||
@@ -116,7 +117,8 @@ class PatternController():
|
|||||||
s.close()
|
s.close()
|
||||||
|
|
||||||
|
|
||||||
def matchFilename(self, filename : str) -> re.Match:
|
def matchFilename(self, filename : str) -> dict:
|
||||||
|
"""Returns dict {'match': <a regex match obj>, 'pattern': <ffx pattern obj>} or empty dict of no pattern was found"""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
s = self.Session()
|
s = self.Session()
|
||||||
@@ -126,7 +128,7 @@ class PatternController():
|
|||||||
|
|
||||||
for pattern in q.all():
|
for pattern in q.all():
|
||||||
patternMatch = re.search(str(pattern.pattern), str(filename))
|
patternMatch = re.search(str(pattern.pattern), str(filename))
|
||||||
if patternMatch:
|
if patternMatch is not None:
|
||||||
matchResult['match'] = patternMatch
|
matchResult['match'] = patternMatch
|
||||||
matchResult['pattern'] = pattern
|
matchResult['pattern'] = pattern
|
||||||
|
|
||||||
@@ -137,18 +139,18 @@ class PatternController():
|
|||||||
finally:
|
finally:
|
||||||
s.close()
|
s.close()
|
||||||
|
|
||||||
def getMediaDescriptor(self, patternId):
|
# def getMediaDescriptor(self, context, patternId):
|
||||||
|
#
|
||||||
try:
|
# try:
|
||||||
s = self.Session()
|
# s = self.Session()
|
||||||
q = s.query(Pattern).filter(Pattern.id == int(patternId))
|
# q = s.query(Pattern).filter(Pattern.id == int(patternId))
|
||||||
|
#
|
||||||
if q.count():
|
# if q.count():
|
||||||
return q.first().getMediaDescriptor()
|
# return q.first().getMediaDescriptor(context)
|
||||||
else:
|
# else:
|
||||||
return None
|
# return None
|
||||||
|
#
|
||||||
except Exception as ex:
|
# except Exception as ex:
|
||||||
raise click.ClickException(f"PatternController.getPatternDescriptor(): {repr(ex)}")
|
# raise click.ClickException(f"PatternController.getMediaDescriptor(): {repr(ex)}")
|
||||||
finally:
|
# finally:
|
||||||
s.close()
|
# s.close()
|
||||||
@@ -7,6 +7,8 @@ from textual.containers import Grid
|
|||||||
from .show_controller import ShowController
|
from .show_controller import ShowController
|
||||||
from .pattern_controller import PatternController
|
from .pattern_controller import PatternController
|
||||||
|
|
||||||
|
from ffx.model.pattern import Pattern
|
||||||
|
|
||||||
|
|
||||||
# Screen[dict[int, str, int]]
|
# Screen[dict[int, str, int]]
|
||||||
class PatternDeleteScreen(Screen):
|
class PatternDeleteScreen(Screen):
|
||||||
@@ -51,16 +53,16 @@ class PatternDeleteScreen(Screen):
|
|||||||
self.__pc = PatternController(context = self.context)
|
self.__pc = PatternController(context = self.context)
|
||||||
self.__sc = ShowController(context = self.context)
|
self.__sc = ShowController(context = self.context)
|
||||||
|
|
||||||
self.pattern_id = patternId
|
self.__patternId = patternId
|
||||||
self.pattern_obj = self.__pc.getPatternDescriptor(patternId) if patternId is not None else {}
|
self.__pattern: Pattern = self.__pc.getPattern(patternId) if patternId is not None else {}
|
||||||
self.show_obj = self.__sc.getShowDescriptor(showId) if showId is not None else {}
|
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else {}
|
||||||
|
|
||||||
|
|
||||||
def on_mount(self):
|
def on_mount(self):
|
||||||
if self.show_obj:
|
if self.__showDescriptor:
|
||||||
self.query_one("#showlabel", Static).update(f"{self.show_obj['id']} - {self.show_obj['name']} ({self.show_obj['year']})")
|
self.query_one("#showlabel", Static).update(f"{self.__showDescriptor.getId()} - {self.__showDescriptor.getName()} ({self.__showDescriptor.getYear()})")
|
||||||
if self.pattern_obj:
|
if not self.__pattern is None:
|
||||||
self.query_one("#patternlabel", Static).update(str(self.pattern_obj['pattern']))
|
self.query_one("#patternlabel", Static).update(str(self.__pattern.pattern))
|
||||||
|
|
||||||
|
|
||||||
def compose(self):
|
def compose(self):
|
||||||
@@ -94,11 +96,11 @@ class PatternDeleteScreen(Screen):
|
|||||||
|
|
||||||
if event.button.id == "delete_button":
|
if event.button.id == "delete_button":
|
||||||
|
|
||||||
if self.pattern_id is None:
|
if self.__patternId is None:
|
||||||
raise click.ClickException('PatternDeleteScreen.on_button_pressed(): pattern id is undefined')
|
raise click.ClickException('PatternDeleteScreen.on_button_pressed(): pattern id is undefined')
|
||||||
|
|
||||||
if self.__pc.deletePattern(self.pattern_id):
|
if self.__pc.deletePattern(self.__patternId):
|
||||||
self.dismiss(self.pattern_obj)
|
self.dismiss(self.__pattern)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
#TODO: Meldung
|
#TODO: Meldung
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
import click, re
|
import click, re
|
||||||
|
from typing import List
|
||||||
|
|
||||||
from textual import events
|
from textual import events
|
||||||
from textual.app import App, ComposeResult
|
from textual.app import App, ComposeResult
|
||||||
@@ -28,6 +29,10 @@ from ffx.track_descriptor import TrackDescriptor
|
|||||||
|
|
||||||
from textual.widgets._data_table import CellDoesNotExist
|
from textual.widgets._data_table import CellDoesNotExist
|
||||||
|
|
||||||
|
from ffx.file_properties import FileProperties
|
||||||
|
from ffx.iso_language import IsoLanguage
|
||||||
|
from ffx.audio_layout import AudioLayout
|
||||||
|
|
||||||
|
|
||||||
# Screen[dict[int, str, int]]
|
# Screen[dict[int, str, int]]
|
||||||
class PatternDetailsScreen(Screen):
|
class PatternDetailsScreen(Screen):
|
||||||
@@ -35,9 +40,9 @@ class PatternDetailsScreen(Screen):
|
|||||||
CSS = """
|
CSS = """
|
||||||
|
|
||||||
Grid {
|
Grid {
|
||||||
grid-size: 5 13;
|
grid-size: 7 13;
|
||||||
grid-rows: 2 2 2 2 2 8 2 2 8 2 2 2 2;
|
grid-rows: 2 2 2 2 2 8 2 2 8 2 2 2 2;
|
||||||
grid-columns: 25 25 25 25 25;
|
grid-columns: 25 25 25 25 25 25 25;
|
||||||
height: 100%;
|
height: 100%;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
padding: 1;
|
padding: 1;
|
||||||
@@ -68,6 +73,12 @@ class PatternDetailsScreen(Screen):
|
|||||||
.five {
|
.five {
|
||||||
column-span: 5;
|
column-span: 5;
|
||||||
}
|
}
|
||||||
|
.six {
|
||||||
|
column-span: 6;
|
||||||
|
}
|
||||||
|
.seven {
|
||||||
|
column-span: 7;
|
||||||
|
}
|
||||||
|
|
||||||
.box {
|
.box {
|
||||||
height: 100%;
|
height: 100%;
|
||||||
@@ -124,9 +135,10 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
typeCounter = {}
|
typeCounter = {}
|
||||||
|
|
||||||
|
tr: Track
|
||||||
for tr in tracks:
|
for tr in tracks:
|
||||||
|
|
||||||
td : TrackDescriptor = tr.getDescriptor()
|
td : TrackDescriptor = tr.getDescriptor(self.context)
|
||||||
|
|
||||||
trackType = td.getType()
|
trackType = td.getType()
|
||||||
if not trackType in typeCounter.keys():
|
if not trackType in typeCounter.keys():
|
||||||
@@ -134,19 +146,61 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
dispoSet = td.getDispositionSet()
|
dispoSet = td.getDispositionSet()
|
||||||
|
|
||||||
|
trackLanguage = td.getLanguage()
|
||||||
|
audioLayout = td.getAudioLayout()
|
||||||
row = (td.getIndex(),
|
row = (td.getIndex(),
|
||||||
trackType.label(),
|
trackType.label(),
|
||||||
typeCounter[trackType],
|
typeCounter[trackType],
|
||||||
td.getAudioLayout().label() if trackType == TrackType.AUDIO else ' ',
|
td.getCodec(),
|
||||||
td.getLanguage().label(),
|
audioLayout.label() if trackType == TrackType.AUDIO
|
||||||
|
and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
|
||||||
|
trackLanguage.label() if trackLanguage != IsoLanguage.UNDEFINED else ' ',
|
||||||
td.getTitle(),
|
td.getTitle(),
|
||||||
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
|
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
|
||||||
'Yes' if TrackDisposition.FORCED in dispoSet else 'No')
|
'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
|
||||||
|
td.getSourceIndex())
|
||||||
|
|
||||||
self.tracksTable.add_row(*map(str, row))
|
self.tracksTable.add_row(*map(str, row))
|
||||||
|
|
||||||
typeCounter[trackType] += 1
|
typeCounter[trackType] += 1
|
||||||
|
|
||||||
|
|
||||||
|
def swapTracks(self, trackIndex1: int, trackIndex2: int):
|
||||||
|
|
||||||
|
ti1 = int(trackIndex1)
|
||||||
|
ti2 = int(trackIndex2)
|
||||||
|
|
||||||
|
siblingDescriptors: List[TrackDescriptor] = self.__tc.findSiblingDescriptors(self.__pattern.getId())
|
||||||
|
|
||||||
|
numSiblings = len(siblingDescriptors)
|
||||||
|
|
||||||
|
if ti1 < 0 or ti1 >= numSiblings:
|
||||||
|
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex1 ({ti1}) is out of range ({numSiblings})")
|
||||||
|
|
||||||
|
if ti2 < 0 or ti2 >= numSiblings:
|
||||||
|
raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex2 ({ti2}) is out of range ({numSiblings})")
|
||||||
|
|
||||||
|
sibling1 = siblingDescriptors[trackIndex1]
|
||||||
|
sibling2 = siblingDescriptors[trackIndex2]
|
||||||
|
|
||||||
|
# raise click.ClickException(f"siblings id1={sibling1.getId()} id2={sibling2.getId()}")
|
||||||
|
|
||||||
|
subIndex2 = sibling2.getSubIndex()
|
||||||
|
|
||||||
|
sibling2.setIndex(sibling1.getIndex())
|
||||||
|
sibling2.setSubIndex(sibling1.getSubIndex())
|
||||||
|
|
||||||
|
sibling1.setIndex(trackIndex2)
|
||||||
|
sibling1.setSubIndex(subIndex2)
|
||||||
|
|
||||||
|
if not self.__tc.updateTrack(sibling1.getId(), sibling1):
|
||||||
|
raise click.ClickException('Update sibling1 failed')
|
||||||
|
if not self.__tc.updateTrack(sibling2.getId(), sibling2):
|
||||||
|
raise click.ClickException('Update sibling2 failed')
|
||||||
|
|
||||||
|
self.updateTracks()
|
||||||
|
|
||||||
|
|
||||||
def updateTags(self):
|
def updateTags(self):
|
||||||
|
|
||||||
self.tagsTable.clear()
|
self.tagsTable.clear()
|
||||||
@@ -179,7 +233,7 @@ class PatternDetailsScreen(Screen):
|
|||||||
def compose(self):
|
def compose(self):
|
||||||
|
|
||||||
|
|
||||||
self.tagsTable = DataTable(classes="five")
|
self.tagsTable = DataTable(classes="seven")
|
||||||
|
|
||||||
# Define the columns with headers
|
# Define the columns with headers
|
||||||
self.column_key_tag_key = self.tagsTable.add_column("Key", width=10)
|
self.column_key_tag_key = self.tagsTable.add_column("Key", width=10)
|
||||||
@@ -188,16 +242,18 @@ class PatternDetailsScreen(Screen):
|
|||||||
self.tagsTable.cursor_type = 'row'
|
self.tagsTable.cursor_type = 'row'
|
||||||
|
|
||||||
|
|
||||||
self.tracksTable = DataTable(id="tracks_table", classes="five")
|
self.tracksTable = DataTable(id="tracks_table", classes="seven")
|
||||||
|
|
||||||
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
|
self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
|
||||||
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
|
self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
|
||||||
self.column_key_track_sub_index = self.tracksTable.add_column("Subindex", width=5)
|
self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
|
||||||
|
self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
|
||||||
self.column_key_track_audio_layout = self.tracksTable.add_column("Layout", width=10)
|
self.column_key_track_audio_layout = self.tracksTable.add_column("Layout", width=10)
|
||||||
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
|
self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
|
||||||
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
|
self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
|
||||||
self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
|
self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
|
||||||
self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
|
self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
|
||||||
|
self.column_key_track_source_index = self.tracksTable.add_column("SrcIndex", width=8)
|
||||||
|
|
||||||
self.tracksTable.cursor_type = 'row'
|
self.tracksTable.cursor_type = 'row'
|
||||||
|
|
||||||
@@ -208,21 +264,21 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
# 1
|
# 1
|
||||||
yield Static("Edit filename pattern" if self.__pattern is not None else "New filename pattern", id="toplabel")
|
yield Static("Edit filename pattern" if self.__pattern is not None else "New filename pattern", id="toplabel")
|
||||||
yield Input(type="text", id="pattern_input", classes="four")
|
yield Input(type="text", id="pattern_input", classes="six")
|
||||||
|
|
||||||
# 2
|
# 2
|
||||||
yield Static("from show")
|
yield Static("from show")
|
||||||
yield Static("", id="showlabel", classes="three")
|
yield Static("", id="showlabel", classes="five")
|
||||||
yield Button("Substitute pattern", id="pattern_button")
|
yield Button("Substitute pattern", id="pattern_button")
|
||||||
|
|
||||||
# 3
|
# 3
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="seven")
|
||||||
# 4
|
# 4
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
# 5
|
# 5
|
||||||
yield Static("Media Tags")
|
yield Static("Media Tags")
|
||||||
yield Static(" ")
|
|
||||||
|
|
||||||
if self.__pattern is not None:
|
if self.__pattern is not None:
|
||||||
yield Button("Add", id="button_add_tag")
|
yield Button("Add", id="button_add_tag")
|
||||||
@@ -232,15 +288,20 @@ class PatternDetailsScreen(Screen):
|
|||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
|
|
||||||
|
yield Static(" ")
|
||||||
|
yield Static(" ")
|
||||||
|
yield Static(" ")
|
||||||
|
|
||||||
# 6
|
# 6
|
||||||
yield self.tagsTable
|
yield self.tagsTable
|
||||||
|
|
||||||
# 7
|
# 7
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
# 8
|
# 8
|
||||||
yield Static("Streams")
|
yield Static("Streams")
|
||||||
yield Static(" ")
|
|
||||||
|
|
||||||
if self.__pattern is not None:
|
if self.__pattern is not None:
|
||||||
yield Button("Add", id="button_add_track")
|
yield Button("Add", id="button_add_track")
|
||||||
@@ -250,22 +311,27 @@ class PatternDetailsScreen(Screen):
|
|||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
yield Static(" ")
|
yield Static(" ")
|
||||||
|
|
||||||
|
yield Static(" ")
|
||||||
|
yield Button("Up", id="button_track_up")
|
||||||
|
yield Button("Down", id="button_track_down")
|
||||||
|
|
||||||
# 9
|
# 9
|
||||||
yield self.tracksTable
|
yield self.tracksTable
|
||||||
|
|
||||||
# 10
|
# 10
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
# 11
|
# 11
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
# 12
|
# 12
|
||||||
yield Button("Save", id="save_button")
|
yield Button("Save", id="save_button")
|
||||||
yield Button("Cancel", id="cancel_button")
|
yield Button("Cancel", id="cancel_button")
|
||||||
yield Static(" ", classes="three")
|
yield Static(" ", classes="five")
|
||||||
|
|
||||||
# 13
|
# 13
|
||||||
yield Static(" ", classes="five")
|
yield Static(" ", classes="seven")
|
||||||
|
|
||||||
yield Footer()
|
yield Footer()
|
||||||
|
|
||||||
@@ -292,7 +358,7 @@ class PatternDetailsScreen(Screen):
|
|||||||
trackIndex = int(selected_track_data[0])
|
trackIndex = int(selected_track_data[0])
|
||||||
trackSubIndex = int(selected_track_data[2])
|
trackSubIndex = int(selected_track_data[2])
|
||||||
|
|
||||||
return self.__tc.getTrack(self.__pattern.getId(), trackIndex).getDescriptor(subIndex=trackSubIndex)
|
return self.__tc.getTrack(self.__pattern.getId(), trackIndex).getDescriptor(self.context, subIndex=trackSubIndex)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
return None
|
return None
|
||||||
@@ -345,8 +411,7 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
else:
|
else:
|
||||||
patternId = self.__pc.addPattern(patternDescriptor)
|
patternId = self.__pc.addPattern(patternDescriptor)
|
||||||
if patternId is not None:
|
if patternId:
|
||||||
|
|
||||||
self.dismiss(patternDescriptor)
|
self.dismiss(patternDescriptor)
|
||||||
else:
|
else:
|
||||||
#TODO: Meldung
|
#TODO: Meldung
|
||||||
@@ -388,14 +453,33 @@ class PatternDetailsScreen(Screen):
|
|||||||
|
|
||||||
if event.button.id == "pattern_button":
|
if event.button.id == "pattern_button":
|
||||||
|
|
||||||
INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
|
|
||||||
|
|
||||||
pattern = self.query_one("#pattern_input", Input).value
|
pattern = self.query_one("#pattern_input", Input).value
|
||||||
|
|
||||||
patternMatch = re.search(INDICATOR_PATTERN, pattern)
|
patternMatch = re.search(FileProperties.SE_INDICATOR_PATTERN, pattern)
|
||||||
|
|
||||||
if patternMatch:
|
if patternMatch:
|
||||||
self.query_one("#pattern_input", Input).value = pattern.replace(patternMatch.group(1), INDICATOR_PATTERN)
|
self.query_one("#pattern_input", Input).value = pattern.replace(patternMatch.group(1),
|
||||||
|
FileProperties.SE_INDICATOR_PATTERN)
|
||||||
|
|
||||||
|
|
||||||
|
if event.button.id == "button_track_up":
|
||||||
|
|
||||||
|
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||||
|
selectedTrackIndex = selectedTrackDescriptor.getIndex()
|
||||||
|
|
||||||
|
if selectedTrackIndex > 0 and selectedTrackIndex < self.tracksTable.row_count:
|
||||||
|
correspondingTrackIndex = selectedTrackIndex - 1
|
||||||
|
self.swapTracks(selectedTrackIndex, correspondingTrackIndex)
|
||||||
|
|
||||||
|
|
||||||
|
if event.button.id == "button_track_down":
|
||||||
|
|
||||||
|
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||||
|
selectedTrackIndex = selectedTrackDescriptor.getIndex()
|
||||||
|
|
||||||
|
if selectedTrackIndex >= 0 and selectedTrackIndex < (self.tracksTable.row_count - 1):
|
||||||
|
correspondingTrackIndex = selectedTrackIndex + 1
|
||||||
|
self.swapTracks(selectedTrackIndex, correspondingTrackIndex)
|
||||||
|
|
||||||
|
|
||||||
def handle_add_track(self, trackDescriptor : TrackDescriptor):
|
def handle_add_track(self, trackDescriptor : TrackDescriptor):
|
||||||
|
|||||||
@@ -1,9 +1,32 @@
|
|||||||
import subprocess
|
import subprocess, logging
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
def executeProcess(commandSequence: List[str]):
|
def executeProcess(commandSequence: List[str], directory: str = None, context: dict = None):
|
||||||
# process = subprocess.Popen([t.encode('utf-8') for t in commandSequence], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
"""
|
||||||
process = subprocess.Popen(commandSequence, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
|
niceness -20 bis +19
|
||||||
|
cpu_percent: 1 bis 99
|
||||||
|
"""
|
||||||
|
|
||||||
|
logger = (context['logger'] if not context is None
|
||||||
|
else logging.getLogger('FFX').addHandler(logging.NullHandler()))
|
||||||
|
|
||||||
|
niceSequence = []
|
||||||
|
|
||||||
|
niceness = (int(context['resource_limits']['niceness'])
|
||||||
|
if 'resource_limits' in context.keys() and 'niceness' in context['resource_limits'].keys() else 99)
|
||||||
|
cpu_percent = (int(context['resource_limits']['cpu_percent'])
|
||||||
|
if 'resource_limits' in context.keys() and 'cpu_percent' in context['resource_limits'].keys() else 0)
|
||||||
|
|
||||||
|
if niceness >= -20 and niceness <= 19:
|
||||||
|
niceSequence += ['nice', '-n', str(niceness)]
|
||||||
|
if cpu_percent >= 1:
|
||||||
|
niceSequence += ['cpulimit', '-l', str(cpu_percent), '--']
|
||||||
|
|
||||||
|
niceCommand = niceSequence + commandSequence
|
||||||
|
|
||||||
|
logger.debug(f"executeProcess() command sequence: {' '.join(niceCommand)}")
|
||||||
|
|
||||||
|
process = subprocess.Popen(niceCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8', cwd = directory)
|
||||||
output, error = process.communicate()
|
output, error = process.communicate()
|
||||||
# return output.decode('utf-8'), error.decode('utf-8'), process.returncode
|
|
||||||
return output, error, process.returncode
|
return output, error, process.returncode
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ class ShowController():
|
|||||||
|
|
||||||
if q.count():
|
if q.count():
|
||||||
show: Show = q.first()
|
show: Show = q.first()
|
||||||
return show.getDescriptor()
|
return show.getDescriptor(self.context)
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"ShowController.getShowDescriptor(): {repr(ex)}")
|
raise click.ClickException(f"ShowController.getShowDescriptor(): {repr(ex)}")
|
||||||
|
|||||||
@@ -1,19 +1,10 @@
|
|||||||
import click
|
import logging
|
||||||
|
|
||||||
from typing import List, Self
|
|
||||||
|
|
||||||
from ffx.track_type import TrackType
|
|
||||||
from ffx.track_disposition import TrackDisposition
|
|
||||||
|
|
||||||
from ffx.track_descriptor import TrackDescriptor
|
|
||||||
|
|
||||||
from ffx.helper import dictDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY
|
|
||||||
|
|
||||||
|
|
||||||
class ShowDescriptor():
|
class ShowDescriptor():
|
||||||
"""This class represents the structural content of a media file including streams and metadata"""
|
"""This class represents the structural content of a media file including streams and metadata"""
|
||||||
|
|
||||||
# CONTEXT_KEY = 'context'
|
CONTEXT_KEY = 'context'
|
||||||
|
|
||||||
ID_KEY = 'id'
|
ID_KEY = 'id'
|
||||||
NAME_KEY = 'name'
|
NAME_KEY = 'name'
|
||||||
@@ -32,6 +23,17 @@ class ShowDescriptor():
|
|||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
if ShowDescriptor.CONTEXT_KEY in kwargs.keys():
|
||||||
|
if type(kwargs[ShowDescriptor.CONTEXT_KEY]) is not dict:
|
||||||
|
raise TypeError(
|
||||||
|
f"ShowDescriptor.__init__(): Argument {ShowDescriptor.CONTEXT_KEY} is required to be of type dict"
|
||||||
|
)
|
||||||
|
self.__context = kwargs[ShowDescriptor.CONTEXT_KEY]
|
||||||
|
self.__logger = self.__context['logger']
|
||||||
|
else:
|
||||||
|
self.__context = {}
|
||||||
|
self.__logger = logging.getLogger('FFX').addHandler(logging.NullHandler())
|
||||||
|
|
||||||
if ShowDescriptor.ID_KEY in kwargs.keys():
|
if ShowDescriptor.ID_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.ID_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.ID_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.ID_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.ID_KEY} is required to be of type int")
|
||||||
@@ -53,33 +55,34 @@ class ShowDescriptor():
|
|||||||
else:
|
else:
|
||||||
self.__showYear = -1
|
self.__showYear = -1
|
||||||
|
|
||||||
|
|
||||||
if ShowDescriptor.INDEX_SEASON_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDEX_SEASON_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_SEASON_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_SEASON_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indexSeasonDigits = kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
self.__indexSeasonDigits = kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indexSeasonDigits = -1
|
self.__indexSeasonDigits = ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
|
||||||
|
|
||||||
if ShowDescriptor.INDEX_EPISODE_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDEX_EPISODE_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_EPISODE_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_EPISODE_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indexEpisodeDigits = kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
self.__indexEpisodeDigits = kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indexEpisodeDigits = -1
|
self.__indexEpisodeDigits = ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
|
||||||
|
|
||||||
if ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indicatorSeasonDigits = kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
self.__indicatorSeasonDigits = kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indicatorSeasonDigits = -1
|
self.__indicatorSeasonDigits = ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
|
||||||
|
|
||||||
if ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY in kwargs.keys():
|
if ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY in kwargs.keys():
|
||||||
if type(kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]) is not int:
|
if type(kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]) is not int:
|
||||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY} is required to be of type int")
|
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY} is required to be of type int")
|
||||||
self.__indicatorEpisodeDigits = kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
self.__indicatorEpisodeDigits = kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
||||||
else:
|
else:
|
||||||
self.__indicatorEpisodeDigits = -1
|
self.__indicatorEpisodeDigits = ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS
|
||||||
|
|
||||||
|
|
||||||
def getId(self):
|
def getId(self):
|
||||||
|
|||||||
@@ -208,7 +208,7 @@ class ShowDetailsScreen(Screen):
|
|||||||
self.app.push_screen(PatternDeleteScreen(patternId = selectedPatternId, showId = self.__showDescriptor.getId()), self.handle_remove_pattern)
|
self.app.push_screen(PatternDeleteScreen(patternId = selectedPatternId, showId = self.__showDescriptor.getId()), self.handle_remove_pattern)
|
||||||
|
|
||||||
|
|
||||||
def handle_remove_pattern(self, screenResult):
|
def handle_remove_pattern(self, pattern):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
|
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
|
||||||
@@ -354,8 +354,7 @@ class ShowDetailsScreen(Screen):
|
|||||||
|
|
||||||
showDescriptor = self.getShowDescriptorFromInput()
|
showDescriptor = self.getShowDescriptorFromInput()
|
||||||
if not showDescriptor is None:
|
if not showDescriptor is None:
|
||||||
showResult = self.__tc.queryShow(showDescriptor.getId())
|
showName, showYear = self.__tc.getShowNameAndYear(showDescriptor.getId())
|
||||||
firstAirDate = datetime.strptime(showResult['first_air_date'], '%Y-%m-%d')
|
|
||||||
|
|
||||||
self.query_one("#name_input", Input).value = filterFilename(showResult['name'])
|
self.query_one("#name_input", Input).value = filterFilename(showName)
|
||||||
self.query_one("#year_input", Input).value = str(firstAirDate.year)
|
self.query_one("#year_input", Input).value = str(showYear)
|
||||||
|
|||||||
64
bin/ffx/test/_basename_combinator_1.py
Normal file
64
bin/ffx/test/_basename_combinator_1.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .basename_combinator import BasenameCombinator
|
||||||
|
|
||||||
|
from .indicator_combinator import IndicatorCombinator
|
||||||
|
from .label_combinator import LabelCombinator
|
||||||
|
|
||||||
|
class BasenameCombinator2(BasenameCombinator):
|
||||||
|
"""documentation_site"""
|
||||||
|
|
||||||
|
VARIANT = 'B2'
|
||||||
|
|
||||||
|
# def __init__(self, SubCombinators: dict = {}, context = None):
|
||||||
|
def __init__(self, context = None):
|
||||||
|
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return BasenameCombinator2.VARIANT
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def assertFunc(self, mediaDescriptor: MediaDescriptor):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
for L in LabelCombinator.getAllClassReferences():
|
||||||
|
# for I in IndicatorCombinator.getAllClassReferences():
|
||||||
|
# for S in SiteCombinator.getAllClassReferences():
|
||||||
|
# for T in TitleCombinator.getAllClassReferences():
|
||||||
|
#
|
||||||
|
|
||||||
|
l = L(self._context)
|
||||||
|
|
||||||
|
yieldObj = {}
|
||||||
|
|
||||||
|
yieldObj['identifier'] = self.getIdentifier()
|
||||||
|
|
||||||
|
yieldObj['variants'] = [self.getVariant(),
|
||||||
|
l.getVariant()]
|
||||||
|
|
||||||
|
yieldObj['payload'] = {'label': l.getPayload()}
|
||||||
|
|
||||||
|
yieldObj['assertSelectors'] = ['B', 'L']
|
||||||
|
|
||||||
|
yieldObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
l.assertFunc]
|
||||||
|
|
||||||
|
yieldObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| l.shouldFail())
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
35
bin/ffx/test/basename_combinator.py
Normal file
35
bin/ffx/test/basename_combinator.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class BasenameCombinator():
|
||||||
|
|
||||||
|
IDENTIFIER = 'basename'
|
||||||
|
|
||||||
|
BASENAME = 'media'
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return BasenameCombinator.IDENTIFIER
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def list():
|
||||||
|
basePath = os.path.dirname(__file__)
|
||||||
|
return [os.path.basename(p)[20:-3]
|
||||||
|
for p
|
||||||
|
in glob.glob(f"{ basePath }/basename_combinator_*.py", recursive = True)
|
||||||
|
if p != __file__]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getClassReference(identifier):
|
||||||
|
importlib.import_module(f"ffx.test.basename_combinator_{ identifier }")
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.basename_combinator_{ identifier }"]):
|
||||||
|
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
||||||
|
if inspect.isclass(obj) and name != 'BasenameCombinator' and name.startswith('BasenameCombinator'):
|
||||||
|
return obj
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getAllClassReferences():
|
||||||
|
return [BasenameCombinator.getClassReference(i) for i in BasenameCombinator.list()]
|
||||||
107
bin/ffx/test/basename_combinator_0.py
Normal file
107
bin/ffx/test/basename_combinator_0.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .basename_combinator import BasenameCombinator
|
||||||
|
|
||||||
|
from .indicator_combinator import IndicatorCombinator
|
||||||
|
from .label_combinator import LabelCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class BasenameCombinator0(BasenameCombinator):
|
||||||
|
"""base[_indicator]"""
|
||||||
|
|
||||||
|
VARIANT = 'B0'
|
||||||
|
|
||||||
|
# def __init__(self, SubCombinators: dict = {}, context = None):
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return BasenameCombinator0.VARIANT
|
||||||
|
|
||||||
|
def getPayload(self, indicator = '', label = ''):
|
||||||
|
|
||||||
|
basename = BasenameCombinator.BASENAME
|
||||||
|
expectedBasename = label if label else BasenameCombinator.BASENAME
|
||||||
|
|
||||||
|
if indicator:
|
||||||
|
basename += f"_{indicator}"
|
||||||
|
expectedBasename += f"_{indicator}"
|
||||||
|
|
||||||
|
return {'basename': basename,
|
||||||
|
'label': label,
|
||||||
|
'expectedBasename': expectedBasename}
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, indicator = '', label = ''):
|
||||||
|
|
||||||
|
def f(testObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'filenames' in testObj.keys():
|
||||||
|
raise KeyError("testObj does not contain key 'filenames'")
|
||||||
|
|
||||||
|
fNames = testObj['filenames']
|
||||||
|
|
||||||
|
assert len(fNames) == 1, "More than one result file was created"
|
||||||
|
|
||||||
|
resultFilename = fNames[0]
|
||||||
|
|
||||||
|
fTokens = resultFilename.split('.')
|
||||||
|
|
||||||
|
resultBasename = '.'.join(fTokens[:-1])
|
||||||
|
resultExtension = fTokens[-1]
|
||||||
|
|
||||||
|
if not indicator and not label:
|
||||||
|
|
||||||
|
assert resultBasename == BasenameCombinator.BASENAME, f"Result basename is not {BasenameCombinator.BASENAME}"
|
||||||
|
if not indicator and label:
|
||||||
|
assert resultBasename == label, f"Result basename is not {label}"
|
||||||
|
if indicator and not label:
|
||||||
|
assert resultBasename == f"{BasenameCombinator.BASENAME}_{indicator}", f"Result basename is not {BasenameCombinator.BASENAME}_{indicator}"
|
||||||
|
if indicator and label:
|
||||||
|
assert resultBasename == f"{label}_{indicator}", f"Result basename is not {label}_{indicator}"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
ic = IndicatorCombinator(self._context)
|
||||||
|
|
||||||
|
for L in LabelCombinator.getAllClassReferences():
|
||||||
|
for i in ic.getYield():
|
||||||
|
|
||||||
|
l = L(self._context)
|
||||||
|
|
||||||
|
indicator = i['indicator']
|
||||||
|
indicatorVariant = i['variant']
|
||||||
|
|
||||||
|
yieldObj = {}
|
||||||
|
|
||||||
|
yieldObj['identifier'] = self.getIdentifier()
|
||||||
|
|
||||||
|
yieldObj['variants'] = [self.getVariant(),
|
||||||
|
l.getVariant(),
|
||||||
|
indicatorVariant]
|
||||||
|
|
||||||
|
yieldObj['payload'] = self.getPayload(indicator = indicator,
|
||||||
|
label = l.getPayload())
|
||||||
|
|
||||||
|
yieldObj['assertSelectors'] = ['B', 'L', 'I']
|
||||||
|
|
||||||
|
yieldObj['assertFuncs'] = [self.assertFunc(indicator, l.getPayload()), l.assertFunc, ic.assertFunc]
|
||||||
|
|
||||||
|
yieldObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| l.shouldFail()
|
||||||
|
| ic.shouldFail())
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
159
bin/ffx/test/basename_combinator_2.py
Normal file
159
bin/ffx/test/basename_combinator_2.py
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .basename_combinator import BasenameCombinator
|
||||||
|
|
||||||
|
from .indicator_combinator import IndicatorCombinator
|
||||||
|
from .label_combinator import LabelCombinator
|
||||||
|
from .title_combinator import TitleCombinator
|
||||||
|
from .release_combinator import ReleaseCombinator
|
||||||
|
from .show_combinator import ShowCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class BasenameCombinator2(BasenameCombinator):
|
||||||
|
"""show[_indicator]_group"""
|
||||||
|
|
||||||
|
VARIANT = 'B2'
|
||||||
|
|
||||||
|
# def __init__(self, SubCombinators: dict = {}, context = None):
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return BasenameCombinator2.VARIANT
|
||||||
|
|
||||||
|
#
|
||||||
|
# SHOW_LIST = [
|
||||||
|
# 'Boruto: Naruto Next Generations (2017)',
|
||||||
|
# 'The Rising of the Shield Hero (2019)',
|
||||||
|
# 'Scrubs - Die Anfänger (2001)'
|
||||||
|
# ]
|
||||||
|
#
|
||||||
|
# RELEASE_LIST = [
|
||||||
|
# ".GerEngSub.AAC.1080pINDICATOR.WebDL.x264-Tanuki",
|
||||||
|
# ".German.AC3.DL.1080pINDICATOR.BluRay.x264-AST4u",
|
||||||
|
# "-720pINDICATOR"
|
||||||
|
# ]
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self,
|
||||||
|
indicator = '',
|
||||||
|
label = '',
|
||||||
|
show = '',
|
||||||
|
release = ''):
|
||||||
|
|
||||||
|
if label:
|
||||||
|
basename = label
|
||||||
|
expectedBasename = label
|
||||||
|
if indicator:
|
||||||
|
basename += f"_{indicator}"
|
||||||
|
expectedBasename += f"_{indicator}"
|
||||||
|
else:
|
||||||
|
basename = show+release
|
||||||
|
expectedBasename = basename
|
||||||
|
|
||||||
|
return {'basename': basename,
|
||||||
|
'label': label,
|
||||||
|
'expectedBasename': expectedBasename}
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self,
|
||||||
|
indicator = '',
|
||||||
|
label = '',
|
||||||
|
show = '',
|
||||||
|
release = ''):
|
||||||
|
|
||||||
|
def f(testObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'filenames' in testObj.keys():
|
||||||
|
raise KeyError("testObj does not contain key 'filenames'")
|
||||||
|
|
||||||
|
fNames = testObj['filenames']
|
||||||
|
|
||||||
|
assert len(fNames) == 1, "More than one result file was created"
|
||||||
|
|
||||||
|
resultFilename = fNames[0]
|
||||||
|
|
||||||
|
fTokens = resultFilename.split('.')
|
||||||
|
|
||||||
|
resultBasename = '.'.join(fTokens[:-1])
|
||||||
|
resultExtension = fTokens[-1]
|
||||||
|
|
||||||
|
if not indicator and not label:
|
||||||
|
assert resultBasename == show+release, f"Result basename is not {show+release}"
|
||||||
|
elif not indicator and label:
|
||||||
|
assert resultBasename == label, f"Result basename is not {label}"
|
||||||
|
elif indicator and not label:
|
||||||
|
assert resultBasename == show+release, f"Result basename is not {show+release}"
|
||||||
|
elif indicator and label:
|
||||||
|
assert resultBasename == f"{label}_{indicator}", f"Result basename is not {label}_{indicator}"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
ic = IndicatorCombinator(self._context)
|
||||||
|
sc = ShowCombinator(self._context)
|
||||||
|
|
||||||
|
for L in LabelCombinator.getAllClassReferences():
|
||||||
|
for iy in ic.getYield():
|
||||||
|
|
||||||
|
indicator = iy['indicator']
|
||||||
|
indicatorVariant = iy['variant']
|
||||||
|
|
||||||
|
rc = ReleaseCombinator(self._context, indicator=indicator)
|
||||||
|
|
||||||
|
for sy in sc.getYield():
|
||||||
|
for ry in rc.getYield():
|
||||||
|
|
||||||
|
l = L(self._context)
|
||||||
|
|
||||||
|
show = sy['show']
|
||||||
|
showVariant = sy['variant']
|
||||||
|
|
||||||
|
release = ry['release']
|
||||||
|
releaseVariant = ry['variant']
|
||||||
|
|
||||||
|
yieldObj = {}
|
||||||
|
|
||||||
|
yieldObj['identifier'] = self.getIdentifier()
|
||||||
|
|
||||||
|
yieldObj['variants'] = [self.getVariant(),
|
||||||
|
l.getVariant(),
|
||||||
|
indicatorVariant,
|
||||||
|
showVariant,
|
||||||
|
releaseVariant]
|
||||||
|
|
||||||
|
yieldObj['payload'] = self.getPayload(indicator = indicator,
|
||||||
|
label = l.getPayload(),
|
||||||
|
show = show,
|
||||||
|
release = release)
|
||||||
|
|
||||||
|
yieldObj['assertSelectors'] = ['B', 'L', 'I', 'S', 'R']
|
||||||
|
|
||||||
|
yieldObj['assertFuncs'] = [self.createAssertFunc(indicator,
|
||||||
|
l.getPayload(),
|
||||||
|
show = show,
|
||||||
|
release = release),
|
||||||
|
l.assertFunc,
|
||||||
|
ic.assertFunc,
|
||||||
|
sc.assertFunc,
|
||||||
|
rc.assertFunc]
|
||||||
|
|
||||||
|
yieldObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| l.shouldFail()
|
||||||
|
| ic.shouldFail()
|
||||||
|
| sc.shouldFail()
|
||||||
|
| rc.shouldFail())
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
13
bin/ffx/test/combinator.py
Normal file
13
bin/ffx/test/combinator.py
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
class Combinator():
|
||||||
|
|
||||||
|
def __init__(self, SubCombinations: dict):
|
||||||
|
self._SubCombinators = SubCombinations
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def assertFunc(self, testObj):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def getYield(yieldObj: dict):
|
||||||
|
pass
|
||||||
35
bin/ffx/test/disposition_combinator_2.py
Normal file
35
bin/ffx/test/disposition_combinator_2.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class DispositionCombinator2():
|
||||||
|
|
||||||
|
IDENTIFIER = 'disposition2'
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return DispositionCombinator2.IDENTIFIER
|
||||||
|
def getVariant(self):
|
||||||
|
return self._variant
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def list():
|
||||||
|
basePath = os.path.dirname(__file__)
|
||||||
|
return [os.path.basename(p)[25:-3]
|
||||||
|
for p
|
||||||
|
in glob.glob(f"{ basePath }/disposition_combinator_2_*.py", recursive = True)
|
||||||
|
if p != __file__]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getClassReference(identifier):
|
||||||
|
importlib.import_module(f"ffx.test.disposition_combinator_2_{ identifier }")
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.disposition_combinator_2_{ identifier }"]):
|
||||||
|
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
||||||
|
if inspect.isclass(obj) and name != 'DispositionCombinator2' and name.startswith('DispositionCombinator2'):
|
||||||
|
return obj
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getAllClassReferences():
|
||||||
|
return [DispositionCombinator2.getClassReference(i) for i in DispositionCombinator2.list()]
|
||||||
76
bin/ffx/test/disposition_combinator_2_0.py
Normal file
76
bin/ffx/test/disposition_combinator_2_0.py
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator20(DispositionCombinator2):
|
||||||
|
|
||||||
|
# COMMENT
|
||||||
|
# DESCRIPTIONS
|
||||||
|
|
||||||
|
VARIANT = 'D00'
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return DispositionCombinator20.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
subtrack0 = set()
|
||||||
|
subtrack1 = set()
|
||||||
|
|
||||||
|
#NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
|
||||||
|
# so some checks for preserved dispositions are omitted for now
|
||||||
|
if self.__createPresets:
|
||||||
|
subtrack0.add(TrackDisposition.FORCED) # COMMENT
|
||||||
|
# subtrack1.add(TrackDisposition.DESCRIPTIONS) # DESCRIPTIONS
|
||||||
|
|
||||||
|
return (subtrack0,
|
||||||
|
subtrack1)
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict):
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
trackDescriptors = assertObj['tracks']
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
|
||||||
|
), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
|
||||||
|
assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED)
|
||||||
|
), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set 'forced' disposition"
|
||||||
|
# source subIndex 1
|
||||||
|
assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
|
||||||
|
), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
|
||||||
|
# assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.DESCRIPTIONS)
|
||||||
|
# ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved set 'descriptions' disposition"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict):
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
trackDescriptors = assertObj['tracks']
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
|
||||||
|
), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
|
||||||
|
# source subIndex 1
|
||||||
|
assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
|
||||||
|
), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
114
bin/ffx/test/disposition_combinator_2_1.py
Normal file
114
bin/ffx/test/disposition_combinator_2_1.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator21(DispositionCombinator2):
|
||||||
|
|
||||||
|
VARIANT = 'D10'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return DispositionCombinator21.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
subtrack0 = set()
|
||||||
|
subtrack1 = set([TrackDisposition.DEFAULT])
|
||||||
|
else:
|
||||||
|
subtrack0 = set([TrackDisposition.DEFAULT])
|
||||||
|
subtrack1 = set()
|
||||||
|
|
||||||
|
#NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
|
||||||
|
# so some checks for preserved dispositions are omitted for now
|
||||||
|
if self.__createPresets:
|
||||||
|
# subtrack0.add(TrackDisposition.COMMENT) # COMMENT
|
||||||
|
subtrack1.add(TrackDisposition.FORCED) # DESCRIPTIONS
|
||||||
|
|
||||||
|
return (subtrack0,
|
||||||
|
subtrack1)
|
||||||
|
|
||||||
|
def createAssertFunc(self):
    """Build and return the verification callable for this variant.

    The returned callable takes an ``assertObj`` dict that must contain the
    key 'tracks' (a list of track descriptors) and asserts the expected
    default/forced disposition flags on the output tracks.  Which
    expectations apply depends on whether presets were created and whether
    the Jellyfin ordering is active (Jellyfin output places source
    subtrack 1 first).
    """

    useJellyfin = self._context['use_jellyfin']

    if self.__createPresets and useJellyfin:

        def f(assertObj: dict):
            if 'tracks' not in assertObj:
                raise KeyError("assertObj does not contain key 'tracks'")
            trackDescriptors = assertObj['tracks']

            # Output stream 0 carries source subIndex 1: default cleared, forced preserved.
            assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition")
            assert trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED), (
                f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set 'forced' disposition")
            # Output stream 1 carries source subIndex 0: default must be set.
            assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition")
            # 'comment' preservation check omitted: current ffmpeg cannot set it.

    elif self.__createPresets:

        def f(assertObj: dict):
            if 'tracks' not in assertObj:
                raise KeyError("assertObj does not contain key 'tracks'")
            trackDescriptors = assertObj['tracks']

            # Source subIndex 0: default must be set.
            assert trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition")
            # 'comment' preservation check omitted: current ffmpeg cannot set it.
            # Source subIndex 1: default cleared, forced preserved.
            assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition")
            assert trackDescriptors[1].getDispositionFlag(TrackDisposition.FORCED), (
                f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved set 'forced' disposition")

    elif useJellyfin:

        def f(assertObj: dict):
            if 'tracks' not in assertObj:
                raise KeyError("assertObj does not contain key 'tracks'")
            trackDescriptors = assertObj['tracks']

            # Output stream 0 carries source subIndex 1: no default expected.
            assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition")
            # Output stream 1 carries source subIndex 0: default must be set.
            assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition")

    else:

        def f(assertObj: dict):
            if 'tracks' not in assertObj:
                raise KeyError("assertObj does not contain key 'tracks'")
            trackDescriptors = assertObj['tracks']

            # Source subIndex 0: default must be set.
            assert trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition")
            # Source subIndex 1: no default expected.
            assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), (
                f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition")

    return f
|
|
||||||
|
def shouldFail(self):
    """Return False: this disposition combination is expected to transcode successfully."""
    return False
||||||
79
bin/ffx/test/disposition_combinator_2_2.py
Normal file
79
bin/ffx/test/disposition_combinator_2_2.py
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator22(DispositionCombinator2):
    """Two-track disposition combination, variant D01.

    With presets, subtrack 0 carries DEFAULT plus FORCED (FORCED stands in
    for COMMENT, which current ffmpeg cannot set on arbitrary tracks);
    without presets, only subtrack 1 carries DEFAULT.
    """

    VARIANT = 'D01'

    def __init__(self, context=None,
                 createPresets: bool = False):
        super().__init__(context)
        self.__createPresets = createPresets

    def getVariant(self):
        """Return this combination's variant identifier."""
        return DispositionCombinator22.VARIANT

    def getPayload(self):
        """Return the disposition sets to apply to the two source subtracks."""
        if self.__createPresets:
            subtrack0 = {TrackDisposition.DEFAULT}
            subtrack1 = set()
        else:
            subtrack0 = set()
            subtrack1 = {TrackDisposition.DEFAULT}

        # NOTE: Current ffmpeg version will not set most of the dispositions
        # on arbitrary tracks, so FORCED stands in for the intended COMMENT
        # disposition and some preservation checks are omitted for now.
        if self.__createPresets:
            subtrack0.add(TrackDisposition.FORCED)  # stands in for COMMENT

        return (subtrack0, subtrack1)

    def createAssertFunc(self):
        """Build the callable that verifies output track dispositions.

        The callable expects a dict with key 'tracks' and raises KeyError
        otherwise.
        """
        if self.__createPresets:

            def f(assertObj: dict):
                if 'tracks' not in assertObj:
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0: default cleared, forced preserved
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), (
                    f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition")
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED), (
                    f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set 'descriptions' disposition")
                # source subIndex 1: default must be set
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), (
                    f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition")
                # DESCRIPTIONS preservation check omitted: ffmpeg limitation.

        else:

            def f(assertObj: dict):
                if 'tracks' not in assertObj:
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0: no default expected
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), (
                    f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition")
                # source subIndex 1: default must be set
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), (
                    f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition")

        return f

    def shouldFail(self):
        """Return False: this combination is expected to succeed."""
        return False
|
||||||
43
bin/ffx/test/disposition_combinator_2_3 .py
Normal file
43
bin/ffx/test/disposition_combinator_2_3 .py
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator23(DispositionCombinator2):
    """Two-track disposition combination, variant D11.

    Both subtracks request the DEFAULT disposition — an invalid combination,
    so the run is expected to fail (see shouldFail).
    """

    VARIANT = 'D11'

    def __init__(self, context=None,
                 createPresets: bool = False):
        super().__init__(context)
        self.__createPresets = createPresets

    def getVariant(self):
        """Return this combination's variant identifier."""
        return DispositionCombinator23.VARIANT

    def getPayload(self):
        """Return the disposition sets to apply to the two source subtracks."""
        subtrack0 = set([TrackDisposition.DEFAULT])
        subtrack1 = set([TrackDisposition.DEFAULT])

        # NOTE: Current ffmpeg version will not set most of the dispositions
        # on arbitrary tracks, so some checks for preserved dispositions are
        # omitted for now.
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT) # COMMENT
            subtrack1.add(TrackDisposition.FORCED)  # DESCRIPTIONS

        return (subtrack0,
                subtrack1)

    # TODO: tmdb cases
    def createAssertFunc(self):
        """Return a placeholder assertion callable (no checks yet, see TODO).

        FIX: the placeholder previously used a mutable default argument
        (``assertObj: dict = {}``); ``None`` keeps calls compatible while
        avoiding the shared-default pitfall.
        """
        def f(assertObj=None):
            pass
        return f

    def shouldFail(self):
        """Return True: both subtracks claim DEFAULT, so the run must fail."""
        return True
||||||
34
bin/ffx/test/disposition_combinator_3.py
Normal file
34
bin/ffx/test/disposition_combinator_3.py
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class DispositionCombinator3():
    """Base class for the three-track disposition test combinations.

    Provides discovery helpers that locate the concrete combinator modules
    (``disposition_combinator_3_<id>.py``) living next to this file.
    """

    IDENTIFIER = 'disposition3'

    # Filename prefix shared by the discovery helpers below.
    _MODULE_PREFIX = 'disposition_combinator_3_'

    def __init__(self, context = None):
        # NOTE(review): context must be a mapping providing 'logger' and
        # 'report_logger'; passing None raises TypeError — confirm callers
        # always supply a context.
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        """Return the common identifier of the disposition-3 test family."""
        return DispositionCombinator3.IDENTIFIER

    @staticmethod
    def list():
        """Return the identifiers of all sibling combinator modules."""
        basePath = os.path.dirname(__file__)
        prefix = DispositionCombinator3._MODULE_PREFIX
        # Derive the identifier from the filename lengths instead of the
        # former magic slice [25:-3], which silently broke if the prefix
        # ever changed.
        return [os.path.basename(p)[len(prefix):-len('.py')]
                for p
                in glob.glob(f"{ basePath }/{ prefix }*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        """Import the combinator module for *identifier* and return its class.

        Returns None when the module defines no matching class.
        """
        # import_module already returns the module object; no need to fetch
        # it again from sys.modules.
        module = importlib.import_module(f"ffx.test.disposition_combinator_3_{ identifier }")
        for name, obj in inspect.getmembers(module):
            # HINT: Excluding DispositionCombinator3 itself, as the base
            # class appears among the module members through the import.
            if inspect.isclass(obj) and name != 'DispositionCombinator3' and name.startswith('DispositionCombinator3'):
                return obj

    @staticmethod
    def getAllClassReferences():
        """Return class references for every discovered combinator module."""
        return [DispositionCombinator3.getClassReference(i) for i in DispositionCombinator3.list()]
||||||
92
bin/ffx/test/disposition_combinator_3_0.py
Normal file
92
bin/ffx/test/disposition_combinator_3_0.py
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_3 import DispositionCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator30(DispositionCombinator3):
    """Three-track disposition combination, variant D000.

    No subtrack requests the DEFAULT disposition; with presets, subtrack 0
    additionally carries FORCED (standing in for COMMENT, which current
    ffmpeg cannot set on arbitrary tracks).
    """

    VARIANT = 'D000'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)
        self.__createPresets = createPresets

    def getVariant(self):
        """Return this combination's variant identifier."""
        return DispositionCombinator30.VARIANT

    def getPayload(self):
        """Return the disposition sets to apply to the three source subtracks."""
        subtrack0 = set()
        subtrack1 = set()
        subtrack2 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        # so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            subtrack0.add(TrackDisposition.FORCED) # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS) # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED) # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):
        """Build the callable that verifies output track dispositions.

        The callable expects a dict with key 'tracks' and raises KeyError
        otherwise; assertion messages identify the offending output stream.
        """
        if self.__createPresets:

            def f(assertObj: dict):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")

                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not (trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                    ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED)
                    ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved default disposition"

                # source subIndex 1
                # BUGFIX: the failure message previously reported Stream #0 and
                # read trackDescriptors[0]; it now matches the asserted track.
                assert not (trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                    ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                # assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.DESCRIPTIONS)
                # ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved default disposition"

                # source subIndex 2
                assert not (trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                    ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
                # assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.HEARING_IMPAIRED)
                # ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved default disposition"

        else:

            def f(assertObj: dict):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")

                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not (trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                    ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"

                # source subIndex 1
                assert not (trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                    ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"

                # source subIndex 2
                assert not (trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                    ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"

        return f

    def shouldFail(self):
        """Return False: this combination is expected to succeed."""
        return False
||||||
147
bin/ffx/test/disposition_combinator_3_1.py
Normal file
147
bin/ffx/test/disposition_combinator_3_1.py
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_3 import DispositionCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator31(DispositionCombinator3):
    """Three-track disposition combination, variant D100.

    Without presets, subtrack 0 carries DEFAULT; with presets, DEFAULT moves
    to subtrack 2 and subtrack 1 carries FORCED (standing in for
    DESCRIPTIONS — see NOTE in getPayload).
    """

    VARIANT = 'D100'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)
        # Whether preset dispositions are applied on top of the base payload.
        self.__createPresets = createPresets

    def getVariant(self):
        """Return this combination's variant identifier."""
        return DispositionCombinator31.VARIANT

    def getPayload(self):
        """Return the disposition sets to apply to the three source subtracks."""
        if self.__createPresets:
            subtrack0 = set()
            subtrack1 = set()
            subtrack2 = set([TrackDisposition.DEFAULT])
        else:
            subtrack0 = set([TrackDisposition.DEFAULT])
            subtrack1 = set()
            subtrack2 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        # so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT) # COMMENT
            subtrack1.add(TrackDisposition.FORCED) # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED) # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):
        """Build the callable that verifies output track dispositions.

        Four closures cover the presets × jellyfin matrix; Jellyfin output
        reorders the tracks (the '# source subIndex N' comments map each
        output stream back to its source subtrack).  The callable expects a
        dict with key 'tracks' and raises KeyError otherwise.
        """
        if self.__createPresets:

            if self._context['use_jellyfin']:

                def f(assertObj: dict):

                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")

                    trackDescriptors = assertObj['tracks']

                    # source subIndex 1
                    assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                    assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved descriptions disposition"

                    # source subIndex 2
                    assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                    # assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.HEARING_IMPAIRED)
                    # ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved default disposition"

                    # source subIndex 0
                    assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not set default disposition"
                    # assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.COMMENT)
                    # ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved set default disposition"

            else:
                def f(assertObj: dict):

                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")

                    trackDescriptors = assertObj['tracks']

                    # source subIndex 0
                    assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition"
                    # assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.COMMENT)
                    # ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved default disposition"

                    # source subIndex 1
                    assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                    assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.FORCED)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved default disposition"

                    # source subIndex 2
                    assert (not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
                    # assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.HEARING_IMPAIRED)
                    # ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved default disposition"

        else:

            if self._context['use_jellyfin']:

                def f(assertObj: dict):

                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")

                    trackDescriptors = assertObj['tracks']

                    # source subIndex 1
                    assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"

                    # source subIndex 2
                    assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"

                    # source subIndex 0
                    assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not set default disposition"

            else:
                def f(assertObj: dict):

                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")

                    trackDescriptors = assertObj['tracks']

                    # source subIndex 0
                    assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition"

                    # source subIndex 1
                    assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"

                    # source subIndex 2
                    assert (not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"

        return f

    def shouldFail(self):
        """Return False: this combination is expected to succeed."""
        return False
||||||
132
bin/ffx/test/disposition_combinator_3_2.py
Normal file
132
bin/ffx/test/disposition_combinator_3_2.py
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_3 import DispositionCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator32(DispositionCombinator3):
    """Three-track disposition combination, variant D010.

    Without presets, subtrack 1 carries DEFAULT; with presets, DEFAULT moves
    to subtrack 0 and subtrack 2 carries FORCED (standing in for
    HEARING_IMPAIRED — see NOTE in getPayload).
    """

    VARIANT = 'D010'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)
        # Whether preset dispositions are applied on top of the base payload.
        self.__createPresets = createPresets

    def getVariant(self):
        """Return this combination's variant identifier."""
        return DispositionCombinator32.VARIANT

    def getPayload(self):
        """Return the disposition sets to apply to the three source subtracks."""
        if self.__createPresets:
            subtrack0 = set([TrackDisposition.DEFAULT])
            subtrack1 = set()
            subtrack2 = set()
        else:
            subtrack0 = set()
            subtrack1 = set([TrackDisposition.DEFAULT])
            subtrack2 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        # so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT) # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS) # DESCRIPTIONS
            subtrack2.add(TrackDisposition.FORCED) # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):
        """Build the callable that verifies output track dispositions.

        Four closures cover the presets × jellyfin matrix; Jellyfin output
        reorders the tracks (the '# source subIndex N' comments map each
        output stream back to its source subtrack).  The callable expects a
        dict with key 'tracks' and raises KeyError otherwise.
        """
        if self.__createPresets:
            if self._context['use_jellyfin']:

                def f(assertObj: dict):
                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")
                    trackDescriptors = assertObj['tracks']

                    # source subIndex 0
                    assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                    # assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.COMMENT)
                    # ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set default disposition"
                    # source subIndex 2
                    assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                    assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.FORCED)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved default disposition"
                    # source subIndex 1
                    assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not set default disposition"
                    # assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.DESCRIPTIONS)
                    # ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved descriptions disposition"

            else:

                def f(assertObj: dict):
                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")
                    trackDescriptors = assertObj['tracks']

                    # source subIndex 0
                    assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                    # assert (trackDescriptors[0].getDispositionFlag(TrackDisposition.COMMENT)
                    # ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set default disposition"
                    # source subIndex 1
                    assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition"
                    # assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.DESCRIPTIONS)
                    # ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved descriptions disposition"
                    # source subIndex 2
                    assert (not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
                    assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.FORCED)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved default disposition"

        else:

            if self._context['use_jellyfin']:

                def f(assertObj: dict):
                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")
                    trackDescriptors = assertObj['tracks']

                    # source subIndex 0
                    assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                    # source subIndex 2
                    assert (not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                    # source subIndex 1
                    assert (trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not set default disposition"

            else:

                def f(assertObj: dict):
                    if not 'tracks' in assertObj.keys():
                        raise KeyError("assertObj does not contain key 'tracks'")
                    trackDescriptors = assertObj['tracks']

                    # source subIndex 0
                    assert (not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                    # source subIndex 1
                    assert (trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition"
                    # source subIndex 2
                    assert (not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT)
                        ), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"

        return f

    def shouldFail(self):
        """Return False: this combination is expected to succeed."""
        return False
||||||
132
bin/ffx/test/disposition_combinator_3_3.py
Normal file
132
bin/ffx/test/disposition_combinator_3_3.py
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_3 import DispositionCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator33(DispositionCombinator3):
    """Disposition variant D001: exactly one subtrack carries DEFAULT.

    Without presets the payload puts DEFAULT on subtrack 2; with presets
    it starts on subtrack 1 (plus FORCED as a preserved preset flag).
    The assert function verifies that in the output only stream #2 ends
    up with the DEFAULT disposition and, when presets are used, that the
    FORCED flag survived on the track that carried it.
    """

    VARIANT = 'D001'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        # Whether getPayload()/createAssertFunc() model preset dispositions.
        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator33.VARIANT

    def getPayload(self):
        """Return the (subtrack0, subtrack1, subtrack2) disposition sets."""
        if self.__createPresets:
            subtrack0 = set()
            subtrack1 = {TrackDisposition.DEFAULT}
            subtrack2 = set()
        else:
            subtrack0 = set()
            subtrack1 = set()
            subtrack2 = {TrackDisposition.DEFAULT}

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        # so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT) # COMMENT
            subtrack1.add(TrackDisposition.FORCED) # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED) # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):
        """Build the output check (replaces four near-identical closures).

        In every configuration only output stream #2 may carry DEFAULT.
        With presets, FORCED must additionally have been preserved; the
        jellyfin pipeline reorders tracks, so it is expected on stream #2
        there and on stream #1 otherwise.
        """
        useJellyfin = self._context['use_jellyfin']

        if self.__createPresets:
            forcedIndex = 2 if useJellyfin else 1
        else:
            forcedIndex = None

        def f(assertObj: dict):
            if 'tracks' not in assertObj:
                raise KeyError("assertObj does not contain key 'tracks'")
            trackDescriptors = assertObj['tracks']

            # DEFAULT may only remain on the last output stream.
            for streamIndex, expectDefault in enumerate((False, False, True)):
                td = trackDescriptors[streamIndex]
                location = (f"Stream #{streamIndex} index={td.getIndex()} "
                            f"[{td.getType().label()}:{td.getSubIndex()}]")
                hasDefault = td.getDispositionFlag(TrackDisposition.DEFAULT)
                if expectDefault:
                    assert hasDefault, f"{location} has not set default disposition"
                else:
                    assert not hasDefault, f"{location} has set default disposition"

            if forcedIndex is not None:
                td = trackDescriptors[forcedIndex]
                assert td.getDispositionFlag(TrackDisposition.FORCED), (
                    f"Stream #{forcedIndex} index={td.getIndex()} "
                    f"[{td.getType().label()}:{td.getSubIndex()}] "
                    f"has not preserved descriptions disposition")

        return f

    def shouldFail(self):
        return False
|
||||||
46
bin/ffx/test/disposition_combinator_3_4.py
Normal file
46
bin/ffx/test/disposition_combinator_3_4.py
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .disposition_combinator_3 import DispositionCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class DispositionCombinator34(DispositionCombinator3):
    """Disposition variant D101: DEFAULT on subtracks 0 and 2.

    shouldFail() is True — two DEFAULT tracks is the failure case this
    variant exercises.  createAssertFunc() is still a placeholder.
    """

    VARIANT = 'D101'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        # Whether getPayload() should also carry preset dispositions.
        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator34.VARIANT

    def getPayload(self):
        """Return the (subtrack0, subtrack1, subtrack2) disposition sets."""
        subtrack0 = {TrackDisposition.DEFAULT}
        subtrack1 = set()
        subtrack2 = {TrackDisposition.DEFAULT}

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        # so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            subtrack0.add(TrackDisposition.FORCED) # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS) # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED) # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    #TODO: tmpdb cases
    def createAssertFunc(self):
        """Placeholder: no assertions yet (see TODO above)."""
        # 'None' instead of a shared mutable '{}' default (the arg is unused).
        def f(assertObj: dict = None):
            pass
        return f

    def shouldFail(self):
        return True
|
||||||
276
bin/ffx/test/helper.py
Normal file
276
bin/ffx/test/helper.py
Normal file
@@ -0,0 +1,276 @@
|
|||||||
|
import os, math, tempfile, click
|
||||||
|
|
||||||
|
|
||||||
|
from ffx.ffx_controller import FfxController
|
||||||
|
|
||||||
|
from ffx.process import executeProcess
|
||||||
|
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.helper import dictCache
|
||||||
|
|
||||||
|
|
||||||
|
SHORT_SUBTITLE_SEQUENCE = [{'start': 1, 'end': 2, 'text': 'yolo'},
|
||||||
|
{'start': 3, 'end': 4, 'text': 'zolo'},
|
||||||
|
{'start': 5, 'end': 6, 'text': 'golo'}]
|
||||||
|
|
||||||
|
def getTimeString(hours: float = 0.0,
                  minutes: float = 0.0,
                  seconds: float = 0.0,
                  millis: float = 0.0,
                  format: str = ''):
    """Render an accumulated duration as a subtitle timestamp.

    All components may be fractional and are summed into one duration.
    ``format='ass'`` yields ``H:MM:SS.CC`` (centiseconds, ASS
    convention); anything else yields ``HH:MM:SS.mmm`` (SRT/VTT style,
    '.' separator — SRT proper uses ',', callers substitute as needed).
    """
    duration = (hours * 3600.0
                + minutes * 60.0
                + seconds
                + millis / 1000.0)

    # Work in integer milliseconds.  Rounding once here fixes the float
    # floor-truncation of the previous per-component math.floor() chain,
    # which could drop a millisecond (e.g. 1h2m3s4ms rendered as ...03.003).
    totalMillis = round(duration * 1000.0)

    totalSeconds, millisPart = divmod(totalMillis, 1000)
    totalMinutes, secondsPart = divmod(totalSeconds, 60)
    hoursPart, minutesPart = divmod(totalMinutes, 60)

    if format == 'ass':
        # ASS time fields carry centiseconds (two digits); formatting raw
        # milliseconds with :02d previously produced a 3-digit field.
        return f"{hoursPart:01d}:{minutesPart:02d}:{secondsPart:02d}.{millisPart // 10:02d}"

    # srt & vtt
    return f"{hoursPart:02d}:{minutesPart:02d}:{secondsPart:02d}.{millisPart:03d}"
|
||||||
|
|
||||||
|
|
||||||
|
def createAssFile(entries: list, directory = None):
    """Write a minimal ASS subtitle file and return its path.

    *entries* is a list of ``{'start': s, 'end': s, 'text': str}`` dicts
    (times in seconds).  The layout mirrors what ffmpeg/Lavc emits:

        [Script Info]
        ScriptType: v4.00+ ...
        [V4+ Styles]
        Style: Default,...
        [Events]
        Dialogue: 0,0:00:01.00,0:00:02.00,Default,,0,0,0,,yolo

    The caller is responsible for deleting the file.
    """
    # mkstemp instead of the race-prone, deprecated tempfile.mktemp.
    fd, tmpFileName = tempfile.mkstemp(suffix=".ass", dir = directory)

    with os.fdopen(fd, 'w') as tmpFile:

        tmpFile.write("[Script Info]\n")
        tmpFile.write("; Script generated by Ffx\n")
        tmpFile.write("ScriptType: v4.00+\n")
        tmpFile.write("PlayResX: 384\n")
        tmpFile.write("PlayResY: 288\n")
        tmpFile.write("ScaledBorderAndShadow: yes\n")
        tmpFile.write("YCbCr Matrix: None\n")
        tmpFile.write("\n")
        tmpFile.write("[V4+ Styles]\n")
        tmpFile.write("Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding\n")
        tmpFile.write("Style: Default,Arial,16,&Hffffff,&Hffffff,&H0,&H0,0,0,0,0,100,100,0,0,1,1,0,2,10,10,10,1\n")
        tmpFile.write("\n")
        tmpFile.write("[Events]\n")
        tmpFile.write("Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n")

        for entry in entries:
            tmpFile.write(f"Dialogue: 0,{getTimeString(seconds=entry['start'], format='ass')},{getTimeString(seconds=entry['end'], format='ass')},Default,,0,0,0,,{entry['text']}\n")

    return tmpFileName
|
||||||
|
|
||||||
|
def createSrtFile(entries: list, directory = None):
    """Write a minimal SRT subtitle file and return its path.

    *entries* is a list of ``{'start': s, 'end': s, 'text': str}`` dicts
    (times in seconds).  Target shape:

        1
        00:00:00,000 --> 00:00:02,500
        Welcome to the Example Subtitle File!

    The caller is responsible for deleting the file.
    """
    # mkstemp instead of the race-prone, deprecated tempfile.mktemp.
    fd, tmpFileName = tempfile.mkstemp(suffix=".srt", dir = directory)

    with os.fdopen(fd, 'w') as tmpFile:

        for entryIndex, entry in enumerate(entries):

            # SRT cue counters are 1-based (the original wrote 0-based).
            tmpFile.write(f"{entryIndex + 1}\n")
            # SRT uses ',' as the millisecond separator, as in the example
            # above; getTimeString() emits '.', so substitute it here.
            startStamp = getTimeString(seconds=entry['start']).replace('.', ',')
            endStamp = getTimeString(seconds=entry['end']).replace('.', ',')
            tmpFile.write(f"{startStamp} --> {endStamp}\n")
            tmpFile.write(f"{entry['text']}\n\n")

    return tmpFileName
|
||||||
|
|
||||||
|
|
||||||
|
def createVttFile(entries: list, directory = None):
    """Write a minimal WebVTT subtitle file and return its path.

    *entries* is a list of ``{'start': s, 'end': s, 'text': str}`` dicts
    (times in seconds).  Target shape:

        WEBVTT

        01:20:33.050 --> 01:20:35.050
        Yolo

    The caller is responsible for deleting the file.
    """
    # mkstemp instead of the race-prone, deprecated tempfile.mktemp.
    fd, tmpFileName = tempfile.mkstemp(suffix=".vtt", dir = directory)

    with os.fdopen(fd, 'w') as tmpFile:

        tmpFile.write("WEBVTT\n")

        for entry in entries:

            tmpFile.write("\n")
            tmpFile.write(f"{getTimeString(seconds=entry['start'])} --> {getTimeString(seconds=entry['end'])}\n")
            tmpFile.write(f"{entry['text']}\n")

    return tmpFileName
|
||||||
|
|
||||||
|
|
||||||
|
def createMediaTestFile(mediaDescriptor: MediaDescriptor,
                        directory: str = '',
                        baseName: str = 'media',
                        format: str = '',
                        extension: str = 'mkv',
                        sizeX: int = 1280,
                        sizeY: int = 720,
                        rate: int = 25,
                        length: int = 10,
                        logger = None):
    """Synthesize a test media file with ffmpeg matching *mediaDescriptor*.

    For every track descriptor a lavfi generator (video/audio) or a
    temporary VTT file (subtitles) serves as input; equal inputs are
    shared via *generatorCache* (dictCache).  Global and per-stream tags
    plus the descriptor's disposition flags are applied, and the output
    path is returned.  NOTE(review): ffmpeg must be on PATH; a non-zero
    return code is only logged via *logger*, never raised.
    """

    # subtitleFilePath = createVttFile(SHORT_SUBTITLE_SEQUENCE)

    # commandTokens = FfxController.COMMAND_TOKENS
    commandTokens = ['ffmpeg', '-y']

    # One cache entry per distinct ffmpeg input; its position doubles as
    # the ffmpeg input index used in the -map tokens below.
    generatorCache = []
    generatorTokens = []
    mappingTokens = []
    importTokens = []
    metadataTokens = []

    # Container-level (global) tags.
    for mediaTagKey, mediaTagValue in mediaDescriptor.getTags().items():
        metadataTokens += ['-metadata:g', f"{mediaTagKey}={mediaTagValue}"]

    # Running per-type output stream counter for -metadata:s:<type>:<n>.
    subIndexCounter = {}

    for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():

        trackType = trackDescriptor.getType()

        if trackType == TrackType.VIDEO:

            # Reuse a single black-color lavfi source for all video tracks.
            cacheIndex, generatorCache = dictCache({'type': TrackType.VIDEO}, generatorCache)
            # click.echo(f"createMediaTestFile() cache index={cacheIndex} size={len(generatorCache)}")

            if cacheIndex == -1:
                generatorTokens += ['-f',
                                    'lavfi',
                                    '-i',
                                    f"color=size={sizeX}x{sizeY}:rate={rate}:color=black"]

            # -1 means the entry was just appended, so it is the last one.
            sourceIndex = len(generatorCache) - 1 if cacheIndex == -1 else cacheIndex
            mappingTokens += ['-map', f"{sourceIndex}:v:0"]

            if not trackType in subIndexCounter.keys():
                subIndexCounter[trackType] = 0
            for mediaTagKey, mediaTagValue in trackDescriptor.getTags().items():
                metadataTokens += [f"-metadata:s:{trackType.indicator()}:{subIndexCounter[trackType]}",
                                   f"{mediaTagKey}={mediaTagValue}"]
            subIndexCounter[trackType] += 1

        if trackType == TrackType.AUDIO:

            audioLayout = 'stereo'

            # Reuse a single silent audio source per channel layout.
            cacheIndex, generatorCache = dictCache({'type': TrackType.AUDIO, 'layout': audioLayout}, generatorCache)
            # click.echo(f"createMediaTestFile() cache index={cacheIndex} size={len(generatorCache)}")

            # click.echo(f"generartorCache index={cacheIndex} len={len(generatorCache)}")
            if cacheIndex == -1:
                generatorTokens += ['-f',
                                    'lavfi',
                                    '-i',
                                    f"anullsrc=channel_layout={audioLayout}:sample_rate=44100"]

            sourceIndex = len(generatorCache) - 1 if cacheIndex == -1 else cacheIndex
            mappingTokens += ['-map', f"{sourceIndex}:a:0"]

            if not trackType in subIndexCounter.keys():
                subIndexCounter[trackType] = 0
            for mediaTagKey, mediaTagValue in trackDescriptor.getTags().items():
                metadataTokens += [f"-metadata:s:{trackType.indicator()}:{subIndexCounter[trackType]}",
                                   f"{mediaTagKey}={mediaTagValue}"]
            subIndexCounter[trackType] += 1

        if trackType == TrackType.SUBTITLE:

            cacheIndex, generatorCache = dictCache({'type': TrackType.SUBTITLE}, generatorCache)
            # click.echo(f"createMediaTestFile() cache index={cacheIndex} size={len(generatorCache)}")

            if cacheIndex == -1:
                # NOTE(review): assignment (not +=) — fine while the cache
                # guarantees at most one subtitle import; confirm if that
                # invariant ever changes.
                importTokens = ['-i', createVttFile(SHORT_SUBTITLE_SEQUENCE, directory=directory if directory else None)]

            # NOTE(review): input indices follow cache order, but the
            # command places all generator inputs before the VTT import.
            # A subtitle track preceding a new generator type would shift
            # the indices — confirm track ordering at call sites.
            sourceIndex = len(generatorCache) - 1 if cacheIndex == -1 else cacheIndex
            mappingTokens += ['-map', f"{sourceIndex}:s:0"]

            if not trackType in subIndexCounter.keys():
                subIndexCounter[trackType] = 0
            for mediaTagKey, mediaTagValue in trackDescriptor.getTags().items():
                metadataTokens += [f"-metadata:s:{trackType.indicator()}:{subIndexCounter[trackType]}",
                                   f"{mediaTagKey}={mediaTagValue}"]
            subIndexCounter[trackType] += 1

    # Disposition flags are generated by the regular controller logic.
    ffxContext = {'logger': logger}
    fc = FfxController(ffxContext, mediaDescriptor)

    commandTokens += (generatorTokens
                      + importTokens
                      + mappingTokens
                      + metadataTokens
                      + fc.generateDispositionTokens())

    # Clip all generated (infinite) sources to the requested length.
    commandTokens += ['-t', str(length)]

    if format:
        commandTokens += ['-f', format]

    fileName = f"{baseName}.{extension}"

    if directory:
        outputPath = os.path.join(directory, fileName)
    else:
        outputPath = fileName

    commandTokens += [outputPath]

    ctx = {'logger': logger}

    out, err, rc = executeProcess(commandTokens, context = ctx)

    if not logger is None:
        if out:
            logger.debug(f"createMediaTestFile(): Process output: {out}")
        if rc:
            logger.debug(f"createMediaTestFile(): Process returned ERROR {rc} ({err})")

    return outputPath
|
||||||
|
|
||||||
|
|
||||||
|
def createEmptyDirectory():
    """Create a fresh, empty temporary directory and return its path."""
    newDirectoryPath = tempfile.mkdtemp()
    return newDirectoryPath
|
||||||
|
|
||||||
|
def createEmptyFile(suffix=None):
    """Create an empty temporary file.

    Returns the raw ``tempfile.mkstemp`` result, i.e. an ``(fd, path)``
    tuple.  NOTE(review): the OS-level file descriptor comes back open;
    callers must ``os.close(fd)`` or it leaks — confirm call sites.
    """
    return tempfile.mkstemp(suffix=suffix)
|
||||||
43
bin/ffx/test/indicator_combinator.py
Normal file
43
bin/ffx/test/indicator_combinator.py
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
class IndicatorCombinator():
    """Combinator yielding season/episode indicator payloads.

    getYield() produces one 'no indicator' payload followed by one
    payload per (season, episode) pair up to MAX_SEASON x MAX_EPISODE.
    """

    IDENTIFIER = 'indicator'

    MAX_SEASON = 2
    MAX_EPISODE = 3

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return IndicatorCombinator.IDENTIFIER

    def getPayload(self, season: int = -1, episode: int = -1):
        """Build one payload dict; season == episode == -1 means 'no indicator'.

        The two former dict literals differed only in 'variant' and
        'indicator', so they are collapsed into one.
        """
        if season == -1 and episode == -1:
            indicator = ''
            variant = 'S00E00'
        else:
            indicator = f"S{season:02d}E{episode:02d}"
            variant = indicator
        return {
            'variant': variant,
            'indicator': indicator,
            'season': season,
            'episode': episode
        }

    def assertFunc(self, testObj = None):
        # 'None' instead of a shared mutable '{}' default (arg is unused).
        pass

    def shouldFail(self):
        return False

    def getYield(self):
        """Yield the empty payload, then every (season, episode) payload."""
        yield self.getPayload()
        for season in range(IndicatorCombinator.MAX_SEASON):
            for episode in range(IndicatorCombinator.MAX_EPISODE):
                yield self.getPayload(season + 1, episode + 1)
|
||||||
36
bin/ffx/test/label_combinator.py
Normal file
36
bin/ffx/test/label_combinator.py
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class LabelCombinator():
    """Base class for label combinator variants.

    Variant modules live next to this file as ``label_combinator_*.py``
    and are discovered/imported by the static helpers below.
    """

    IDENTIFIER = 'label'
    PREFIX = 'label_combinator_'

    LABEL = 'ffx'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return LabelCombinator.IDENTIFIER

    @staticmethod
    def list():
        """Discover the identifiers of all variant modules in this package."""
        moduleDirectory = os.path.dirname(__file__)
        pattern = f"{moduleDirectory}/{LabelCombinator.PREFIX}*.py"
        identifiers = []
        for modulePath in glob.glob(pattern, recursive = True):
            if modulePath == __file__:
                continue
            moduleBaseName = os.path.basename(modulePath)
            identifiers.append(moduleBaseName[len(LabelCombinator.PREFIX):-3])
        return identifiers

    @staticmethod
    def getClassReference(identifier):
        """Import the variant module and return its combinator class."""
        moduleName = f"ffx.test.{LabelCombinator.PREFIX}{identifier}"
        importlib.import_module(moduleName)
        for name, obj in inspect.getmembers(sys.modules[moduleName]):
            #HINT: Excluding the LabelCombinator base, which the variant
            # module re-exports via its import.
            if not inspect.isclass(obj):
                continue
            if name.startswith('LabelCombinator') and name != 'LabelCombinator':
                return obj

    @staticmethod
    def getAllClassReferences():
        """Return the class objects of every discovered variant."""
        return [LabelCombinator.getClassReference(i) for i in LabelCombinator.list()]
|
||||||
30
bin/ffx/test/label_combinator_0.py
Normal file
30
bin/ffx/test/label_combinator_0.py
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .label_combinator import LabelCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class LabelCombinator0(LabelCombinator):
    """Label variant L0: contributes an empty label."""

    VARIANT = 'L0'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getVariant(self):
        return LabelCombinator0.VARIANT

    def getPayload(self):
        """This variant's payload is the empty label string."""
        return ''

    def assertFunc(self, testObj = None):
        # 'None' instead of a shared mutable '{}' default (arg is unused).
        pass

    def shouldFail(self):
        return False
|
||||||
30
bin/ffx/test/label_combinator_1.py
Normal file
30
bin/ffx/test/label_combinator_1.py
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .label_combinator import LabelCombinator
|
||||||
|
|
||||||
|
class LabelCombinator1(LabelCombinator):
    """Label variant L1: contributes the standard 'ffx' label."""

    VARIANT = 'L1'

    def __init__(self, context = None):

        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getVariant(self):
        return LabelCombinator1.VARIANT

    def getPayload(self):
        """This variant's payload is the shared LabelCombinator.LABEL."""
        return LabelCombinator.LABEL

    def assertFunc(self, testObj = None):
        # 'None' instead of a shared mutable '{}' default (arg is unused).
        pass

    def shouldFail(self):
        return False
|
||||||
33
bin/ffx/test/media_combinator.py
Normal file
33
bin/ffx/test/media_combinator.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class MediaCombinator():
    """Base class for media-layout combinator variants.

    Variant modules live next to this file as ``media_combinator_*.py``
    and are discovered/imported by the static helpers below.
    """

    IDENTIFIER = 'media'
    # Module-name prefix of variant files.  Replaces the former magic
    # slice offset 17 (== len('media_combinator_')) and mirrors the
    # PREFIX convention used by LabelCombinator.
    PREFIX = 'media_combinator_'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return MediaCombinator.IDENTIFIER

    @staticmethod
    def list():
        """Discover the identifiers of all variant modules in this package."""
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[len(MediaCombinator.PREFIX):-3]
                for p
                in glob.glob(f"{ basePath }/{MediaCombinator.PREFIX}*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        """Import the variant module and return its combinator class."""
        importlib.import_module(f"ffx.test.{MediaCombinator.PREFIX}{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.{MediaCombinator.PREFIX}{ identifier }"]):
            #HINT: Excluding MediaCombinator as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'MediaCombinator' and name.startswith('MediaCombinator'):
                return obj

    @staticmethod
    def getAllClassReferences():
        """Return the class objects of every discovered variant."""
        return [MediaCombinator.getClassReference(i) for i in MediaCombinator.list()]
|
||||||
103
bin/ffx/test/media_combinator_0.py
Normal file
103
bin/ffx/test/media_combinator_0.py
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaCombinator0(MediaCombinator):
    """Media variant VA: one video track plus one audio track."""

    VARIANT = 'VA'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        # Whether getYield() should emit a 'preset' entry alongside 'target'.
        self.__createPresets = createPresets

    def getVariant(self):
        return MediaCombinator0.VARIANT

    def getPayload(self):
        """Build the V+A MediaDescriptor for this variant."""
        trackDescriptor1 = self.__buildTrackDescriptor(0, 0, TrackType.VIDEO)
        trackDescriptor2 = self.__buildTrackDescriptor(1, 1, TrackType.AUDIO)

        kwargs = {}
        kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
        kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor1,
                                                             trackDescriptor2]
        mediaDescriptor = MediaDescriptor(**kwargs)
        # mediaDescriptor.reindexSubIndices()

        return mediaDescriptor

    def __buildTrackDescriptor(self, index, sourceIndex, trackType):
        """One TrackDescriptor of the given type with subIndex 0
        (collapses the formerly duplicated kwargs blocks)."""
        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = index
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = sourceIndex
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = trackType
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        return TrackDescriptor(**kwargs)

    def assertFunc(self, testObj = None):
        # 'None' instead of a shared mutable '{}' default (arg is unused).
        pass

    def shouldFail(self):
        return False

    def __buildYieldEntry(self, mtc):
        """One yield entry for a given media-tag combinator instance
        (deduplicates the former target/preset twin blocks)."""
        return {
            'identifier': self.getIdentifier(),
            'variants': [self.getVariant(),
                         mtc.getVariant()],
            'payload': self.getPayload(),
            'assertSelectors': ['M', 'MT'],
            'assertFuncs': [self.assertFunc,
                            mtc.createAssertFunc()],
            # 'or' instead of bitwise '|' on booleans.
            'shouldFail': self.shouldFail() or mtc.shouldFail(),
        }

    def getYield(self):
        """Yield one {'target': ..., ['preset': ...]} dict per tag combinator."""
        for MTC in MediaTagCombinator.getAllClassReferences():

            yieldObj = {'target': self.__buildYieldEntry(MTC(self._context))}

            if self.__createPresets:
                yieldObj['preset'] = self.__buildYieldEntry(
                    MTC(self._context, createPresets = True))

            yield yieldObj
|
||||||
114
bin/ffx/test/media_combinator_1.py
Normal file
114
bin/ffx/test/media_combinator_1.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaCombinator1(MediaCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'VAS'
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaCombinator1.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||||
|
trackDescriptor1,
|
||||||
|
trackDescriptor2]
|
||||||
|
|
||||||
|
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||||
|
# mediaDescriptor.reindexSubIndices()
|
||||||
|
|
||||||
|
return mediaDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||||
|
|
||||||
|
mtc = MTC(self._context)
|
||||||
|
|
||||||
|
yObj = {}
|
||||||
|
|
||||||
|
yObj['identifier'] = self.getIdentifier()
|
||||||
|
yObj['variants'] = [self.getVariant(),
|
||||||
|
mtc.getVariant()]
|
||||||
|
|
||||||
|
yObj['payload'] = self.getPayload()
|
||||||
|
|
||||||
|
yObj['assertSelectors'] = ['M', 'MT']
|
||||||
|
yObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
mtc.createAssertFunc()]
|
||||||
|
|
||||||
|
yObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| mtc.shouldFail())
|
||||||
|
|
||||||
|
yieldObj = {'target': yObj}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
mtc_p = MTC(self._context, createPresets = True)
|
||||||
|
|
||||||
|
yObj_p = {}
|
||||||
|
|
||||||
|
yObj_p['identifier'] = self.getIdentifier()
|
||||||
|
yObj_p['variants'] = [self.getVariant(),
|
||||||
|
mtc_p.getVariant()]
|
||||||
|
|
||||||
|
yObj_p['payload'] = self.getPayload()
|
||||||
|
|
||||||
|
yObj_p['assertSelectors'] = ['M', 'MT']
|
||||||
|
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||||
|
mtc_p.createAssertFunc()]
|
||||||
|
|
||||||
|
yObj_p['shouldFail'] = (self.shouldFail()
|
||||||
|
| mtc_p.shouldFail())
|
||||||
|
yieldObj['preset'] = yObj_p
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
166
bin/ffx/test/media_combinator_2.py
Normal file
166
bin/ffx/test/media_combinator_2.py
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools, click
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
from .permutation_combinator_2 import PermutationCombinator2
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaCombinator2(MediaCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'VASS'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaCombinator2.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self,
|
||||||
|
subtitleDispositionTuple = (set(), set()),
|
||||||
|
subtitleTagTuple = ({}, {})):
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
|
||||||
|
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
|
||||||
|
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||||
|
trackDescriptor1,
|
||||||
|
trackDescriptor2,
|
||||||
|
trackDescriptor3]
|
||||||
|
|
||||||
|
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||||
|
# mediaDescriptor.reindexSubIndices()
|
||||||
|
|
||||||
|
return mediaDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||||
|
for DC2 in DispositionCombinator2.getAllClassReferences():
|
||||||
|
for TC2 in TrackTagCombinator2.getAllClassReferences():
|
||||||
|
for J in JellyfinCombinator.getAllClassReferences():
|
||||||
|
|
||||||
|
j = J(self._context)
|
||||||
|
self._context['use_jellyfin'] = j.getPayload()
|
||||||
|
|
||||||
|
dc2 = DC2(self._context)
|
||||||
|
tc2 = TC2(self._context)
|
||||||
|
|
||||||
|
mtc = MTC(self._context)
|
||||||
|
|
||||||
|
yObj = {}
|
||||||
|
|
||||||
|
yObj['identifier'] = self.getIdentifier()
|
||||||
|
yObj['variants'] = [self.getVariant(),
|
||||||
|
f"S:{dc2.getVariant()}",
|
||||||
|
f"S:{tc2.getVariant()}",
|
||||||
|
mtc.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj['payload'] = self.getPayload(dc2.getPayload(),
|
||||||
|
tc2.getPayload())
|
||||||
|
|
||||||
|
yObj['assertSelectors'] = ['M', 'SD', 'ST', 'MT', 'J']
|
||||||
|
yObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2.createAssertFunc(),
|
||||||
|
tc2.createAssertFunc(),
|
||||||
|
mtc.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2.shouldFail()
|
||||||
|
| tc2.shouldFail()
|
||||||
|
| mtc.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
|
||||||
|
yieldObj = {'target': yObj}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
dc2_p = DC2(self._context, createPresets = True)
|
||||||
|
tc2_p = TC2(self._context, createPresets = True)
|
||||||
|
|
||||||
|
mtc_p = MTC(self._context, createPresets = True)
|
||||||
|
|
||||||
|
yObj_p = {}
|
||||||
|
|
||||||
|
yObj_p['identifier'] = self.getIdentifier()
|
||||||
|
yObj_p['variants'] = [self.getVariant(),
|
||||||
|
f"S:{dc2_p.getVariant()}",
|
||||||
|
f"S:{tc2_p.getVariant()}",
|
||||||
|
mtc_p.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj_p['payload'] = self.getPayload(dc2_p.getPayload(),
|
||||||
|
tc2_p.getPayload())
|
||||||
|
|
||||||
|
yObj_p['assertSelectors'] = ['M', 'SD', 'ST', 'MT', 'J']
|
||||||
|
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2_p.createAssertFunc(),
|
||||||
|
tc2_p.createAssertFunc(),
|
||||||
|
mtc_p.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj_p['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2_p.shouldFail()
|
||||||
|
| tc2_p.shouldFail()
|
||||||
|
| mtc_p.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
|
||||||
|
yieldObj['preset'] = yObj_p
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
175
bin/ffx/test/media_combinator_3.py
Normal file
175
bin/ffx/test/media_combinator_3.py
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
from .disposition_combinator_3 import DispositionCombinator3
|
||||||
|
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||||
|
from .permutation_combinator_3 import PermutationCombinator3
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaCombinator3(MediaCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'VASSS'
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaCombinator3.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self,
|
||||||
|
subtitleDispositionTuple = (set(), set(), set()),
|
||||||
|
subtitleTagTuple = ({}, {}, {})):
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
|
||||||
|
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
|
||||||
|
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 4
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 4
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[2]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[2]
|
||||||
|
trackDescriptor4 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||||
|
trackDescriptor1,
|
||||||
|
trackDescriptor2,
|
||||||
|
trackDescriptor3,
|
||||||
|
trackDescriptor4]
|
||||||
|
|
||||||
|
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||||
|
# mediaDescriptor.reindexSubIndices()
|
||||||
|
|
||||||
|
return mediaDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||||
|
for DC3 in DispositionCombinator3.getAllClassReferences():
|
||||||
|
for TC3 in TrackTagCombinator3.getAllClassReferences():
|
||||||
|
for J in JellyfinCombinator.getAllClassReferences():
|
||||||
|
|
||||||
|
j = J(self._context)
|
||||||
|
self._context['use_jellyfin'] = j.getPayload()
|
||||||
|
|
||||||
|
dc3 = DC3(self._context)
|
||||||
|
tc3 = TC3(self._context)
|
||||||
|
|
||||||
|
mtc = MTC(self._context)
|
||||||
|
|
||||||
|
yObj = {}
|
||||||
|
|
||||||
|
yObj['identifier'] = self.getIdentifier()
|
||||||
|
yObj['variants'] = [self.getVariant(),
|
||||||
|
f"S:{dc3.getVariant()}",
|
||||||
|
f"S:{tc3.getVariant()}",
|
||||||
|
mtc.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj['payload'] = self.getPayload(dc3.getPayload(),
|
||||||
|
tc3.getPayload())
|
||||||
|
|
||||||
|
yObj['assertSelectors'] = ['M', 'SD', 'ST', 'MT', 'J']
|
||||||
|
yObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc3.createAssertFunc(),
|
||||||
|
tc3.createAssertFunc(),
|
||||||
|
mtc.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc3.shouldFail()
|
||||||
|
| tc3.shouldFail()
|
||||||
|
| mtc.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj = {'target': yObj}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
dc3_p = DC3(self._context, createPresets = True)
|
||||||
|
tc3_p = TC3(self._context, createPresets = True)
|
||||||
|
|
||||||
|
mtc_p = MTC(self._context, createPresets = True)
|
||||||
|
|
||||||
|
yObj_p = {}
|
||||||
|
|
||||||
|
yObj_p['identifier'] = self.getIdentifier()
|
||||||
|
yObj_p['variants'] = [self.getVariant(),
|
||||||
|
f"S:{dc3_p.getVariant()}",
|
||||||
|
f"S:{tc3_p.getVariant()}",
|
||||||
|
mtc_p.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj_p['payload'] = self.getPayload(dc3_p.getPayload(),
|
||||||
|
tc3_p.getPayload())
|
||||||
|
|
||||||
|
yObj_p['assertSelectors'] = ['M', 'SD', 'ST', 'MT', 'J']
|
||||||
|
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc3_p.createAssertFunc(),
|
||||||
|
tc3_p.createAssertFunc(),
|
||||||
|
mtc_p.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj_p['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc3_p.shouldFail()
|
||||||
|
| tc3_p.shouldFail()
|
||||||
|
| mtc_p.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj['preset'] = yObj_p
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
155
bin/ffx/test/media_combinator_4.py
Normal file
155
bin/ffx/test/media_combinator_4.py
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
from .permutation_combinator_2 import PermutationCombinator2
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaCombinator4(MediaCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'VAA'
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaCombinator4.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self,
|
||||||
|
audioDispositionTuple = (set(), set()),
|
||||||
|
audioTagTuple = ({}, {})):
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||||
|
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||||
|
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||||
|
trackDescriptor1,
|
||||||
|
trackDescriptor2]
|
||||||
|
|
||||||
|
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||||
|
# mediaDescriptor.reindexSubIndices()
|
||||||
|
|
||||||
|
return mediaDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||||
|
for DC2 in DispositionCombinator2.getAllClassReferences():
|
||||||
|
for TC2 in TrackTagCombinator2.getAllClassReferences():
|
||||||
|
for J in JellyfinCombinator.getAllClassReferences():
|
||||||
|
|
||||||
|
j = J(self._context)
|
||||||
|
self._context['use_jellyfin'] = j.getPayload()
|
||||||
|
|
||||||
|
dc2 = DC2(self._context)
|
||||||
|
tc2 = TC2(self._context)
|
||||||
|
|
||||||
|
mtc = MTC(self._context)
|
||||||
|
|
||||||
|
yObj = {}
|
||||||
|
|
||||||
|
yObj['identifier'] = self.getIdentifier()
|
||||||
|
yObj['variants'] = [self.getVariant(),
|
||||||
|
f"A:{dc2.getVariant()}",
|
||||||
|
f"A:{tc2.getVariant()}",
|
||||||
|
mtc.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj['payload'] = self.getPayload(dc2.getPayload(),
|
||||||
|
tc2.getPayload())
|
||||||
|
|
||||||
|
yObj['assertSelectors'] = ['M', 'AD', 'AT', 'MT', 'J']
|
||||||
|
yObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2.createAssertFunc(),
|
||||||
|
tc2.createAssertFunc(),
|
||||||
|
mtc.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2.shouldFail()
|
||||||
|
| tc2.shouldFail()
|
||||||
|
| mtc.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj = {'target': yObj}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
dc2_p = DC2(self._context, createPresets = True)
|
||||||
|
tc2_p = TC2(self._context, createPresets = True)
|
||||||
|
|
||||||
|
mtc_p = MTC(self._context, createPresets = True)
|
||||||
|
|
||||||
|
yObj_p = {}
|
||||||
|
|
||||||
|
yObj_p['identifier'] = self.getIdentifier()
|
||||||
|
yObj_p['variants'] = [self.getVariant(),
|
||||||
|
f"A:{dc2_p.getVariant()}",
|
||||||
|
f"A:{tc2_p.getVariant()}",
|
||||||
|
mtc_p.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj_p['payload'] = self.getPayload(dc2_p.getPayload(),
|
||||||
|
tc2_p.getPayload())
|
||||||
|
|
||||||
|
yObj_p['assertSelectors'] = ['M', 'AD', 'AT', 'MT', 'J']
|
||||||
|
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2_p.createAssertFunc(),
|
||||||
|
tc2_p.createAssertFunc(),
|
||||||
|
mtc_p.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj_p['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2_p.shouldFail()
|
||||||
|
| tc2_p.shouldFail()
|
||||||
|
| mtc_p.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj['preset'] = yObj_p
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
165
bin/ffx/test/media_combinator_5.py
Normal file
165
bin/ffx/test/media_combinator_5.py
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
from .permutation_combinator_2 import PermutationCombinator2
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaCombinator5(MediaCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'VAAS'
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaCombinator5.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self,
|
||||||
|
audioDispositionTuple = (set(), set()),
|
||||||
|
audioTagTuple = ({}, {})):
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||||
|
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||||
|
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||||
|
trackDescriptor1,
|
||||||
|
trackDescriptor2,
|
||||||
|
trackDescriptor3]
|
||||||
|
|
||||||
|
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||||
|
# mediaDescriptor.reindexSubIndices()
|
||||||
|
|
||||||
|
return mediaDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||||
|
for DC2 in DispositionCombinator2.getAllClassReferences():
|
||||||
|
for TC2 in TrackTagCombinator2.getAllClassReferences():
|
||||||
|
for J in JellyfinCombinator.getAllClassReferences():
|
||||||
|
|
||||||
|
j = J(self._context)
|
||||||
|
self._context['use_jellyfin'] = j.getPayload()
|
||||||
|
|
||||||
|
dc2 = DC2(self._context)
|
||||||
|
tc2 = TC2(self._context)
|
||||||
|
|
||||||
|
mtc = MTC(self._context)
|
||||||
|
|
||||||
|
yObj = {}
|
||||||
|
|
||||||
|
yObj['identifier'] = self.getIdentifier()
|
||||||
|
yObj['variants'] = [self.getVariant(),
|
||||||
|
f"A:{dc2.getVariant()}",
|
||||||
|
f"A:{tc2.getVariant()}",
|
||||||
|
mtc.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj['payload'] = self.getPayload(dc2.getPayload(),
|
||||||
|
tc2.getPayload())
|
||||||
|
|
||||||
|
yObj['assertSelectors'] = ['M', 'AD', 'AT', 'MT', 'J']
|
||||||
|
yObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2.createAssertFunc(),
|
||||||
|
tc2.createAssertFunc(),
|
||||||
|
mtc.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2.shouldFail()
|
||||||
|
| tc2.shouldFail()
|
||||||
|
| mtc.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj = {'target': yObj}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
dc2_p = DC2(self._context, createPresets = True)
|
||||||
|
tc2_p = TC2(self._context, createPresets = True)
|
||||||
|
|
||||||
|
mtc_p = MTC(self._context, createPresets = True)
|
||||||
|
|
||||||
|
yObj_p = {}
|
||||||
|
|
||||||
|
yObj_p['identifier'] = self.getIdentifier()
|
||||||
|
yObj_p['variants'] = [self.getVariant(),
|
||||||
|
f"A:{dc2_p.getVariant()}",
|
||||||
|
f"A:{tc2_p.getVariant()}",
|
||||||
|
mtc_p.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj_p['payload'] = self.getPayload(dc2_p.getPayload(),
|
||||||
|
tc2_p.getPayload())
|
||||||
|
|
||||||
|
yObj_p['assertSelectors'] = ['M', 'AD', 'AT', 'MT', 'J']
|
||||||
|
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2_p.createAssertFunc(),
|
||||||
|
tc2_p.createAssertFunc(),
|
||||||
|
mtc_p.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj_p['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2_p.shouldFail()
|
||||||
|
| tc2_p.shouldFail()
|
||||||
|
| mtc_p.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj['preset'] = yObj_p
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
201
bin/ffx/test/media_combinator_6.py
Normal file
201
bin/ffx/test/media_combinator_6.py
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
from .permutation_combinator_2 import PermutationCombinator2
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaCombinator6(MediaCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'VAASS'
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaCombinator6.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self,
|
||||||
|
audioDispositionTuple = (set(), set()),
|
||||||
|
audioTagTuple = ({}, {}),
|
||||||
|
subtitleDispositionTuple = (set(), set()),
|
||||||
|
subtitleTagTuple = ({}, {})):
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||||
|
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||||
|
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
|
||||||
|
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 4
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 4
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
|
||||||
|
trackDescriptor4 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||||
|
trackDescriptor1,
|
||||||
|
trackDescriptor2,
|
||||||
|
trackDescriptor3,
|
||||||
|
trackDescriptor4]
|
||||||
|
|
||||||
|
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||||
|
# mediaDescriptor.reindexSubIndices()
|
||||||
|
|
||||||
|
return mediaDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||||
|
for DC2_A in DispositionCombinator2.getAllClassReferences():
|
||||||
|
for TC2_A in TrackTagCombinator2.getAllClassReferences():
|
||||||
|
for DC2_S in DispositionCombinator2.getAllClassReferences():
|
||||||
|
for TC2_S in TrackTagCombinator2.getAllClassReferences():
|
||||||
|
for J in JellyfinCombinator.getAllClassReferences():
|
||||||
|
|
||||||
|
j = J(self._context)
|
||||||
|
self._context['use_jellyfin'] = j.getPayload()
|
||||||
|
|
||||||
|
dc2a = DC2_A(self._context)
|
||||||
|
tc2a = TC2_A(self._context)
|
||||||
|
dc2s = DC2_S(self._context)
|
||||||
|
tc2s = TC2_S(self._context)
|
||||||
|
|
||||||
|
mtc = MTC(self._context)
|
||||||
|
|
||||||
|
yObj = {}
|
||||||
|
|
||||||
|
yObj['identifier'] = self.getIdentifier()
|
||||||
|
yObj['variants'] = [self.getVariant(),
|
||||||
|
f"A:{dc2a.getVariant()}",
|
||||||
|
f"A:{tc2a.getVariant()}",
|
||||||
|
f"S:{dc2s.getVariant()}",
|
||||||
|
f"S:{tc2s.getVariant()}",
|
||||||
|
mtc.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj['payload'] = self.getPayload(dc2a.getPayload(),
|
||||||
|
tc2a.getPayload(),
|
||||||
|
dc2s.getPayload(),
|
||||||
|
tc2s.getPayload())
|
||||||
|
|
||||||
|
yObj['assertSelectors'] = ['M', 'AD', 'AT', 'SD', 'ST', 'MT', 'J']
|
||||||
|
yObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2a.createAssertFunc(),
|
||||||
|
tc2a.createAssertFunc(),
|
||||||
|
dc2s.createAssertFunc(),
|
||||||
|
tc2s.createAssertFunc(),
|
||||||
|
mtc.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2a.shouldFail()
|
||||||
|
| tc2a.shouldFail()
|
||||||
|
| dc2s.shouldFail()
|
||||||
|
| tc2s.shouldFail()
|
||||||
|
| mtc.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj = {'target': yObj}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
dc2a_p = DC2_A(self._context, createPresets = True)
|
||||||
|
tc2a_p = TC2_A(self._context, createPresets = True)
|
||||||
|
dc2s_p = DC2_S(self._context, createPresets = True)
|
||||||
|
tc2s_p = TC2_S(self._context, createPresets = True)
|
||||||
|
|
||||||
|
mtc_p = MTC(self._context, createPresets = True)
|
||||||
|
|
||||||
|
yObj_p = {}
|
||||||
|
|
||||||
|
yObj_p['identifier'] = self.getIdentifier()
|
||||||
|
yObj_p['variants'] = [self.getVariant(),
|
||||||
|
f"A:{dc2a_p.getVariant()}",
|
||||||
|
f"A:{tc2a_p.getVariant()}",
|
||||||
|
f"S:{dc2s_p.getVariant()}",
|
||||||
|
f"S:{tc2s_p.getVariant()}",
|
||||||
|
mtc_p.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj_p['payload'] = self.getPayload(dc2a_p.getPayload(),
|
||||||
|
tc2a_p.getPayload(),
|
||||||
|
dc2s_p.getPayload(),
|
||||||
|
tc2s_p.getPayload())
|
||||||
|
|
||||||
|
yObj_p['assertSelectors'] = ['M', 'AD', 'AT', 'SD', 'ST', 'MT', 'J']
|
||||||
|
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2a_p.createAssertFunc(),
|
||||||
|
tc2a_p.createAssertFunc(),
|
||||||
|
dc2s_p.createAssertFunc(),
|
||||||
|
tc2s_p.createAssertFunc(),
|
||||||
|
mtc_p.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj_p['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2a_p.shouldFail()
|
||||||
|
| tc2a_p.shouldFail()
|
||||||
|
| dc2s_p.shouldFail()
|
||||||
|
| tc2s_p.shouldFail()
|
||||||
|
| mtc_p.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj['preset'] = yObj_p
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
225
bin/ffx/test/media_combinator_7.py
Normal file
225
bin/ffx/test/media_combinator_7.py
Normal file
@@ -0,0 +1,225 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
from .disposition_combinator_2 import DispositionCombinator2
|
||||||
|
from .disposition_combinator_3 import DispositionCombinator3
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||||
|
from .permutation_combinator_2 import PermutationCombinator2
|
||||||
|
from .permutation_combinator_3 import PermutationCombinator3
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
class MediaCombinator7(MediaCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'VAASSS'
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaCombinator7.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self,
|
||||||
|
audioPermutation,
|
||||||
|
subtitlePermutation,
|
||||||
|
audioDispositionTuple = (set(), set()),
|
||||||
|
audioTagTuple = ({}, {}),
|
||||||
|
subtitleDispositionTuple = (set(), set(), set()),
|
||||||
|
subtitleTagTuple = ({}, {}, {})):
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||||
|
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||||
|
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
|
||||||
|
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 4
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 4
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
|
||||||
|
trackDescriptor4 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[TrackDescriptor.INDEX_KEY] = 5
|
||||||
|
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 5
|
||||||
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||||
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 2
|
||||||
|
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[2]
|
||||||
|
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[2]
|
||||||
|
trackDescriptor5 = TrackDescriptor(**kwargs)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||||
|
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||||
|
trackDescriptor1,
|
||||||
|
trackDescriptor2,
|
||||||
|
trackDescriptor3,
|
||||||
|
trackDescriptor4,
|
||||||
|
trackDescriptor5]
|
||||||
|
|
||||||
|
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||||
|
# mediaDescriptor.reindexSubIndices()
|
||||||
|
|
||||||
|
return mediaDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
|
||||||
|
|
||||||
|
pc2 = PermutationCombinator2(self._context)
|
||||||
|
pc3 = PermutationCombinator3(self._context)
|
||||||
|
|
||||||
|
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||||
|
for DC2_A in DispositionCombinator2.getAllClassReferences():
|
||||||
|
for TC2_A in TrackTagCombinator2.getAllClassReferences():
|
||||||
|
for DC3_S in DispositionCombinator3.getAllClassReferences():
|
||||||
|
for TC3_S in TrackTagCombinator3.getAllClassReferences():
|
||||||
|
for p2y in pc2.getYield():
|
||||||
|
for p3y in pc3.getYield():
|
||||||
|
|
||||||
|
dc2a = DC2_A(self._context)
|
||||||
|
tc2a = TC2_A(self._context)
|
||||||
|
dc3s = DC3_S(self._context)
|
||||||
|
tc3s = TC3_S(self._context)
|
||||||
|
|
||||||
|
mtc = MTC(self._context)
|
||||||
|
|
||||||
|
yObj = {}
|
||||||
|
|
||||||
|
yObj['identifier'] = self.getIdentifier()
|
||||||
|
yObj['variants'] = [self.getVariant(),
|
||||||
|
f"A:{p2y['variant']}",
|
||||||
|
f"S:{p3y['variant']}",
|
||||||
|
f"A:{dc2a.getVariant()}",
|
||||||
|
f"A:{tc2a.getVariant()}",
|
||||||
|
f"S:{dc3s.getVariant()}",
|
||||||
|
f"S:{tc3s.getVariant()}",
|
||||||
|
mtc.getVariant()]
|
||||||
|
|
||||||
|
yObj['payload'] = self.getPayload(p2y['permutation'],
|
||||||
|
p3y['permutation'],
|
||||||
|
dc2a.getPayload(),
|
||||||
|
tc2a.getPayload(),
|
||||||
|
dc3s.getPayload(),
|
||||||
|
tc3s.getPayload())
|
||||||
|
|
||||||
|
yObj['assertSelectors'] = ['M', 'AP', 'SP', 'AD', 'AT', 'SD', 'ST', 'MT']
|
||||||
|
|
||||||
|
yObj['assertFuncs'] = [self.assertFunc,
|
||||||
|
p2y.createAssertFunc(),
|
||||||
|
p3y.createAssertFunc(),
|
||||||
|
dc2a.createAssertFunc(),
|
||||||
|
tc2a.createAssertFunc(),
|
||||||
|
dc3s.createAssertFunc(),
|
||||||
|
tc3s.createAssertFunc(),
|
||||||
|
mtc.createAssertFunc()]
|
||||||
|
|
||||||
|
yObj['shouldFail'] = (self.shouldFail()
|
||||||
|
| p2y.shouldFail()
|
||||||
|
| p3y.shouldFail()
|
||||||
|
| dc2a.shouldFail()
|
||||||
|
| tc2a.shouldFail()
|
||||||
|
| dc3s.shouldFail()
|
||||||
|
| tc3s.shouldFail()
|
||||||
|
| mtc.shouldFail())
|
||||||
|
yieldObj = {'target': yObj}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
dc2a_p = DC2_A(self._context, createPresets = True)
|
||||||
|
tc2a_p = TC2_A(self._context, createPresets = True)
|
||||||
|
dc3s_p = DC3_S(self._context, createPresets = True)
|
||||||
|
tc3s_p = TC3_S(self._context, createPresets = True)
|
||||||
|
|
||||||
|
mtc_p = MTC(self._context, createPresets = True)
|
||||||
|
|
||||||
|
yObj_p = {}
|
||||||
|
|
||||||
|
yObj_p['identifier'] = self.getIdentifier()
|
||||||
|
yObj_p['variants'] = [self.getVariant(),
|
||||||
|
f"A:{dc2a_p.getVariant()}",
|
||||||
|
f"A:{tc2a_p.getVariant()}",
|
||||||
|
f"S:{dc3s_p.getVariant()}",
|
||||||
|
f"S:{tc3s_p.getVariant()}",
|
||||||
|
mtc_p.getVariant(),
|
||||||
|
j.getVariant()]
|
||||||
|
|
||||||
|
yObj_p['payload'] = self.getPayload(dc2a_p.getPayload(),
|
||||||
|
tc2a_p.getPayload(),
|
||||||
|
dc3s_p.getPayload(),
|
||||||
|
tc3s_p.getPayload())
|
||||||
|
|
||||||
|
yObj_p['assertSelectors'] = ['M', 'AD', 'AT', 'SD', 'ST', 'MT', 'J']
|
||||||
|
|
||||||
|
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||||
|
dc2a_p.createAssertFunc(),
|
||||||
|
tc2a_p.createAssertFunc(),
|
||||||
|
dc3s_p.createAssertFunc(),
|
||||||
|
tc3s_p.createAssertFunc(),
|
||||||
|
mtc_p.createAssertFunc(),
|
||||||
|
j.assertFunc]
|
||||||
|
|
||||||
|
yObj_p['shouldFail'] = (self.shouldFail()
|
||||||
|
| dc2a_p.shouldFail()
|
||||||
|
| tc2a_p.shouldFail()
|
||||||
|
| dc3s_p.shouldFail()
|
||||||
|
| tc3s_p.shouldFail()
|
||||||
|
| mtc_p.shouldFail()
|
||||||
|
| j.shouldFail())
|
||||||
|
yieldObj['preset'] = yObj_p
|
||||||
|
|
||||||
|
yield yieldObj
|
||||||
33
bin/ffx/test/media_tag_combinator.py
Normal file
33
bin/ffx/test/media_tag_combinator.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class MediaTagCombinator():
|
||||||
|
|
||||||
|
IDENTIFIER = 'mediaTag'
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return MediaTagCombinator.IDENTIFIER
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def list():
|
||||||
|
basePath = os.path.dirname(__file__)
|
||||||
|
return [os.path.basename(p)[21:-3]
|
||||||
|
for p
|
||||||
|
in glob.glob(f"{ basePath }/media_tag_combinator_*.py", recursive = True)
|
||||||
|
if p != __file__]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getClassReference(identifier):
|
||||||
|
importlib.import_module(f"ffx.test.media_tag_combinator_{ identifier }")
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.media_tag_combinator_{ identifier }"]):
|
||||||
|
#HINT: Excluding MediaCombinator as it seems to be included by import (?)
|
||||||
|
if inspect.isclass(obj) and name != 'MediaTagCombinator' and name.startswith('MediaTagCombinator'):
|
||||||
|
return obj
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getAllClassReferences():
|
||||||
|
return [MediaTagCombinator.getClassReference(i) for i in MediaTagCombinator.list()]
|
||||||
55
bin/ffx/test/media_tag_combinator_0.py
Normal file
55
bin/ffx/test/media_tag_combinator_0.py
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class MediaTagCombinator0(MediaTagCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'MT0'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaTagCombinator0.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
mediaTags = {}
|
||||||
|
if self.__createPresets:
|
||||||
|
mediaTags['THIS_IS'] = 'FFX'
|
||||||
|
return mediaTags
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tags'")
|
||||||
|
mediaTags = assertObj['tags']
|
||||||
|
|
||||||
|
assert ('THIS_IS' in mediaTags.keys() and mediaTags.keys()['THIS_IS'] == 'FFX'
|
||||||
|
), "Media tag 'THIS_IS' was not preserved"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
59
bin/ffx/test/media_tag_combinator_1.py
Normal file
59
bin/ffx/test/media_tag_combinator_1.py
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
class MediaTagCombinator1(MediaTagCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'MT1'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaTagCombinator1.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
mediaTags = {'From': 'Encoders'}
|
||||||
|
if self.__createPresets:
|
||||||
|
mediaTags['THIS_IS'] = 'FFX'
|
||||||
|
return mediaTags
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tags'")
|
||||||
|
mediaTags = assertObj['tags']
|
||||||
|
|
||||||
|
assert ('From' in mediaTags.keys()
|
||||||
|
), "'From' not in media tag keys"
|
||||||
|
assert (mediaTags.keys()['From'] == 'Encoders'
|
||||||
|
), "Media tag value not 'Encoders' for key 'To'"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in mediaTags.keys() and mediaTags.keys()['THIS_IS'] == 'FFX'
|
||||||
|
), "Media tag 'THIS_IS' was not preserved"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
64
bin/ffx/test/media_tag_combinator_2.py
Normal file
64
bin/ffx/test/media_tag_combinator_2.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
|
||||||
|
from .media_tag_combinator import MediaTagCombinator
|
||||||
|
|
||||||
|
class MediaTagCombinator2(MediaTagCombinator):
|
||||||
|
|
||||||
|
VARIANT = 'MT2'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return MediaTagCombinator2.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
mediaTags = {'To': 'Fanz',
|
||||||
|
'Yolo': 'Holo'}
|
||||||
|
if self.__createPresets:
|
||||||
|
mediaTags['THIS_IS'] = 'FFX'
|
||||||
|
return mediaTags
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tags'")
|
||||||
|
mediaTags = assertObj['tags']
|
||||||
|
|
||||||
|
assert ('To' in mediaTags.keys()
|
||||||
|
), "'To' not in media tag keys"
|
||||||
|
assert (mediaTags.keys()['To'] == 'Fanz'
|
||||||
|
), "Media tag value not 'Fanz' for key 'To'"
|
||||||
|
assert ('Yolo' in mediaTags.keys()
|
||||||
|
), "'Yolo' not in media tag keys"
|
||||||
|
assert (mediaTags.keys()['Yolo'] == 'Holo'
|
||||||
|
), "Media tag value not 'Holo' for key 'Yolo'"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in mediaTags.keys() and mediaTags.keys()['THIS_IS'] == 'FFX'
|
||||||
|
), "Media tag 'THIS_IS' was not preserved"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
36
bin/ffx/test/permutation_combinator_2.py
Normal file
36
bin/ffx/test/permutation_combinator_2.py
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
class PermutationCombinator2():
|
||||||
|
|
||||||
|
IDENTIFIER = 'permutation2'
|
||||||
|
|
||||||
|
PERMUTATION_LIST = [
|
||||||
|
[0,1],
|
||||||
|
[1,0]
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return PermutationCombinator2.IDENTIFIER
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self, permutationIndex):
|
||||||
|
return {
|
||||||
|
'variant': f"P{permutationIndex}",
|
||||||
|
'permutation': PermutationCombinator2.PERMUTATION_LIST[permutationIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
def f(testObj = {}):
|
||||||
|
pass
|
||||||
|
return f
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
for permutationIndex in range(len(PermutationCombinator2.PERMUTATION_LIST)):
|
||||||
|
yield self.getPayload(permutationIndex)
|
||||||
37
bin/ffx/test/permutation_combinator_3.py
Normal file
37
bin/ffx/test/permutation_combinator_3.py
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
class PermutationCombinator3():
|
||||||
|
|
||||||
|
IDENTIFIER = 'permutation3'
|
||||||
|
|
||||||
|
PERMUTATION_LIST = [
|
||||||
|
[0,1,2],
|
||||||
|
[0,2,1],
|
||||||
|
[1,2,0]
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return PermutationCombinator3.IDENTIFIER
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self, permutationIndex):
|
||||||
|
return {
|
||||||
|
'variant': f"P{permutationIndex}",
|
||||||
|
'permutation': PermutationCombinator3.PERMUTATION_LIST[permutationIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
def f(testObj = {}):
|
||||||
|
pass
|
||||||
|
return f
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
for permutationIndex in range(len(PermutationCombinator3.PERMUTATION_LIST)):
|
||||||
|
yield self.getPayload(permutationIndex)
|
||||||
37
bin/ffx/test/release_combinator.py
Normal file
37
bin/ffx/test/release_combinator.py
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
class ReleaseCombinator():
|
||||||
|
|
||||||
|
IDENTIFIER = 'release'
|
||||||
|
|
||||||
|
RELEASE_LIST = [
|
||||||
|
".GerEngSub.AAC.1080pINDICATOR.WebDL.x264-Tanuki",
|
||||||
|
".German.AC3.DL.1080pINDICATOR.BluRay.x264-AST4u",
|
||||||
|
"-720pINDICATOR"
|
||||||
|
]
|
||||||
|
|
||||||
|
def __init__(self, context = None, indicator = ''):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
self.__indicator = indicator
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return ReleaseCombinator.IDENTIFIER
|
||||||
|
|
||||||
|
def getPayload(self, releaseIndex):
|
||||||
|
releaseStr: str = ReleaseCombinator.RELEASE_LIST[releaseIndex]
|
||||||
|
return {
|
||||||
|
'variant': f"R{releaseIndex}",
|
||||||
|
'release': releaseStr.replace('INDICATOR', f".{self.__indicator}")
|
||||||
|
if self.__indicator else releaseStr.replace('INDICATOR', '')
|
||||||
|
}
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
for releaseIndex in range(len(ReleaseCombinator.RELEASE_LIST)):
|
||||||
|
yield self.getPayload(releaseIndex)
|
||||||
154
bin/ffx/test/scenario.py
Normal file
154
bin/ffx/test/scenario.py
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
import os, glob, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.show_controller import ShowController
|
||||||
|
from ffx.pattern_controller import PatternController
|
||||||
|
from ffx.media_controller import MediaController
|
||||||
|
|
||||||
|
from ffx.test.helper import createEmptyDirectory
|
||||||
|
from ffx.database import databaseContext
|
||||||
|
|
||||||
|
class Scenario():
|
||||||
|
"""Scenarios
|
||||||
|
|
||||||
|
Scenario1: Jellyfin, MediaTags, Stream-Kombinationen, Dispositions und StreamTags per Kombinatoren
|
||||||
|
|
||||||
|
Scenario2: <pattern> mit 3 Files x Scenario1
|
||||||
|
|
||||||
|
Scenario3: <tmdb+pattern> mit 3 Files (wenn TMDB API Key verfügbar)
|
||||||
|
|
||||||
|
Naming:
|
||||||
|
|
||||||
|
1: test.mkv no tmdb, no pattern
|
||||||
|
2: test_s01e02.mkv
|
||||||
|
|
||||||
|
Operationen:
|
||||||
|
|
||||||
|
tmdb lookup: Set Showname as prefix, append episode name
|
||||||
|
pattern lookup: Set/update tags/dispositions; Filter/Reorder Tracks
|
||||||
|
jellyfin reordering: default track last (2)
|
||||||
|
|
||||||
|
MediaTag-Kombinationen (2)
|
||||||
|
|
||||||
|
0: nichs
|
||||||
|
1: Yolo=Holo
|
||||||
|
|
||||||
|
Stream-Kombinationen (8)
|
||||||
|
|
||||||
|
VA D=1 T=1 =1
|
||||||
|
VAS D=1 T=1 =1
|
||||||
|
VASS D=4 T=4 =16
|
||||||
|
VASSS D=5 T=5 =25
|
||||||
|
VAA D=4 T=4 =16
|
||||||
|
VAAS D=4 T=4 =16
|
||||||
|
VAASS D=16 T=16 =256
|
||||||
|
VAASSS D=20 T=20 =400
|
||||||
|
=731
|
||||||
|
|
||||||
|
Dispositions-Kombinationen (per TrackType)
|
||||||
|
|
||||||
|
0 = keine
|
||||||
|
1 = DEFAULT
|
||||||
|
|
||||||
|
2 Streams (4):
|
||||||
|
|
||||||
|
D1: 00
|
||||||
|
D2: 01
|
||||||
|
D3: 10
|
||||||
|
D4: 11
|
||||||
|
|
||||||
|
3 Streams (5):
|
||||||
|
|
||||||
|
D5: 000
|
||||||
|
D6: 001
|
||||||
|
D7: 010
|
||||||
|
D8: 100
|
||||||
|
D9: 101
|
||||||
|
|
||||||
|
Stream-Tag-Kombinationen (per TrackType)
|
||||||
|
|
||||||
|
0 = keine
|
||||||
|
1 = lang+title
|
||||||
|
|
||||||
|
2 Streams:
|
||||||
|
|
||||||
|
00
|
||||||
|
01
|
||||||
|
10
|
||||||
|
11
|
||||||
|
|
||||||
|
3 Streams:
|
||||||
|
|
||||||
|
000
|
||||||
|
001
|
||||||
|
010
|
||||||
|
100
|
||||||
|
101
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._testDirectory = createEmptyDirectory()
|
||||||
|
self._ffxExecutablePath = os.path.join(
|
||||||
|
os.path.dirname(
|
||||||
|
os.path.dirname(
|
||||||
|
os.path.dirname(__file__))),
|
||||||
|
'ffx.py')
|
||||||
|
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
self._testDbFilePath = os.path.join(self._testDirectory, 'test.db')
|
||||||
|
self.createEmptyTestDatabase()
|
||||||
|
|
||||||
|
# Convenience
|
||||||
|
self._niceness = self._context['resource_limits']['niceness'] if 'resource_limits' in self._context.keys() and 'niceness' in self._context['resource_limits'].keys() else 99
|
||||||
|
self._cpuPercent = self._context['resource_limits']['cpu_percent'] if 'resource_limits' in self._context.keys() and 'cpu_percent' in self._context['resource_limits'].keys() else 99
|
||||||
|
|
||||||
|
|
||||||
|
def createEmptyTestDatabase(self):
|
||||||
|
|
||||||
|
if not self._context['database'] is None:
|
||||||
|
self._context['database']['engine'].dispose()
|
||||||
|
|
||||||
|
if os.path.isfile(self._testDbFilePath):
|
||||||
|
os.unlink(self._testDbFilePath)
|
||||||
|
self._context['database'] = None
|
||||||
|
|
||||||
|
self._logger.debug(f"Creating test db with path {self._testDbFilePath}")
|
||||||
|
self._context['database'] = databaseContext(databasePath=self._testDbFilePath)
|
||||||
|
|
||||||
|
self._sc = ShowController(context = self._context)
|
||||||
|
self._pc = PatternController(context = self._context)
|
||||||
|
self._mc = MediaController(context = self._context)
|
||||||
|
|
||||||
|
|
||||||
|
def clearTestDirectory(self):
|
||||||
|
testFiles = glob.glob(f"{self._testDirectory}/*")
|
||||||
|
for f in testFiles:
|
||||||
|
os.remove(f)
|
||||||
|
|
||||||
|
def getFilePathsInTestDirectory(self):
|
||||||
|
return [f for f in glob.glob(f"{self._testDirectory}/*")]
|
||||||
|
|
||||||
|
def getFilenamesInTestDirectory(self):
|
||||||
|
return [os.path.basename(f) for f in self.getFilePathsInTestDirectory()]
|
||||||
|
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def list():
|
||||||
|
basePath = os.path.dirname(__file__)
|
||||||
|
return [os.path.basename(p)[9:-3]
|
||||||
|
for p
|
||||||
|
in glob.glob(f"{ basePath }/scenario_*.py", recursive = True)
|
||||||
|
if p != __file__]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getClassReference(identifier):
|
||||||
|
importlib.import_module(f"ffx.test.scenario_{ identifier }")
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.scenario_{ identifier }"]):
|
||||||
|
#HINT: Excluding Scenario as it seems to be included by import (?)
|
||||||
|
if inspect.isclass(obj) and name != 'Scenario' and name.startswith('Scenario'):
|
||||||
|
return obj
|
||||||
174
bin/ffx/test/scenario_1.py
Normal file
174
bin/ffx/test/scenario_1.py
Normal file
@@ -0,0 +1,174 @@
|
|||||||
|
import os, sys, click, glob
|
||||||
|
|
||||||
|
from .scenario import Scenario
|
||||||
|
|
||||||
|
from ffx.test.helper import createMediaTestFile
|
||||||
|
from ffx.process import executeProcess
|
||||||
|
|
||||||
|
from ffx.file_properties import FileProperties
|
||||||
|
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
|
from ffx.test.media_combinator_0 import MediaCombinator0
|
||||||
|
|
||||||
|
from ffx.test.basename_combinator import BasenameCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class Scenario1(Scenario):
|
||||||
|
"""Creating file VAa, h264/aac/aac
|
||||||
|
Converting to VaA, vp9/opus/opus
|
||||||
|
No tmdb, default parameters"""
|
||||||
|
|
||||||
|
TEST_FILE_EXTENSION = 'mkv'
|
||||||
|
EXPECTED_FILE_EXTENSION = 'webm'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context):
|
||||||
|
|
||||||
|
context['use_jellyfin'] = True
|
||||||
|
context['use_tmdb'] = False
|
||||||
|
context['use_pattern'] = False
|
||||||
|
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
def getScenario(self):
|
||||||
|
return self.__class__.__name__[8:]
|
||||||
|
|
||||||
|
|
||||||
|
def job(self, yieldObj: dict):
|
||||||
|
|
||||||
|
testContext = self._context.copy()
|
||||||
|
|
||||||
|
identifier = yieldObj['identifier']
|
||||||
|
variantList = yieldObj['variants']
|
||||||
|
|
||||||
|
variantIdentifier = '-'.join(variantList)
|
||||||
|
variantLabel = f"{self.__class__.__name__} Variant {variantIdentifier}"
|
||||||
|
|
||||||
|
mc0 = MediaCombinator0(context = testContext)
|
||||||
|
sourceMediaDescriptor: MediaDescriptor = mc0.getPayload()
|
||||||
|
|
||||||
|
assertSelectorList: list = yieldObj['assertSelectors']
|
||||||
|
assertFuncList = yieldObj['assertFuncs']
|
||||||
|
shouldFail = yieldObj['shouldFail']
|
||||||
|
|
||||||
|
variantPayload = yieldObj['payload']
|
||||||
|
variantBasename = variantPayload['basename']
|
||||||
|
variantFilenameLabel = variantPayload['label']
|
||||||
|
expectedBasename = variantPayload['expectedBasename']
|
||||||
|
|
||||||
|
variantFilename = f"{variantBasename}.{Scenario1.TEST_FILE_EXTENSION}"
|
||||||
|
expectedFilename = f"{expectedBasename}.{Scenario1.EXPECTED_FILE_EXTENSION}"
|
||||||
|
|
||||||
|
|
||||||
|
if self._context['test_variant'] and not variantIdentifier.startswith(self._context['test_variant']):
|
||||||
|
return
|
||||||
|
|
||||||
|
if ((self._context['test_passed_counter'] + self._context['test_failed_counter'])
|
||||||
|
>= self._context['test_limit']):
|
||||||
|
return
|
||||||
|
|
||||||
|
self._logger.debug(f"Running Job: {variantLabel}")
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 1: Setup source files
|
||||||
|
|
||||||
|
if not variantBasename:
|
||||||
|
raise ValueError(f"{variantLabel}: Testfile basename is falsy")
|
||||||
|
|
||||||
|
|
||||||
|
self.clearTestDirectory()
|
||||||
|
|
||||||
|
self._logger.debug(f"Creating test file: {variantFilename}")
|
||||||
|
mediaFilePath = createMediaTestFile(mediaDescriptor=sourceMediaDescriptor,
|
||||||
|
baseName=variantBasename,
|
||||||
|
directory=self._testDirectory,
|
||||||
|
logger=self._logger,
|
||||||
|
length = 2)
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 2: Run ffx
|
||||||
|
|
||||||
|
commandSequence = [sys.executable,
|
||||||
|
self._ffxExecutablePath]
|
||||||
|
|
||||||
|
if self._context['verbosity']:
|
||||||
|
commandSequence += ['--verbose',
|
||||||
|
str(self._context['verbosity'])]
|
||||||
|
|
||||||
|
commandSequence += ['convert',
|
||||||
|
mediaFilePath,
|
||||||
|
'--no-prompt',
|
||||||
|
'--no-signature']
|
||||||
|
|
||||||
|
if variantFilenameLabel:
|
||||||
|
commandSequence += ['--label', variantFilenameLabel]
|
||||||
|
|
||||||
|
|
||||||
|
commandSequence += ['--no-pattern']
|
||||||
|
commandSequence += ['--no-tmdb']
|
||||||
|
|
||||||
|
out, err, rc = executeProcess(commandSequence, directory = self._testDirectory, context = self._context)
|
||||||
|
|
||||||
|
if out and self._context['verbosity'] >= 9:
|
||||||
|
self._logger.debug(f"{variantLabel}: Process output: {out}")
|
||||||
|
if rc:
|
||||||
|
self._logger.debug(f"{variantLabel}: Process returned ERROR {rc} ({err})")
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 3: Evaluate results
|
||||||
|
|
||||||
|
resultFilenames = [rf for rf in self.getFilenamesInTestDirectory() if rf != 'ffmpeg2pass-0.log' and rf != variantFilename]
|
||||||
|
self._logger.debug(f"{variantLabel}: Result filenames: {resultFilenames}")
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
|
||||||
|
jobFailed = bool(rc)
|
||||||
|
|
||||||
|
self._logger.debug(f"{variantLabel}: Should fail: {shouldFail} / actually failed: {jobFailed}")
|
||||||
|
|
||||||
|
assert (jobFailed == shouldFail
|
||||||
|
), f"Process {'failed' if jobFailed else 'did not fail'}"
|
||||||
|
|
||||||
|
if not jobFailed:
|
||||||
|
|
||||||
|
expectedResultFilePath = os.path.join(self._testDirectory, f"{expectedFilename}")
|
||||||
|
|
||||||
|
assert (os.path.isfile(expectedResultFilePath)
|
||||||
|
), f"Result file {expectedFilename} in path '{self._testDirectory}' wasn't created"
|
||||||
|
|
||||||
|
for assertIndex in range(len(assertSelectorList)):
|
||||||
|
|
||||||
|
assertSelector = assertSelectorList[assertIndex]
|
||||||
|
assertFunc = assertFuncList[assertIndex]
|
||||||
|
assertVariant = variantList[assertIndex]
|
||||||
|
|
||||||
|
if assertSelector == 'B':
|
||||||
|
#TODO: per file find
|
||||||
|
testObj = {'filenames': resultFilenames}
|
||||||
|
assertFunc(testObj=testObj)
|
||||||
|
if assertSelector == 'L':
|
||||||
|
assertFunc()
|
||||||
|
if assertSelector == 'I':
|
||||||
|
assertFunc()
|
||||||
|
|
||||||
|
self._context['test_passed_counter'] += 1
|
||||||
|
self._reportLogger.info(f"{variantLabel}: Test passed")
|
||||||
|
|
||||||
|
except AssertionError as ae:
|
||||||
|
|
||||||
|
self._context['test_failed_counter'] += 1
|
||||||
|
self._reportLogger.error(f"{variantLabel}: Test FAILED ({ae})")
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
for BC in BasenameCombinator.getAllClassReferences():
|
||||||
|
self._logger.debug(f"BC={BC.__name__}")
|
||||||
|
bc = BC(context = self._context)
|
||||||
|
for y in bc.getYield():
|
||||||
|
self.job(y)
|
||||||
177
bin/ffx/test/scenario_2.py
Normal file
177
bin/ffx/test/scenario_2.py
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
import os, sys, click
|
||||||
|
|
||||||
|
from .scenario import Scenario
|
||||||
|
|
||||||
|
from ffx.test.helper import createMediaTestFile
|
||||||
|
from ffx.process import executeProcess
|
||||||
|
|
||||||
|
from ffx.file_properties import FileProperties
|
||||||
|
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
|
from ffx.test.media_combinator import MediaCombinator
|
||||||
|
|
||||||
|
|
||||||
|
class Scenario2(Scenario):
|
||||||
|
"""Creating file VAa, h264/aac/aac
|
||||||
|
Converting to VaA, vp9/opus/opus
|
||||||
|
No tmdb, default parameters"""
|
||||||
|
|
||||||
|
TEST_FILE_EXTENSION = 'mkv'
|
||||||
|
EXPECTED_FILE_EXTENSION = 'webm'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context):
|
||||||
|
|
||||||
|
context['use_jellyfin'] = True
|
||||||
|
context['use_tmdb'] = False
|
||||||
|
context['use_pattern'] = False
|
||||||
|
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
def getScenario(self):
|
||||||
|
return self.__class__.__name__[8:]
|
||||||
|
|
||||||
|
|
||||||
|
def job(self, yieldObj: dict):
|
||||||
|
|
||||||
|
testContext = self._context.copy()
|
||||||
|
|
||||||
|
targetYieldObj = yieldObj['target']
|
||||||
|
# presetYieldObj = yieldObj['preset'] # not used here
|
||||||
|
|
||||||
|
identifier = targetYieldObj['identifier']
|
||||||
|
variantList = targetYieldObj['variants']
|
||||||
|
|
||||||
|
variantIdentifier = '-'.join(variantList)
|
||||||
|
variantLabel = f"{self.__class__.__name__} Variant {variantIdentifier}"
|
||||||
|
|
||||||
|
sourceMediaDescriptor: MediaDescriptor = targetYieldObj['payload']
|
||||||
|
|
||||||
|
assertSelectorList: list = targetYieldObj['assertSelectors']
|
||||||
|
assertFuncList = targetYieldObj['assertFuncs']
|
||||||
|
shouldFail = targetYieldObj['shouldFail']
|
||||||
|
|
||||||
|
try:
|
||||||
|
jellyfinSelectorIndex = assertSelectorList.index('J')
|
||||||
|
jellyfinVariant = variantList[jellyfinSelectorIndex]
|
||||||
|
testContext['use_jellyfin'] = jellyfinVariant == 'J1'
|
||||||
|
except ValueError:
|
||||||
|
jellyfinSelectorIndex = -1
|
||||||
|
|
||||||
|
|
||||||
|
if self._context['test_variant'] and not variantIdentifier.startswith(self._context['test_variant']):
|
||||||
|
return
|
||||||
|
|
||||||
|
if ((self._context['test_passed_counter'] + self._context['test_failed_counter'])
|
||||||
|
>= self._context['test_limit']):
|
||||||
|
return
|
||||||
|
|
||||||
|
self._logger.debug(f"Running Job: {variantLabel}")
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 1: Setup source files
|
||||||
|
|
||||||
|
self.clearTestDirectory()
|
||||||
|
mediaFilePath = createMediaTestFile(mediaDescriptor=sourceMediaDescriptor,
|
||||||
|
directory=self._testDirectory,
|
||||||
|
logger=self._logger,
|
||||||
|
length = 2)
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 2: Run ffx
|
||||||
|
|
||||||
|
commandSequence = [sys.executable,
|
||||||
|
self._ffxExecutablePath]
|
||||||
|
|
||||||
|
if self._context['verbosity']:
|
||||||
|
commandSequence += ['--verbose',
|
||||||
|
str(self._context['verbosity'])]
|
||||||
|
|
||||||
|
commandSequence += ['convert',
|
||||||
|
mediaFilePath,
|
||||||
|
'--no-prompt',
|
||||||
|
'--no-signature']
|
||||||
|
|
||||||
|
out, err, rc = executeProcess(commandSequence, directory = self._testDirectory, context = self._context)
|
||||||
|
|
||||||
|
if out and self._context['verbosity'] >= 9:
|
||||||
|
self._logger.debug(f"{variantLabel}: Process output: {out}")
|
||||||
|
if rc:
|
||||||
|
self._logger.debug(f"{variantLabel}: Process returned ERROR {rc} ({err})")
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 3: Evaluate results
|
||||||
|
|
||||||
|
resultFilenames = [rf for rf in self.getFilenamesInTestDirectory() if rf.endswith(f".{Scenario2.EXPECTED_FILE_EXTENSION}")]
|
||||||
|
|
||||||
|
self._logger.debug(f"{variantLabel}: Result filenames: {resultFilenames}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
|
||||||
|
jobFailed = bool(rc)
|
||||||
|
self._logger.debug(f"{variantLabel}: Should fail: {shouldFail} / actually failed: {jobFailed}")
|
||||||
|
|
||||||
|
assert (jobFailed == shouldFail
|
||||||
|
), f"Process {'failed' if jobFailed else 'did not fail'}"
|
||||||
|
|
||||||
|
|
||||||
|
if not jobFailed:
|
||||||
|
|
||||||
|
resultFile = os.path.join(self._testDirectory, 'media.webm')
|
||||||
|
|
||||||
|
assert (os.path.isfile(resultFile)
|
||||||
|
), f"Result file 'media.webm' in path '{self._testDirectory}' wasn't created"
|
||||||
|
|
||||||
|
resultFileProperties = FileProperties(testContext, resultFile)
|
||||||
|
resultMediaDescriptor = resultFileProperties.getMediaDescriptor()
|
||||||
|
|
||||||
|
# if testContext['use_jellyfin']:
|
||||||
|
# sourceMediaDescriptor.applyJellyfinOrder()
|
||||||
|
# resultMediaDescriptor.applySourceIndices(sourceMediaDescriptor)
|
||||||
|
|
||||||
|
resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
|
||||||
|
|
||||||
|
for assertIndex in range(len(assertSelectorList)):
|
||||||
|
|
||||||
|
assertSelector = assertSelectorList[assertIndex]
|
||||||
|
assertFunc = assertFuncList[assertIndex]
|
||||||
|
assertVariant = variantList[assertIndex]
|
||||||
|
|
||||||
|
if assertSelector == 'M':
|
||||||
|
assertFunc()
|
||||||
|
for variantIndex in range(len(assertVariant)):
|
||||||
|
assert (assertVariant[variantIndex].lower() == resultMediaTracks[variantIndex].getType().indicator()
|
||||||
|
), f"Stream #{variantIndex} is not of type {resultMediaTracks[variantIndex].getType().label()}"
|
||||||
|
|
||||||
|
elif assertSelector == 'AD' or assertSelector == 'AT':
|
||||||
|
assertFunc({'tracks': resultMediaDescriptor.getAudioTracks()})
|
||||||
|
|
||||||
|
elif assertSelector == 'SD' or assertSelector == 'ST':
|
||||||
|
assertFunc({'tracks': resultMediaDescriptor.getSubtitleTracks()})
|
||||||
|
|
||||||
|
elif type(assertSelector) is str:
|
||||||
|
if assertSelector == 'J':
|
||||||
|
assertFunc()
|
||||||
|
|
||||||
|
|
||||||
|
self._context['test_passed_counter'] += 1
|
||||||
|
self._reportLogger.info(f"{variantLabel}: Test passed")
|
||||||
|
|
||||||
|
except AssertionError as ae:
|
||||||
|
|
||||||
|
self._context['test_failed_counter'] += 1
|
||||||
|
self._reportLogger.error(f"{variantLabel}: Test FAILED ({ae})")
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
MC_list = MediaCombinator.getAllClassReferences()
|
||||||
|
for MC in MC_list:
|
||||||
|
self._logger.debug(f"MC={MC.__name__}")
|
||||||
|
mc = MC(context = self._context)
|
||||||
|
for y in mc.getYield():
|
||||||
|
self.job(y)
|
||||||
281
bin/ffx/test/scenario_4.py
Normal file
281
bin/ffx/test/scenario_4.py
Normal file
@@ -0,0 +1,281 @@
|
|||||||
|
import os, sys, click
|
||||||
|
|
||||||
|
from .scenario import Scenario
|
||||||
|
|
||||||
|
from ffx.test.helper import createMediaTestFile
|
||||||
|
from ffx.process import executeProcess
|
||||||
|
from ffx.database import databaseContext
|
||||||
|
from ffx.test.helper import createEmptyDirectory
|
||||||
|
|
||||||
|
from ffx.file_properties import FileProperties
|
||||||
|
|
||||||
|
from ffx.media_descriptor import MediaDescriptor
|
||||||
|
from ffx.track_descriptor import TrackDescriptor
|
||||||
|
|
||||||
|
from ffx.track_type import TrackType
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
|
||||||
|
from ffx.test.media_combinator import MediaCombinator
|
||||||
|
from ffx.test.indicator_combinator import IndicatorCombinator
|
||||||
|
|
||||||
|
from ffx.show_descriptor import ShowDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
from ffx.tmdb_controller import TmdbController
|
||||||
|
from ffx.tmdb_controller import TMDB_API_KEY_NOT_PRESENT_EXCEPTION
|
||||||
|
|
||||||
|
class Scenario4(Scenario):
|
||||||
|
|
||||||
|
TEST_SHOW_IDENTIFIER = 83095
|
||||||
|
TEST_SHOW_NAME = 'The Rising of the Shield Hero'
|
||||||
|
TEST_SHOW_YEAR = 2019
|
||||||
|
|
||||||
|
TEST_FILE_LABEL = 'rotsh'
|
||||||
|
TEST_FILE_EXTENSION = 'mkv'
|
||||||
|
|
||||||
|
TEST_PATTERN = f"{TEST_FILE_LABEL}_{FileProperties.SE_INDICATOR_PATTERN}.{TEST_FILE_EXTENSION}"
|
||||||
|
|
||||||
|
EXPECTED_FILE_EXTENSION = 'webm'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__tmdbApiKey = os.environ.get('TMDB_API_KEY', None)
|
||||||
|
if self.__tmdbApiKey is None:
|
||||||
|
raise TMDB_API_KEY_NOT_PRESENT_EXCEPTION
|
||||||
|
|
||||||
|
self.__ic = IndicatorCombinator(context = context)
|
||||||
|
self.__tc = TmdbController()
|
||||||
|
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
kwargs[ShowDescriptor.ID_KEY] = Scenario4.TEST_SHOW_IDENTIFIER
|
||||||
|
kwargs[ShowDescriptor.NAME_KEY] = Scenario4.TEST_SHOW_NAME
|
||||||
|
kwargs[ShowDescriptor.YEAR_KEY] = Scenario4.TEST_SHOW_YEAR
|
||||||
|
|
||||||
|
self.__testShowDescriptor = ShowDescriptor(**kwargs)
|
||||||
|
|
||||||
|
def getScenario(self):
|
||||||
|
return self.__class__.__name__[8:]
|
||||||
|
|
||||||
|
|
||||||
|
def prepareTestDatabase(self, sourceMediaDescriptor: MediaDescriptor):
|
||||||
|
|
||||||
|
if not self._context['database'] is None:
|
||||||
|
self._context['database']['engine'].dispose()
|
||||||
|
|
||||||
|
if os.path.isfile(self._testDbFilePath):
|
||||||
|
os.unlink(self._testDbFilePath)
|
||||||
|
self._context['database'] = None
|
||||||
|
|
||||||
|
self._logger.debug(f"Creating test db with path {self._testDbFilePath}")
|
||||||
|
self._context['database'] = databaseContext(databasePath=self._testDbFilePath)
|
||||||
|
|
||||||
|
|
||||||
|
self._logger.debug(f"Adding test show '{self.__testShowDescriptor.getFilenamePrefix()}' to test db")
|
||||||
|
if not self._sc.updateShow(self.__testShowDescriptor):
|
||||||
|
raise click.ClickException('Could not create test show in db')
|
||||||
|
|
||||||
|
testPatternDescriptor = {
|
||||||
|
'show_id': Scenario4.TEST_SHOW_IDENTIFIER,
|
||||||
|
'pattern': Scenario4.TEST_PATTERN
|
||||||
|
}
|
||||||
|
patternId = self._pc.addPattern(testPatternDescriptor)
|
||||||
|
|
||||||
|
if patternId:
|
||||||
|
self._mc.setPatternMediaDescriptor(sourceMediaDescriptor, patternId)
|
||||||
|
|
||||||
|
|
||||||
|
def job(self, yieldObj: dict):
|
||||||
|
|
||||||
|
testContext = self._context.copy()
|
||||||
|
|
||||||
|
if 'preset' not in yieldObj.keys():
|
||||||
|
raise KeyError('yieldObj did not contain presets')
|
||||||
|
|
||||||
|
targetYieldObj = yieldObj['target']
|
||||||
|
presetYieldObj = yieldObj['preset']
|
||||||
|
|
||||||
|
identifier = targetYieldObj['identifier']
|
||||||
|
variantList = targetYieldObj['variants']
|
||||||
|
|
||||||
|
variantIdentifier = '-'.join(variantList)
|
||||||
|
variantLabel = f"{self.__class__.__name__} Variant {variantIdentifier}"
|
||||||
|
|
||||||
|
sourceMediaDescriptor: MediaDescriptor = targetYieldObj['payload']
|
||||||
|
presetMediaDescriptor: MediaDescriptor = presetYieldObj['payload']
|
||||||
|
|
||||||
|
assertSelectorList: list = presetYieldObj['assertSelectors']
|
||||||
|
assertFuncList = presetYieldObj['assertFuncs']
|
||||||
|
shouldFail = presetYieldObj['shouldFail']
|
||||||
|
|
||||||
|
try:
|
||||||
|
jellyfinSelectorIndex = assertSelectorList.index('J')
|
||||||
|
jellyfinVariant = variantList[jellyfinSelectorIndex]
|
||||||
|
testContext['use_jellyfin'] = jellyfinVariant == 'J1'
|
||||||
|
except ValueError:
|
||||||
|
jellyfinSelectorIndex = -1
|
||||||
|
|
||||||
|
|
||||||
|
if self._context['test_variant'] and not variantIdentifier.startswith(self._context['test_variant']):
|
||||||
|
return
|
||||||
|
|
||||||
|
if ((self._context['test_passed_counter'] + self._context['test_failed_counter'])
|
||||||
|
>= self._context['test_limit']):
|
||||||
|
return
|
||||||
|
|
||||||
|
self._logger.debug(f"Running Job: {variantLabel}")
|
||||||
|
|
||||||
|
|
||||||
|
for l in presetMediaDescriptor.getConfiguration(label = 'presetMediaDescriptor'):
|
||||||
|
self._logger.debug(l)
|
||||||
|
|
||||||
|
for l in sourceMediaDescriptor.getConfiguration(label = 'sourceMediaDescriptor'):
|
||||||
|
self._logger.debug(l)
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 1: Setup source files
|
||||||
|
|
||||||
|
self.clearTestDirectory()
|
||||||
|
|
||||||
|
testFileList = []
|
||||||
|
for indicatorObj in [y for y in self.__ic.getYield() if y['indicator']]:
|
||||||
|
|
||||||
|
indicator = indicatorObj['indicator']
|
||||||
|
|
||||||
|
testFileObj = {}
|
||||||
|
testFileObj['season'] = indicatorObj['season']
|
||||||
|
testFileObj['episode'] = indicatorObj['episode']
|
||||||
|
|
||||||
|
testFileObj['basename'] = f"{Scenario4.TEST_FILE_LABEL}_{indicator}"
|
||||||
|
|
||||||
|
testFileObj['path'] = createMediaTestFile(mediaDescriptor = presetMediaDescriptor,
|
||||||
|
directory = self._testDirectory,
|
||||||
|
baseName = testFileObj['basename'],
|
||||||
|
logger=self._logger,
|
||||||
|
length = 2)
|
||||||
|
testFileObj['filename'] = f"{testFileObj['basename']}.{Scenario4.TEST_FILE_EXTENSION}"
|
||||||
|
|
||||||
|
testFileList.append(testFileObj)
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 2: Prepare database
|
||||||
|
|
||||||
|
self.createEmptyTestDatabase()
|
||||||
|
self.prepareTestDatabase(sourceMediaDescriptor)
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 3: Run ffx
|
||||||
|
|
||||||
|
commandSequence = [sys.executable,
|
||||||
|
self._ffxExecutablePath]
|
||||||
|
|
||||||
|
if self._context['verbosity']:
|
||||||
|
commandSequence += ['--verbose',
|
||||||
|
str(self._context['verbosity'])]
|
||||||
|
|
||||||
|
commandSequence += ['--database-file',
|
||||||
|
self._testDbFilePath,
|
||||||
|
'convert']
|
||||||
|
commandSequence += [tfo['filename'] for tfo in testFileList]
|
||||||
|
|
||||||
|
commandSequence += ['--no-prompt', '--no-signature']
|
||||||
|
|
||||||
|
out, err, rc = executeProcess(commandSequence, directory = self._testDirectory, context = self._context)
|
||||||
|
|
||||||
|
if out and self._context['verbosity'] >= 9:
|
||||||
|
self._logger.debug(f"{variantLabel}: Process output: {out}")
|
||||||
|
if rc:
|
||||||
|
self._logger.debug(f"{variantLabel}: Process returned ERROR {rc} ({err})")
|
||||||
|
|
||||||
|
|
||||||
|
# Phase 4: Evaluate results
|
||||||
|
|
||||||
|
resultFilenames = [rf for rf in self.getFilenamesInTestDirectory() if rf.endswith(f".{Scenario4.EXPECTED_FILE_EXTENSION}")]
|
||||||
|
|
||||||
|
self._logger.debug(f"{variantLabel}: Result filenames: {resultFilenames}")
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
|
||||||
|
jobFailed = bool(rc)
|
||||||
|
self._logger.debug(f"{variantLabel}: Should fail: {shouldFail} / actually failed: {jobFailed}")
|
||||||
|
|
||||||
|
assert (jobFailed == shouldFail
|
||||||
|
), f"Process {'failed' if jobFailed else 'did not fail'}"
|
||||||
|
|
||||||
|
if not jobFailed:
|
||||||
|
|
||||||
|
for tfo in testFileList:
|
||||||
|
|
||||||
|
tmdbEpisodeResult = self.__tc.queryEpisode(Scenario4.TEST_SHOW_IDENTIFIER,
|
||||||
|
tfo['season'], tfo['episode'])
|
||||||
|
|
||||||
|
expectedFileBasename = TmdbController.getEpisodeFileBasename(self.__testShowDescriptor.getFilenamePrefix(),
|
||||||
|
tmdbEpisodeResult['name'],
|
||||||
|
tfo['season'], tfo['episode'])
|
||||||
|
|
||||||
|
expectedFilename = f"{expectedFileBasename}.{Scenario4.EXPECTED_FILE_EXTENSION}"
|
||||||
|
expectedFilePath = os.path.join(self._testDirectory, expectedFilename)
|
||||||
|
|
||||||
|
assert (os.path.isfile(expectedFilePath)
|
||||||
|
), f"Result file '{expectedFilename}' in path '{self._testDirectory}' wasn't created"
|
||||||
|
|
||||||
|
|
||||||
|
rfp = FileProperties(testContext, expectedFilePath)
|
||||||
|
self._logger.debug(f"{variantLabel}: Result file properties: {rfp.getFilename()} season={rfp.getSeason()} episode={rfp.getEpisode()}")
|
||||||
|
|
||||||
|
rmd = rfp.getMediaDescriptor()
|
||||||
|
rmt = rmd.getAllTrackDescriptors()
|
||||||
|
|
||||||
|
for l in rmd.getConfiguration(label = 'resultMediaDescriptor'):
|
||||||
|
self._logger.debug(l)
|
||||||
|
|
||||||
|
# if testContext['use_jellyfin']:
|
||||||
|
# sourceMediaDescriptor.applyJellyfinOrder()
|
||||||
|
|
||||||
|
# num tracks differ
|
||||||
|
rmd.applySourceIndices(sourceMediaDescriptor)
|
||||||
|
|
||||||
|
|
||||||
|
for assertIndex in range(len(assertSelectorList)):
|
||||||
|
|
||||||
|
assertSelector = assertSelectorList[assertIndex]
|
||||||
|
assertFunc = assertFuncList[assertIndex]
|
||||||
|
assertVariant = variantList[assertIndex]
|
||||||
|
|
||||||
|
if assertSelector == 'M':
|
||||||
|
assertFunc()
|
||||||
|
for variantIndex in range(len(assertVariant)):
|
||||||
|
assert (assertVariant[variantIndex].lower() == rmt[variantIndex].getType().indicator()
|
||||||
|
), f"Stream #{variantIndex} is not of type {rmt[variantIndex].getType().label()}"
|
||||||
|
|
||||||
|
if assertSelector == 'AD' or assertSelector == 'AT':
|
||||||
|
assertFunc({'tracks': rmd.getAudioTracks()})
|
||||||
|
|
||||||
|
elif assertSelector == 'SD' or assertSelector == 'ST':
|
||||||
|
assertFunc({'tracks': rmd.getSubtitleTracks()})
|
||||||
|
|
||||||
|
elif type(assertSelector) is str:
|
||||||
|
if assertSelector == 'J':
|
||||||
|
assertFunc()
|
||||||
|
|
||||||
|
|
||||||
|
self._context['test_passed_counter'] += 1
|
||||||
|
self._reportLogger.info(f"\n{variantLabel}: Test passed\n")
|
||||||
|
|
||||||
|
except AssertionError as ae:
|
||||||
|
|
||||||
|
self._context['test_failed_counter'] += 1
|
||||||
|
self._reportLogger.error(f"\n{variantLabel}: Test FAILED ({ae})\n")
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
|
||||||
|
MC_list = [MediaCombinator.getClassReference(6)]
|
||||||
|
for MC in MC_list:
|
||||||
|
self._logger.debug(f"MC={MC.__name__}")
|
||||||
|
mc = MC(context = self._context, createPresets = True)
|
||||||
|
for y in mc.getYield():
|
||||||
|
self.job(y)
|
||||||
33
bin/ffx/test/show_combinator.py
Normal file
33
bin/ffx/test/show_combinator.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
class ShowCombinator():
|
||||||
|
|
||||||
|
IDENTIFIER = 'show'
|
||||||
|
|
||||||
|
SHOW_LIST = [
|
||||||
|
'Boruto; Naruto Next Generations (2017)',
|
||||||
|
'The Rising of the Shield Hero (2019)',
|
||||||
|
'Scrubs - Die Anfänger (2001)'
|
||||||
|
]
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return ShowCombinator.IDENTIFIER
|
||||||
|
|
||||||
|
def getPayload(self, showIndex):
|
||||||
|
return {
|
||||||
|
'variant': f"S{showIndex}",
|
||||||
|
'show': ShowCombinator.SHOW_LIST[showIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
for showIndex in range(len(ShowCombinator.SHOW_LIST)):
|
||||||
|
yield self.getPayload(showIndex)
|
||||||
33
bin/ffx/test/title_combinator.py
Normal file
33
bin/ffx/test/title_combinator.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
class TitleCombinator():
|
||||||
|
|
||||||
|
IDENTIFIER = 'title'
|
||||||
|
|
||||||
|
TITLE_LIST = [
|
||||||
|
'The Sound of Space',
|
||||||
|
'2001; Odyssee im Weltraum (1968)',
|
||||||
|
'Ansible 101'
|
||||||
|
]
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return TitleCombinator.IDENTIFIER
|
||||||
|
|
||||||
|
def getPayload(self, titleIndex):
|
||||||
|
return {
|
||||||
|
'variant': f"S{titleIndex}",
|
||||||
|
'show': TitleCombinator.TITLE_LIST[titleIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
def assertFunc(self, testObj = {}):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getYield(self):
|
||||||
|
for titleIndex in range(len(TitleCombinator.TITLE_LIST)):
|
||||||
|
yield self.getPayload(titleIndex)
|
||||||
33
bin/ffx/test/track_tag_combinator_2.py
Normal file
33
bin/ffx/test/track_tag_combinator_2.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class TrackTagCombinator2():
|
||||||
|
|
||||||
|
IDENTIFIER = 'trackTag2'
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return TrackTagCombinator2.IDENTIFIER
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def list():
|
||||||
|
basePath = os.path.dirname(__file__)
|
||||||
|
return [os.path.basename(p)[23:-3]
|
||||||
|
for p
|
||||||
|
in glob.glob(f"{ basePath }/track_tag_combinator_2_*.py", recursive = True)
|
||||||
|
if p != __file__]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getClassReference(identifier):
|
||||||
|
importlib.import_module(f"ffx.test.track_tag_combinator_2_{ identifier }")
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.track_tag_combinator_2_{ identifier }"]):
|
||||||
|
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
||||||
|
if inspect.isclass(obj) and name != 'TrackTagCombinator2' and name.startswith('TrackTagCombinator2'):
|
||||||
|
return obj
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getAllClassReferences():
|
||||||
|
return [TrackTagCombinator2.getClassReference(i) for i in TrackTagCombinator2.list()]
|
||||||
89
bin/ffx/test/track_tag_combinator_2_0.py
Normal file
89
bin/ffx/test/track_tag_combinator_2_0.py
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator20(TrackTagCombinator2):
|
||||||
|
|
||||||
|
VARIANT = 'T00'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return TrackTagCombinator20.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
subTrack0 = {}
|
||||||
|
subTrack1 = {}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
subTrack0['THIS_IS'] = 'track0'
|
||||||
|
subTrack1['THIS_IS'] = 'track1'
|
||||||
|
|
||||||
|
return (subTrack0,
|
||||||
|
subTrack1)
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
96
bin/ffx/test/track_tag_combinator_2_1.py
Normal file
96
bin/ffx/test/track_tag_combinator_2_1.py
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator21(TrackTagCombinator2):
|
||||||
|
|
||||||
|
VARIANT = 'T10'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return TrackTagCombinator21.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
subTrack0 = ({'language': 'rus', 'title': 'Russisch'}
|
||||||
|
if self.__createPresets else {'language': 'fra', 'title': 'Französisch'})
|
||||||
|
subTrack1 = {}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
subTrack0['THIS_IS'] = 'track0'
|
||||||
|
subTrack1['THIS_IS'] = 'track1'
|
||||||
|
|
||||||
|
return (subTrack0,
|
||||||
|
subTrack1)
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['language'] == 'fra'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'fra'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['title'] == 'Französisch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Französisch'"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['language'] == 'fra'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'fra'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['title'] == 'Französisch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Französisch'"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
97
bin/ffx/test/track_tag_combinator_2_2.py
Normal file
97
bin/ffx/test/track_tag_combinator_2_2.py
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator21(TrackTagCombinator2):
|
||||||
|
|
||||||
|
VARIANT = 'T01'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return TrackTagCombinator21.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
subTrack0 = {}
|
||||||
|
subTrack1 = ({'language': 'chn', 'title': 'China'}
|
||||||
|
if self.__createPresets else {'language': 'bas', 'title': 'Baskisch'})
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
subTrack0['THIS_IS'] = 'track0'
|
||||||
|
subTrack1['THIS_IS'] = 'track1'
|
||||||
|
|
||||||
|
return (subTrack0,
|
||||||
|
subTrack1)
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['language'] == 'bas'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag 'bas'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['title'] == 'Baskisch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title 'Baskisch'"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['language'] == 'bas'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag 'bas'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['title'] == 'Baskisch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title 'Baskisch'"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
|
|
||||||
106
bin/ffx/test/track_tag_combinator_2_3.py
Normal file
106
bin/ffx/test/track_tag_combinator_2_3.py
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator23(TrackTagCombinator2):
|
||||||
|
|
||||||
|
VARIANT = 'T11'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return TrackTagCombinator23.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
subTrack0 = ({'language': 'chn', 'title': 'China'}
|
||||||
|
if self.__createPresets else {'language': 'deu', 'title': 'Deutsch'})
|
||||||
|
subTrack1 = {'language': 'jpn', 'title': 'Japanisch'}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
subTrack0['THIS_IS'] = 'track0'
|
||||||
|
subTrack1['THIS_IS'] = 'track1'
|
||||||
|
|
||||||
|
return (subTrack0,
|
||||||
|
subTrack1)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['language'] == 'deu'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'deu'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['title'] == 'Deutsch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Deutsch'"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['language'] == 'jpn'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'jpn'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['title'] == 'Japanisch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Japanisch'"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['language'] == 'deu'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'deu'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['title'] == 'Deutsch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Deutsch'"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['language'] == 'jpn'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'jpn'"
|
||||||
|
assert ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['title'] == 'Japanisch'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Japanisch'"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
33
bin/ffx/test/track_tag_combinator_3.py
Normal file
33
bin/ffx/test/track_tag_combinator_3.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
import os, sys, importlib, glob, inspect, itertools
|
||||||
|
|
||||||
|
class TrackTagCombinator3():
|
||||||
|
|
||||||
|
IDENTIFIER = 'trackTag3'
|
||||||
|
|
||||||
|
def __init__(self, context = None):
|
||||||
|
self._context = context
|
||||||
|
self._logger = context['logger']
|
||||||
|
self._reportLogger = context['report_logger']
|
||||||
|
|
||||||
|
def getIdentifier(self):
|
||||||
|
return TrackTagCombinator3.IDENTIFIER
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def list():
|
||||||
|
basePath = os.path.dirname(__file__)
|
||||||
|
return [os.path.basename(p)[23:-3]
|
||||||
|
for p
|
||||||
|
in glob.glob(f"{ basePath }/track_tag_combinator_3_*.py", recursive = True)
|
||||||
|
if p != __file__]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getClassReference(identifier):
|
||||||
|
importlib.import_module(f"ffx.test.track_tag_combinator_3_{ identifier }")
|
||||||
|
for name, obj in inspect.getmembers(sys.modules[f"ffx.test.track_tag_combinator_3_{ identifier }"]):
|
||||||
|
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
||||||
|
if inspect.isclass(obj) and name != 'TrackTagCombinator3' and name.startswith('TrackTagCombinator3'):
|
||||||
|
return obj
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def getAllClassReferences():
|
||||||
|
return [TrackTagCombinator3.getClassReference(i) for i in TrackTagCombinator3.list()]
|
||||||
105
bin/ffx/test/track_tag_combinator_3_0.py
Normal file
105
bin/ffx/test/track_tag_combinator_3_0.py
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator30(TrackTagCombinator3):
|
||||||
|
|
||||||
|
VARIANT = 'T000'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return TrackTagCombinator30.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
subtrack0 = {}
|
||||||
|
subtrack1 = {}
|
||||||
|
subtrack2 = {}
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
subtrack0['THIS_IS'] = 'track0'
|
||||||
|
subtrack1['THIS_IS'] = 'track1'
|
||||||
|
subtrack2['THIS_IS'] = 'track2'
|
||||||
|
|
||||||
|
return (subtrack0,
|
||||||
|
subtrack1,
|
||||||
|
subtrack2)
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert not ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert not ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert not ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
|
||||||
|
assert not ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
# source subIndex 2
|
||||||
|
assert not ('language' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} index={resortedTrackDescriptors[2].getIndex()} [{resortedTrackDescriptors[2].getType().label()}:{resortedTrackDescriptors[2].getSubIndex()}] has set language tag"
|
||||||
|
assert not ('title' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} index={resortedTrackDescriptors[2].getIndex()} [{resortedTrackDescriptors[2].getType().label()}:{resortedTrackDescriptors[2].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[2].getTags().keys() and resortedTrackDescriptors[2].getTags()['THIS_IS'] == 'track2'
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} index={resortedTrackDescriptors[2].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[2].getSubIndex()}] has not preserved tag THIS_IS"
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert not ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
|
||||||
|
assert not ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert not ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
|
||||||
|
assert not ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
# source subIndex 2
|
||||||
|
assert not ('language' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} index={resortedTrackDescriptors[2].getIndex()} [{resortedTrackDescriptors[2].getType().label()}:{resortedTrackDescriptors[2].getSubIndex()}] has set language tag"
|
||||||
|
assert not ('title' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} index={resortedTrackDescriptors[2].getIndex()} [{resortedTrackDescriptors[2].getType().label()}:{resortedTrackDescriptors[2].getSubIndex()}] has set title tag"
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
114
bin/ffx/test/track_tag_combinator_3_1.py
Normal file
114
bin/ffx/test/track_tag_combinator_3_1.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator31(TrackTagCombinator3):
    """Track tag combination test case, variant T100.

    Exactly one of the three sub-tracks (sub-index 0) carries a
    language/title tag payload; the other two stay untagged.  With
    ``createPresets`` the payload additionally seeds every sub-track with a
    ``THIS_IS`` marker tag and deliberately different language/title presets
    ('rus'/'Russisch') that the combinator is expected to overwrite with
    'fra'/'Französisch'.
    """

    VARIANT = 'T100'

    def __init__(self, context = None,
                 createPresets: bool = False):
        """Create the test case.

        :param context: forwarded unchanged to TrackTagCombinator3.
        :param createPresets: if True, getPayload() seeds preset tags and
            the assert function additionally checks their preservation.
        """
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        """Return the variant label of this test case."""
        return TrackTagCombinator31.VARIANT

    def getPayload(self):
        """Return one tag dict per sub-track; only sub-track 0 is tagged."""
        if self.__createPresets:
            subtrack0 = {'language': 'rus', 'title': 'Russisch'}
        else:
            subtrack0 = {'language': 'fra', 'title': 'Französisch'}
        subtrack1 = {}
        subtrack2 = {}

        if self.__createPresets:
            # Marker tags that must survive the combination untouched.
            subtrack0['THIS_IS'] = 'track0'
            subtrack1['THIS_IS'] = 'track1'
            subtrack2['THIS_IS'] = 'track2'

        return (subtrack0,
                subtrack1,
                subtrack2)

    @staticmethod
    def __describe(descriptor):
        # Uniform assertion-message prefix for one track descriptor.
        # The previous inline f-strings occasionally referenced the wrong
        # descriptor index (e.g. descriptor 1's type while reporting on
        # descriptor 2); building the prefix here fixes that.
        return (f"Stream src_index={descriptor.getSourceIndex()}"
                f" index={descriptor.getIndex()}"
                f" [{descriptor.getType().label()}:{descriptor.getSubIndex()}]")

    def createAssertFunc(self):
        """Build the callable that validates the combined track tags.

        The returned function expects ``assertObj['tracks']`` (a list of
        track descriptors) and raises AssertionError on any deviation;
        KeyError if the 'tracks' key is missing.
        """
        checkPresets = self.__createPresets
        describe = self.__describe

        def f(assertObj = None):
            # Avoid the original mutable default argument; behavior is
            # unchanged (an empty dict still raises KeyError below).
            assertObj = {} if assertObj is None else assertObj
            if 'tracks' not in assertObj.keys():
                raise KeyError("assertObj does not contain key 'tracks'")
            descriptors = sorted(assertObj['tracks'],
                                 key=lambda d: d.getSourceIndex())

            # source subIndex 0: must carry exactly the mapped tags
            d0 = descriptors[0]
            assert ('language' in d0.getTags().keys()
                ), f"{describe(d0)} has not set language tag"
            assert (d0.getTags()['language'] == 'fra'
                ), f"{describe(d0)} has not set language tag 'fra'"
            assert ('title' in d0.getTags().keys()
                ), f"{describe(d0)} has not set title tag"
            assert (d0.getTags()['title'] == 'Französisch'
                ), f"{describe(d0)} has not set title 'Französisch'"

            # source subIndexes 1 and 2: must remain untagged
            for d in (descriptors[1], descriptors[2]):
                assert ('language' not in d.getTags().keys()
                    ), f"{describe(d)} has set language tag"
                assert ('title' not in d.getTags().keys()
                    ), f"{describe(d)} has set title tag"

            if checkPresets:
                # Preset marker tags must be preserved on every sub-track.
                for index, d in enumerate(descriptors[:3]):
                    assert ('THIS_IS' in d.getTags().keys()
                            and d.getTags()['THIS_IS'] == f'track{index}'
                        ), f"{describe(d)} has not preserved tag THIS_IS"

        return f

    def shouldFail(self):
        """This variant is expected to combine successfully."""
        return False
|
||||||
114
bin/ffx/test/track_tag_combinator_3_2.py
Normal file
114
bin/ffx/test/track_tag_combinator_3_2.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator32(TrackTagCombinator3):
    """Track tag combination test case, variant T010.

    Exactly one of the three sub-tracks (sub-index 1) carries a
    language/title tag payload; the other two stay untagged.  With
    ``createPresets`` the payload additionally seeds every sub-track with a
    ``THIS_IS`` marker tag and deliberately different language/title presets
    ('chn'/'China') that the combinator is expected to overwrite with
    'bas'/'Baskisch'.
    """

    VARIANT = 'T010'

    def __init__(self, context = None,
                 createPresets: bool = False):
        """Create the test case.

        :param context: forwarded unchanged to TrackTagCombinator3.
        :param createPresets: if True, getPayload() seeds preset tags and
            the assert function additionally checks their preservation.
        """
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        """Return the variant label of this test case."""
        return TrackTagCombinator32.VARIANT

    def getPayload(self):
        """Return one tag dict per sub-track; only sub-track 1 is tagged."""
        subtrack0 = {}
        if self.__createPresets:
            subtrack1 = {'language': 'chn', 'title': 'China'}
        else:
            subtrack1 = {'language': 'bas', 'title': 'Baskisch'}
        subtrack2 = {}

        if self.__createPresets:
            # Marker tags that must survive the combination untouched.
            subtrack0['THIS_IS'] = 'track0'
            subtrack1['THIS_IS'] = 'track1'
            subtrack2['THIS_IS'] = 'track2'

        return (subtrack0,
                subtrack1,
                subtrack2)

    @staticmethod
    def __describe(descriptor):
        # Uniform assertion-message prefix for one track descriptor.
        # The previous inline f-strings referenced the wrong descriptor in
        # several messages (descriptor 0's subIndex and descriptor 1's type
        # while reporting on descriptors 1 and 2); centralizing the prefix
        # here fixes that.
        return (f"Stream src_index={descriptor.getSourceIndex()}"
                f" index={descriptor.getIndex()}"
                f" [{descriptor.getType().label()}:{descriptor.getSubIndex()}]")

    def createAssertFunc(self):
        """Build the callable that validates the combined track tags.

        The returned function expects ``assertObj['tracks']`` (a list of
        track descriptors) and raises AssertionError on any deviation;
        KeyError if the 'tracks' key is missing.
        """
        checkPresets = self.__createPresets
        describe = self.__describe

        def f(assertObj = None):
            # Avoid the original mutable default argument; behavior is
            # unchanged (an empty dict still raises KeyError below).
            assertObj = {} if assertObj is None else assertObj
            if 'tracks' not in assertObj.keys():
                raise KeyError("assertObj does not contain key 'tracks'")
            descriptors = sorted(assertObj['tracks'],
                                 key=lambda d: d.getSourceIndex())

            # source subIndexes 0 and 2: must remain untagged
            for d in (descriptors[0], descriptors[2]):
                assert ('language' not in d.getTags().keys()
                    ), f"{describe(d)} has set language tag"
                assert ('title' not in d.getTags().keys()
                    ), f"{describe(d)} has set title tag"

            # source subIndex 1: must carry exactly the mapped tags
            d1 = descriptors[1]
            assert ('language' in d1.getTags().keys()
                ), f"{describe(d1)} has not set language tag"
            assert (d1.getTags()['language'] == 'bas'
                ), f"{describe(d1)} has not set language tag 'bas'"
            assert ('title' in d1.getTags().keys()
                ), f"{describe(d1)} has not set title tag"
            assert (d1.getTags()['title'] == 'Baskisch'
                ), f"{describe(d1)} has not set title 'Baskisch'"

            if checkPresets:
                # Preset marker tags must be preserved on every sub-track.
                for index, d in enumerate(descriptors[:3]):
                    assert ('THIS_IS' in d.getTags().keys()
                            and d.getTags()['THIS_IS'] == f'track{index}'
                        ), f"{describe(d)} has not preserved tag THIS_IS"

        return f

    def shouldFail(self):
        """This variant is expected to combine successfully."""
        return False
|
||||||
114
bin/ffx/test/track_tag_combinator_3_3.py
Normal file
114
bin/ffx/test/track_tag_combinator_3_3.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator33(TrackTagCombinator3):
    """Track tag combination test case, variant T001.

    Exactly one of the three sub-tracks (sub-index 2) carries a
    language/title tag payload; the other two stay untagged.  With
    ``createPresets`` the payload additionally seeds every sub-track with a
    ``THIS_IS`` marker tag and deliberately different language/title presets
    ('tkm'/'Turkmenistan') that the combinator is expected to overwrite with
    'eng'/'English'.
    """

    VARIANT = 'T001'

    def __init__(self, context = None,
                 createPresets: bool = False):
        """Create the test case.

        :param context: forwarded unchanged to TrackTagCombinator3.
        :param createPresets: if True, getPayload() seeds preset tags and
            the assert function additionally checks their preservation.
        """
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        """Return the variant label of this test case."""
        return TrackTagCombinator33.VARIANT

    def getPayload(self):
        """Return one tag dict per sub-track; only sub-track 2 is tagged."""
        subtrack0 = {}
        subtrack1 = {}
        if self.__createPresets:
            subtrack2 = {'language': 'tkm', 'title': 'Turkmenistan'}
        else:
            subtrack2 = {'language': 'eng', 'title': 'English'}

        if self.__createPresets:
            # Marker tags that must survive the combination untouched.
            subtrack0['THIS_IS'] = 'track0'
            subtrack1['THIS_IS'] = 'track1'
            subtrack2['THIS_IS'] = 'track2'

        return (subtrack0,
                subtrack1,
                subtrack2)

    @staticmethod
    def __describe(descriptor):
        # Uniform assertion-message prefix for one track descriptor.
        # The previous inline f-strings referenced the wrong descriptor in
        # several messages (descriptor 0's subIndex and descriptor 1's type
        # while reporting on descriptor 2); centralizing the prefix here
        # fixes that.
        return (f"Stream src_index={descriptor.getSourceIndex()}"
                f" index={descriptor.getIndex()}"
                f" [{descriptor.getType().label()}:{descriptor.getSubIndex()}]")

    def createAssertFunc(self):
        """Build the callable that validates the combined track tags.

        The returned function expects ``assertObj['tracks']`` (a list of
        track descriptors) and raises AssertionError on any deviation;
        KeyError if the 'tracks' key is missing.
        """
        checkPresets = self.__createPresets
        describe = self.__describe

        def f(assertObj = None):
            # Avoid the original mutable default argument; behavior is
            # unchanged (an empty dict still raises KeyError below).
            assertObj = {} if assertObj is None else assertObj
            if 'tracks' not in assertObj.keys():
                raise KeyError("assertObj does not contain key 'tracks'")
            descriptors = sorted(assertObj['tracks'],
                                 key=lambda d: d.getSourceIndex())

            # source subIndexes 0 and 1: must remain untagged
            for d in (descriptors[0], descriptors[1]):
                assert ('language' not in d.getTags().keys()
                    ), f"{describe(d)} has set language tag"
                assert ('title' not in d.getTags().keys()
                    ), f"{describe(d)} has set title tag"

            # source subIndex 2: must carry exactly the mapped tags
            d2 = descriptors[2]
            assert ('language' in d2.getTags().keys()
                ), f"{describe(d2)} has not set language tag"
            assert (d2.getTags()['language'] == 'eng'
                ), f"{describe(d2)} has not set language tag 'eng'"
            assert ('title' in d2.getTags().keys()
                ), f"{describe(d2)} has not set title tag"
            assert (d2.getTags()['title'] == 'English'
                ), f"{describe(d2)} has not set title 'English'"

            if checkPresets:
                # Preset marker tags must be preserved on every sub-track.
                for index, d in enumerate(descriptors[:3]):
                    assert ('THIS_IS' in d.getTags().keys()
                            and d.getTags()['THIS_IS'] == f'track{index}'
                        ), f"{describe(d)} has not preserved tag THIS_IS"

        return f

    def shouldFail(self):
        """This variant is expected to combine successfully."""
        return False
|
||||||
214
bin/ffx/test/track_tag_combinator_3_4.py
Normal file
214
bin/ffx/test/track_tag_combinator_3_4.py
Normal file
@@ -0,0 +1,214 @@
|
|||||||
|
import os, sys, importlib, glob, inspect
|
||||||
|
|
||||||
|
from ffx.track_disposition import TrackDisposition
|
||||||
|
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||||
|
|
||||||
|
|
||||||
|
class TrackTagCombinator34(TrackTagCombinator3):
|
||||||
|
|
||||||
|
VARIANT = 'T111'
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, context = None,
|
||||||
|
createPresets: bool = False):
|
||||||
|
super().__init__(context)
|
||||||
|
|
||||||
|
self.__createPresets = createPresets
|
||||||
|
|
||||||
|
def getVariant(self):
|
||||||
|
return TrackTagCombinator34.VARIANT
|
||||||
|
|
||||||
|
|
||||||
|
def getPayload(self):
|
||||||
|
|
||||||
|
subtrack0 = {'language': 'jpn', 'title': 'Japanisch'}
|
||||||
|
subtrack1 = {'language': 'deu', 'title': 'Deutsch'}
|
||||||
|
subtrack2 = ({'language': 'tkm', 'title': 'Turkmenistan'}
|
||||||
|
if self.__createPresets else {'language': 'eng', 'title': 'English'})
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
subtrack0['THIS_IS'] = 'track0'
|
||||||
|
subtrack1['THIS_IS'] = 'track1'
|
||||||
|
subtrack2['THIS_IS'] = 'track2'
|
||||||
|
|
||||||
|
return (subtrack0,
|
||||||
|
subtrack1,
|
||||||
|
subtrack2)
|
||||||
|
|
||||||
|
|
||||||
|
def createAssertFunc(self):
|
||||||
|
|
||||||
|
if self.__createPresets:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag")
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['language'] == 'jpn'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'jpn'")
|
||||||
|
assert ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag")
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['title'] == 'Japanisch'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Japanisch'")
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS")
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag")
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['language'] == 'deu'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'deu'")
|
||||||
|
assert ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag")
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['title'] == 'Deutsch'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Deutsch'")
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS")
|
||||||
|
|
||||||
|
# source subIndex 2
|
||||||
|
assert ('language' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag")
|
||||||
|
assert (resortedTrackDescriptors[2].getTags()['language'] == 'eng'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'eng'")
|
||||||
|
assert ('title' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag")
|
||||||
|
assert (resortedTrackDescriptors[2].getTags()['title'] == 'English'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'English'")
|
||||||
|
|
||||||
|
assert ('THIS_IS' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
and resortedTrackDescriptors[2].getTags()['THIS_IS'] == 'track2'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()}"
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()}"
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[2].getSubIndex()}] has not preserved tag THIS_IS")
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def f(assertObj: dict = {}):
|
||||||
|
|
||||||
|
if not 'tracks' in assertObj.keys():
|
||||||
|
raise KeyError("assertObj does not contain key 'tracks'")
|
||||||
|
resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())
|
||||||
|
|
||||||
|
# source subIndex 0
|
||||||
|
assert ('language' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag")
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['language'] == 'jpn'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'jpn'")
|
||||||
|
assert ('title' in resortedTrackDescriptors[0].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag")
|
||||||
|
assert (resortedTrackDescriptors[0].getTags()['title'] == 'Japanisch'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[0].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[0].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Japanisch'")
|
||||||
|
|
||||||
|
# source subIndex 1
|
||||||
|
assert ('language' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag")
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['language'] == 'deu'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag 'deu'")
|
||||||
|
assert ('title' in resortedTrackDescriptors[1].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[1].getSubIndex()}] has not set title tag")
|
||||||
|
assert (resortedTrackDescriptors[1].getTags()['title'] == 'Deutsch'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[1].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[1].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[1].getSubIndex()}] has not set title 'Deutsch'")
|
||||||
|
|
||||||
|
# source subIndex 2
|
||||||
|
assert ('language' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[2].getSubIndex()}] has not set language tag")
|
||||||
|
assert (resortedTrackDescriptors[2].getTags()['language'] == 'eng'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[2].getSubIndex()}] has not set language tag 'eng'")
|
||||||
|
assert ('title' in resortedTrackDescriptors[2].getTags().keys()
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[2].getSubIndex()}] has not set title tag")
|
||||||
|
assert (resortedTrackDescriptors[2].getTags()['title'] == 'English'
|
||||||
|
), (f"Stream src_index={resortedTrackDescriptors[2].getSourceIndex()} "
|
||||||
|
+ f"index={resortedTrackDescriptors[2].getIndex()} "
|
||||||
|
+ f"[{resortedTrackDescriptors[2].getType().label()}:"
|
||||||
|
+ f"{resortedTrackDescriptors[2].getSubIndex()}] has not set title 'English'")
|
||||||
|
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def shouldFail(self):
|
||||||
|
return False
|
||||||
@@ -1,19 +1,58 @@
|
|||||||
import os, click, requests, json
|
import os, click, requests, json, time, logging
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
|
||||||
|
class TMDB_REQUEST_EXCEPTION(Exception):
|
||||||
|
def __init__(self, statusCode, statusMessage):
|
||||||
|
errorMessage = f"TMDB query failed with status code {statusCode}: {statusMessage}"
|
||||||
|
super().__init__(errorMessage)
|
||||||
|
|
||||||
|
class TMDB_API_KEY_NOT_PRESENT_EXCEPTION(Exception):
|
||||||
|
def __str__(self):
|
||||||
|
return 'TMDB api key is not available, please set environment variable TMDB_API_KEY'
|
||||||
|
|
||||||
|
class TMDB_EXCESSIVE_USAGE_EXCEPTION(Exception):
|
||||||
|
def __str__(self):
|
||||||
|
return 'Rate limit was triggered too often'
|
||||||
|
|
||||||
|
|
||||||
class TmdbController():
|
class TmdbController():
|
||||||
|
|
||||||
DEFAULT_LANGUAGE = 'de-DE'
|
DEFAULT_LANGUAGE = 'de-DE'
|
||||||
|
|
||||||
def __init__(self):
|
RATE_LIMIT_WAIT_SECONDS = 10
|
||||||
|
RATE_LIMIT_RETRIES = 3
|
||||||
|
|
||||||
try:
|
def __init__(self, context = None):
|
||||||
self.__tmdbApiKey = os.environ['TMDB_API_KEY']
|
self.__context = context
|
||||||
except KeyError:
|
self.__logger = (context['logger'] if context is not None and 'logger' in context.keys()
|
||||||
click.ClickException('TMDB api key is not available, please set environment variable TMDB_API_KEY')
|
else logging.getLogger('FFX').addHandler(logging.NullHandler()))
|
||||||
|
|
||||||
|
self.__tmdbApiKey = os.environ.get('TMDB_API_KEY', None)
|
||||||
|
if self.__tmdbApiKey is None:
|
||||||
|
raise TMDB_API_KEY_NOT_PRESENT_EXCEPTION
|
||||||
|
|
||||||
self.tmdbLanguage = TmdbController.DEFAULT_LANGUAGE
|
self.tmdbLanguage = TmdbController.DEFAULT_LANGUAGE
|
||||||
|
|
||||||
|
|
||||||
|
def getTmdbRequest(self, tmdbUrl):
|
||||||
|
retries = TmdbController.RATE_LIMIT_RETRIES
|
||||||
|
while True:
|
||||||
|
response = requests.get(tmdbUrl)
|
||||||
|
if response.status_code == 429:
|
||||||
|
if not retries:
|
||||||
|
raise TMDB_EXCESSIVE_USAGE_EXCEPTION()
|
||||||
|
self.__logger.warning('TMDB Rate limit (status_code 429)')
|
||||||
|
time.sleep(TmdbController.RATE_LIMIT_WAIT_SECONDS)
|
||||||
|
retries -= 1
|
||||||
|
else:
|
||||||
|
jsonResult = response.json()
|
||||||
|
if ('success' in jsonResult.keys()
|
||||||
|
and not jsonResult['success']):
|
||||||
|
raise TMDB_REQUEST_EXCEPTION(jsonResult['status_code'], jsonResult['status_message'])
|
||||||
|
return jsonResult
|
||||||
|
|
||||||
|
|
||||||
def queryShow(self, showId):
|
def queryShow(self, showId):
|
||||||
"""
|
"""
|
||||||
First level keys in the response object:
|
First level keys in the response object:
|
||||||
@@ -53,24 +92,18 @@ class TmdbController():
|
|||||||
|
|
||||||
urlParams = f"?language={self.tmdbLanguage}&api_key={self.__tmdbApiKey}"
|
urlParams = f"?language={self.tmdbLanguage}&api_key={self.__tmdbApiKey}"
|
||||||
|
|
||||||
tmdbUrl = f"https://api.themoviedb.org/3/tv/{showId}W{urlParams}"
|
tmdbUrl = f"https://api.themoviedb.org/3/tv/{showId}{urlParams}"
|
||||||
|
|
||||||
#TODO Check for result
|
return self.getTmdbRequest(tmdbUrl)
|
||||||
try:
|
|
||||||
#TODO: Content Type aware processing
|
|
||||||
# response = requests.get(tmdbUrl)
|
|
||||||
# response.encoding = 'utf-8'
|
|
||||||
# return response.json()
|
|
||||||
# response = requests.get(tmdbUrl)
|
|
||||||
|
|
||||||
# contentType = response.headers.get('Content-Type')
|
|
||||||
# print(content_type) # Example: 'application/json; charset=UTF-8'
|
|
||||||
|
|
||||||
# decoded_content = response.content.decode('utf-8')
|
def getShowNameAndYear(self, showId: int):
|
||||||
# return json.loads(decoded_content)
|
|
||||||
return requests.get(tmdbUrl).json()
|
showResult = self.queryShow(int(showId))
|
||||||
except:
|
firstAirDate = datetime.strptime(showResult['first_air_date'], '%Y-%m-%d')
|
||||||
return {}
|
|
||||||
|
return str(showResult['name']), int(firstAirDate.year)
|
||||||
|
|
||||||
|
|
||||||
def queryEpisode(self, showId, season, episode):
|
def queryEpisode(self, showId, season, episode):
|
||||||
"""
|
"""
|
||||||
@@ -94,14 +127,11 @@ class TmdbController():
|
|||||||
|
|
||||||
tmdbUrl = f"https://api.themoviedb.org/3/tv/{showId}/season/{season}/episode/{episode}{urlParams}"
|
tmdbUrl = f"https://api.themoviedb.org/3/tv/{showId}/season/{season}/episode/{episode}{urlParams}"
|
||||||
|
|
||||||
#TODO Check for result
|
return self.getTmdbRequest(tmdbUrl)
|
||||||
try:
|
|
||||||
return requests.get(tmdbUrl).json()
|
|
||||||
except:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
def getEpisodeFileBasename(self,
|
|
||||||
showName,
|
@staticmethod
|
||||||
|
def getEpisodeFileBasename(showName,
|
||||||
episodeName,
|
episodeName,
|
||||||
season,
|
season,
|
||||||
episode,
|
episode,
|
||||||
@@ -153,3 +183,6 @@ class TmdbController():
|
|||||||
filenameTokens += ['E{num:{fill}{width}}'.format(num=episode, fill='0', width=indicatorEpisodeDigits)]
|
filenameTokens += ['E{num:{fill}{width}}'.format(num=episode, fill='0', width=indicatorEpisodeDigits)]
|
||||||
|
|
||||||
return ''.join(filenameTokens)
|
return ''.join(filenameTokens)
|
||||||
|
|
||||||
|
def importShow(self, showId: int):
|
||||||
|
pass
|
||||||
@@ -66,6 +66,8 @@ class TrackController():
|
|||||||
|
|
||||||
track : Track = q.first()
|
track : Track = q.first()
|
||||||
|
|
||||||
|
track.index = int(trackDescriptor.getIndex())
|
||||||
|
|
||||||
track.track_type = int(trackDescriptor.getType().index())
|
track.track_type = int(trackDescriptor.getType().index())
|
||||||
track.codec_name = str(trackDescriptor.getCodec())
|
track.codec_name = str(trackDescriptor.getCodec())
|
||||||
track.audio_layout = int(trackDescriptor.getAudioLayout().index())
|
track.audio_layout = int(trackDescriptor.getAudioLayout().index())
|
||||||
@@ -103,13 +105,34 @@ class TrackController():
|
|||||||
s = self.Session()
|
s = self.Session()
|
||||||
|
|
||||||
q = s.query(Track).filter(Track.pattern_id == int(patternId))
|
q = s.query(Track).filter(Track.pattern_id == int(patternId))
|
||||||
return [a for a in q.all()]
|
return sorted([t for t in q.all()], key=lambda d: d.getIndex())
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise click.ClickException(f"TrackController.findTracks(): {repr(ex)}")
|
raise click.ClickException(f"TrackController.findTracks(): {repr(ex)}")
|
||||||
finally:
|
finally:
|
||||||
s.close()
|
s.close()
|
||||||
|
|
||||||
|
|
||||||
|
def findSiblingDescriptors(self, patternId):
|
||||||
|
"""Finds all stored tracks related to a pattern, packs them in descriptors
|
||||||
|
and also setting sub indices and returns list of descriptors"""
|
||||||
|
|
||||||
|
siblingTracks = self.findTracks(patternId)
|
||||||
|
siblingDescriptors = []
|
||||||
|
|
||||||
|
subIndexCounter = {}
|
||||||
|
st: Track
|
||||||
|
for st in siblingTracks:
|
||||||
|
trackType = st.getType()
|
||||||
|
|
||||||
|
if not trackType in subIndexCounter.keys():
|
||||||
|
subIndexCounter[trackType] = 0
|
||||||
|
siblingDescriptors.append(st.getDescriptor(subIndex=subIndexCounter[trackType]))
|
||||||
|
subIndexCounter[trackType] += 1
|
||||||
|
|
||||||
|
return siblingDescriptors
|
||||||
|
|
||||||
|
|
||||||
#TODO: mit optionalem Parameter lösen ^
|
#TODO: mit optionalem Parameter lösen ^
|
||||||
def findVideoTracks(self, patternId):
|
def findVideoTracks(self, patternId):
|
||||||
|
|
||||||
@@ -233,8 +256,8 @@ class TrackController():
|
|||||||
s.close()
|
s.close()
|
||||||
|
|
||||||
|
|
||||||
def setDefaultSubTrack(self, trackType, subIndex):
|
# def setDefaultSubTrack(self, trackType, subIndex):
|
||||||
pass
|
# pass
|
||||||
|
#
|
||||||
def setForcedSubTrack(self, trackType, subIndex):
|
# def setForcedSubTrack(self, trackType, subIndex):
|
||||||
pass
|
# pass
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
import click
|
import logging
|
||||||
|
from typing import Self
|
||||||
|
|
||||||
from .iso_language import IsoLanguage
|
from .iso_language import IsoLanguage
|
||||||
from .track_type import TrackType
|
from .track_type import TrackType
|
||||||
@@ -10,6 +11,8 @@ from .helper import dictDiff, setDiff
|
|||||||
|
|
||||||
class TrackDescriptor:
|
class TrackDescriptor:
|
||||||
|
|
||||||
|
CONTEXT_KEY = "context"
|
||||||
|
|
||||||
ID_KEY = "id"
|
ID_KEY = "id"
|
||||||
INDEX_KEY = "index"
|
INDEX_KEY = "index"
|
||||||
SOURCE_INDEX_KEY = "source_index"
|
SOURCE_INDEX_KEY = "source_index"
|
||||||
@@ -34,6 +37,17 @@ class TrackDescriptor:
|
|||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
|
|
||||||
|
if TrackDescriptor.CONTEXT_KEY in kwargs.keys():
|
||||||
|
if type(kwargs[TrackDescriptor.CONTEXT_KEY]) is not dict:
|
||||||
|
raise TypeError(
|
||||||
|
f"TrackDescriptor.__init__(): Argument {TrackDescriptor.CONTEXT_KEY} is required to be of type dict"
|
||||||
|
)
|
||||||
|
self.__context = kwargs[TrackDescriptor.CONTEXT_KEY]
|
||||||
|
self.__logger = self.__context['logger']
|
||||||
|
else:
|
||||||
|
self.__context = {}
|
||||||
|
self.__logger = logging.getLogger('FFX').addHandler(logging.NullHandler())
|
||||||
|
|
||||||
if TrackDescriptor.ID_KEY in kwargs.keys():
|
if TrackDescriptor.ID_KEY in kwargs.keys():
|
||||||
if type(kwargs[TrackDescriptor.ID_KEY]) is not int:
|
if type(kwargs[TrackDescriptor.ID_KEY]) is not int:
|
||||||
raise TypeError(
|
raise TypeError(
|
||||||
@@ -247,6 +261,9 @@ class TrackDescriptor:
|
|||||||
def getSourceIndex(self):
|
def getSourceIndex(self):
|
||||||
return self.__sourceIndex
|
return self.__sourceIndex
|
||||||
|
|
||||||
|
def setSourceIndex(self, sourceIndex: int):
|
||||||
|
self.__sourceIndex = int(sourceIndex)
|
||||||
|
|
||||||
def getSubIndex(self):
|
def getSubIndex(self):
|
||||||
return self.__subIndex
|
return self.__subIndex
|
||||||
|
|
||||||
@@ -257,7 +274,7 @@ class TrackDescriptor:
|
|||||||
return self.__trackType
|
return self.__trackType
|
||||||
|
|
||||||
def getCodec(self):
|
def getCodec(self):
|
||||||
return self.__codecName
|
return str(self.__codecName)
|
||||||
|
|
||||||
def getLanguage(self):
|
def getLanguage(self):
|
||||||
if "language" in self.__trackTags.keys():
|
if "language" in self.__trackTags.keys():
|
||||||
@@ -265,12 +282,21 @@ class TrackDescriptor:
|
|||||||
else:
|
else:
|
||||||
return IsoLanguage.UNDEFINED
|
return IsoLanguage.UNDEFINED
|
||||||
|
|
||||||
|
def setLanguage(self, language: IsoLanguage):
|
||||||
|
if not type(language) is IsoLanguage:
|
||||||
|
raise TypeError('language has to be of type IsoLanguage')
|
||||||
|
self.__trackTags["language"] = language
|
||||||
|
|
||||||
def getTitle(self):
|
def getTitle(self):
|
||||||
if "title" in self.__trackTags.keys():
|
if "title" in self.__trackTags.keys():
|
||||||
return str(self.__trackTags["title"])
|
return str(self.__trackTags["title"])
|
||||||
else:
|
else:
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
|
def setTitle(self, title: str):
|
||||||
|
self.__trackTags["title"] = str(title)
|
||||||
|
|
||||||
|
|
||||||
def getAudioLayout(self):
|
def getAudioLayout(self):
|
||||||
return self.__audioLayout
|
return self.__audioLayout
|
||||||
|
|
||||||
@@ -289,7 +315,7 @@ class TrackDescriptor:
|
|||||||
else:
|
else:
|
||||||
self.__dispositionSet.discard(disposition)
|
self.__dispositionSet.discard(disposition)
|
||||||
|
|
||||||
def compare(self, vsTrackDescriptor):
|
def compare(self, vsTrackDescriptor: Self):
|
||||||
|
|
||||||
compareResult = {}
|
compareResult = {}
|
||||||
|
|
||||||
|
|||||||
@@ -99,6 +99,7 @@ class TrackDetailsScreen(Screen):
|
|||||||
self.__isNew = trackDescriptor is None
|
self.__isNew = trackDescriptor is None
|
||||||
if self.__isNew:
|
if self.__isNew:
|
||||||
self.__trackType = trackType
|
self.__trackType = trackType
|
||||||
|
self.__codec = ''
|
||||||
self.__audioLayout = AudioLayout.LAYOUT_UNDEFINED
|
self.__audioLayout = AudioLayout.LAYOUT_UNDEFINED
|
||||||
self.__index = index
|
self.__index = index
|
||||||
self.__subIndex = subIndex
|
self.__subIndex = subIndex
|
||||||
@@ -106,6 +107,7 @@ class TrackDetailsScreen(Screen):
|
|||||||
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else {}
|
self.__pattern : Pattern = self.__pc.getPattern(patternId) if patternId is not None else {}
|
||||||
else:
|
else:
|
||||||
self.__trackType = trackDescriptor.getType()
|
self.__trackType = trackDescriptor.getType()
|
||||||
|
self.__codec = trackDescriptor.getCodec()
|
||||||
self.__audioLayout = trackDescriptor.getAudioLayout()
|
self.__audioLayout = trackDescriptor.getAudioLayout()
|
||||||
self.__index = trackDescriptor.getIndex()
|
self.__index = trackDescriptor.getIndex()
|
||||||
self.__subIndex = trackDescriptor.getSubIndex()
|
self.__subIndex = trackDescriptor.getSubIndex()
|
||||||
@@ -268,12 +270,16 @@ class TrackDetailsScreen(Screen):
|
|||||||
|
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
|
|
||||||
|
kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
|
||||||
|
|
||||||
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__pattern.getId())
|
kwargs[TrackDescriptor.PATTERN_ID_KEY] = int(self.__pattern.getId())
|
||||||
|
|
||||||
kwargs[TrackDescriptor.INDEX_KEY] = self.__index
|
kwargs[TrackDescriptor.INDEX_KEY] = self.__index
|
||||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = self.__subIndex #!
|
kwargs[TrackDescriptor.SUB_INDEX_KEY] = self.__subIndex #!
|
||||||
|
|
||||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(self.query_one("#type_select", Select).value)
|
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(self.query_one("#type_select", Select).value)
|
||||||
|
|
||||||
|
kwargs[TrackDescriptor.CODEC_NAME_KEY] = self.__codec
|
||||||
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(self.query_one("#audio_layout_select", Select).value)
|
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(self.query_one("#audio_layout_select", Select).value)
|
||||||
|
|
||||||
trackTags = {}
|
trackTags = {}
|
||||||
|
|||||||
@@ -5,25 +5,25 @@ from enum import Enum
|
|||||||
|
|
||||||
class TrackDisposition(Enum):
|
class TrackDisposition(Enum):
|
||||||
|
|
||||||
DEFAULT = {"name": "default", "index": 0}
|
DEFAULT = {"name": "default", "index": 0, "indicator": "DEF"}
|
||||||
FORCED = {"name": "forced", "index": 1}
|
FORCED = {"name": "forced", "index": 1, "indicator": "FOR"}
|
||||||
|
|
||||||
DUB = {"name": "dub", "index": 2}
|
DUB = {"name": "dub", "index": 2, "indicator": "DUB"}
|
||||||
ORIGINAL = {"name": "original", "index": 3}
|
ORIGINAL = {"name": "original", "index": 3, "indicator": "ORG"}
|
||||||
COMMENT = {"name": "comment", "index": 4}
|
COMMENT = {"name": "comment", "index": 4, "indicator": "COM"}
|
||||||
LYRICS = {"name": "lyrics", "index": 5}
|
LYRICS = {"name": "lyrics", "index": 5, "indicator": "LYR"}
|
||||||
KARAOKE = {"name": "karaoke", "index": 6}
|
KARAOKE = {"name": "karaoke", "index": 6, "indicator": "KAR"}
|
||||||
HEARING_IMPAIRED = {"name": "hearing_impaired", "index": 7}
|
HEARING_IMPAIRED = {"name": "hearing_impaired", "index": 7, "indicator": "HIM"}
|
||||||
VISUAL_IMPAIRED = {"name": "visual_impaired", "index": 8}
|
VISUAL_IMPAIRED = {"name": "visual_impaired", "index": 8, "indicator": "VIM"}
|
||||||
CLEAN_EFFECTS = {"name": "clean_effects", "index": 9}
|
CLEAN_EFFECTS = {"name": "clean_effects", "index": 9, "indicator": "CLE"}
|
||||||
ATTACHED_PIC = {"name": "attached_pic", "index": 10}
|
ATTACHED_PIC = {"name": "attached_pic", "index": 10, "indicator": "ATP"}
|
||||||
TIMED_THUMBNAILS = {"name": "timed_thumbnails", "index": 11}
|
TIMED_THUMBNAILS = {"name": "timed_thumbnails", "index": 11, "indicator": "TTH"}
|
||||||
NON_DIEGETICS = {"name": "non_diegetic", "index": 12}
|
NON_DIEGETICS = {"name": "non_diegetic", "index": 12, "indicator": "NOD"}
|
||||||
CAPTIONS = {"name": "captions", "index": 13}
|
CAPTIONS = {"name": "captions", "index": 13, "indicator": "CAP"}
|
||||||
DESCRIPTIONS = {"name": "descriptions", "index": 14}
|
DESCRIPTIONS = {"name": "descriptions", "index": 14, "indicator": "DES"}
|
||||||
METADATA = {"name": "metadata", "index": 15}
|
METADATA = {"name": "metadata", "index": 15, "indicator": "MED"}
|
||||||
DEPENDENT = {"name": "dependent", "index": 16}
|
DEPENDENT = {"name": "dependent", "index": 16, "indicator": "DEP"}
|
||||||
STILL_IMAGE = {"name": "still_image", "index": 17}
|
STILL_IMAGE = {"name": "still_image", "index": 17, "indicator": "STI"}
|
||||||
|
|
||||||
|
|
||||||
def label(self):
|
def label(self):
|
||||||
@@ -32,6 +32,9 @@ class TrackDisposition(Enum):
|
|||||||
def index(self):
|
def index(self):
|
||||||
return int(self.value['index'])
|
return int(self.value['index'])
|
||||||
|
|
||||||
|
def indicator(self):
|
||||||
|
return str(self.value['indicator'])
|
||||||
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def toFlags(dispositionSet):
|
def toFlags(dispositionSet):
|
||||||
|
|||||||
@@ -1,11 +0,0 @@
|
|||||||
from textual.app import App, ComposeResult
|
|
||||||
from textual.screen import Screen
|
|
||||||
from textual.widgets import Header, Footer, Placeholder, Label
|
|
||||||
|
|
||||||
class WarningScreen(Screen):
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__()
|
|
||||||
context = self.app.getContext()
|
|
||||||
def compose(self) -> ComposeResult:
|
|
||||||
yield Label("Warning! This file is not compliant to the defined source schema!")
|
|
||||||
yield Footer()
|
|
||||||
126
bin/ffx_tests.py
Executable file
126
bin/ffx_tests.py
Executable file
@@ -0,0 +1,126 @@
|
|||||||
|
#! /usr/bin/python3
|
||||||
|
|
||||||
|
import os, logging, click
|
||||||
|
|
||||||
|
from ffx.configuration_controller import ConfigurationController
|
||||||
|
|
||||||
|
from ffx.file_properties import FileProperties
|
||||||
|
from ffx.ffx_controller import FfxController
|
||||||
|
|
||||||
|
from ffx.database import databaseContext
|
||||||
|
|
||||||
|
from ffx.test.helper import createMediaTestFile
|
||||||
|
|
||||||
|
from ffx.test.scenario import Scenario
|
||||||
|
from ffx.tmdb_controller import TMDB_API_KEY_NOT_PRESENT_EXCEPTION
|
||||||
|
|
||||||
|
|
||||||
|
@click.group()
|
||||||
|
@click.pass_context
|
||||||
|
@click.option('-v', '--verbose', type=int, default=0, help='Set verbosity of output')
|
||||||
|
@click.option("--dry-run", is_flag=True, default=False)
|
||||||
|
def ffx(ctx, verbose, dry_run):
|
||||||
|
"""FFX"""
|
||||||
|
|
||||||
|
ctx.obj = {}
|
||||||
|
|
||||||
|
ctx.obj['config'] = ConfigurationController()
|
||||||
|
|
||||||
|
ctx.obj['database'] = None
|
||||||
|
ctx.obj['dry_run'] = dry_run
|
||||||
|
|
||||||
|
ctx.obj['verbosity'] = verbose
|
||||||
|
|
||||||
|
# Critical 50
|
||||||
|
# Error 40
|
||||||
|
# Warning 30
|
||||||
|
# Info 20
|
||||||
|
# Debug 10
|
||||||
|
fileLogVerbosity = max(40 - verbose * 10, 10)
|
||||||
|
consoleLogVerbosity = max(20 - verbose * 10, 10)
|
||||||
|
|
||||||
|
ctx.obj['logger'] = logging.getLogger('FFX Tests')
|
||||||
|
ctx.obj['logger'].setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
ctx.obj['report_logger'] = logging.getLogger('FFX Test Result')
|
||||||
|
ctx.obj['report_logger'].setLevel(logging.INFO)
|
||||||
|
|
||||||
|
ffxFileHandler = logging.FileHandler(ctx.obj['config'].getLogFilePath())
|
||||||
|
ffxFileHandler.setLevel(fileLogVerbosity)
|
||||||
|
ffxConsoleHandler = logging.StreamHandler()
|
||||||
|
ffxConsoleHandler.setLevel(consoleLogVerbosity)
|
||||||
|
|
||||||
|
if os.path.isfile('ffx_test_report.log'):
|
||||||
|
os.unlink('ffx_test_report.log')
|
||||||
|
ffxTestReportFileHandler = logging.FileHandler('ffx_test_report.log')
|
||||||
|
|
||||||
|
fileFormatter = logging.Formatter(
|
||||||
|
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||||
|
ffxFileHandler.setFormatter(fileFormatter)
|
||||||
|
consoleFormatter = logging.Formatter(
|
||||||
|
'%(message)s')
|
||||||
|
ffxConsoleHandler.setFormatter(consoleFormatter)
|
||||||
|
reportFormatter = logging.Formatter(
|
||||||
|
'%(message)s')
|
||||||
|
ffxTestReportFileHandler.setFormatter(reportFormatter)
|
||||||
|
|
||||||
|
ctx.obj['logger'].addHandler(ffxConsoleHandler)
|
||||||
|
ctx.obj['logger'].addHandler(ffxFileHandler)
|
||||||
|
|
||||||
|
ctx.obj['report_logger'].addHandler(ffxConsoleHandler)
|
||||||
|
ctx.obj['report_logger'].addHandler(ffxTestReportFileHandler)
|
||||||
|
|
||||||
|
|
||||||
|
# Another subcommand
|
||||||
|
@ffx.command()
|
||||||
|
@click.pass_context
|
||||||
|
@click.option('--scenario', type=str, default='', help='Only run tests from this scenario')
|
||||||
|
@click.option('--variant', type=str, default='', help='Only run variants beginning like this')
|
||||||
|
@click.option('--limit', type=int, default=0, help='Only run this number of tests')
|
||||||
|
def run(ctx, scenario, variant, limit):
|
||||||
|
"""Run ffx test sequences"""
|
||||||
|
|
||||||
|
ctx.obj['logger'].info('Starting FFX test runs')
|
||||||
|
ctx.obj['test_passed_counter'] = 0
|
||||||
|
ctx.obj['test_failed_counter'] = 0
|
||||||
|
|
||||||
|
ctx.obj['test_variant'] = variant
|
||||||
|
ctx.obj['test_limit'] = limit
|
||||||
|
|
||||||
|
for si in Scenario.list():
|
||||||
|
|
||||||
|
try:
|
||||||
|
SCEN = Scenario.getClassReference(si)
|
||||||
|
scen = SCEN(ctx.obj)
|
||||||
|
|
||||||
|
if scenario and scenario != scen.getScenario():
|
||||||
|
continue
|
||||||
|
|
||||||
|
ctx.obj['logger'].debug(f"Running scenario {si}")
|
||||||
|
|
||||||
|
scen.run()
|
||||||
|
|
||||||
|
except TMDB_API_KEY_NOT_PRESENT_EXCEPTION:
|
||||||
|
ctx.obj['logger'].info(f"TMDB_API_KEY not set: Skipping {SCEN.__class__.__name__}")
|
||||||
|
|
||||||
|
ctx.obj['logger'].info(f"\n{ctx.obj['test_passed_counter']} tests passed")
|
||||||
|
ctx.obj['logger'].info(f"{ctx.obj['test_failed_counter']} test failed")
|
||||||
|
ctx.obj['logger'].info('\nDone.')
|
||||||
|
|
||||||
|
|
||||||
|
@ffx.command()
|
||||||
|
@click.pass_context
|
||||||
|
@click.argument('paths', nargs=-1)
|
||||||
|
def dupe(ctx, paths):
|
||||||
|
|
||||||
|
existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]
|
||||||
|
|
||||||
|
for sourcePath in existingSourcePaths:
|
||||||
|
|
||||||
|
sourceFileProperties = FileProperties(ctx.obj, sourcePath)
|
||||||
|
sourceMediaDescriptor = sourceFileProperties.getMediaDescriptor()
|
||||||
|
|
||||||
|
createMediaTestFile(sourceMediaDescriptor, baseName='dupe')
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
ffx()
|
||||||
Reference in New Issue
Block a user