Removes build artifacts from branch
parent 2d03a3bb10
commit fd5ad3ed56
@@ -1,71 +0,0 @@
from enum import Enum

from .track_type import TrackType


class AudioLayout(Enum):

    LAYOUT_STEREO = {"label": "stereo", "index": 1}
    LAYOUT_5_1 = {"label": "5.1(side)", "index": 2}
    LAYOUT_6_1 = {"label": "6.1", "index": 3}
    LAYOUT_7_1 = {"label": "7.1", "index": 4}  # TODO: Does this exist?

    LAYOUT_6CH = {"label": "6ch", "index": 5}
    LAYOUT_5_0 = {"label": "5.0(side)", "index": 6}

    LAYOUT_UNDEFINED = {"label": "undefined", "index": 0}


    def label(self):
        """Returns the audio layout label as a string"""
        return str(self.value['label'])

    def index(self):
        """Returns the audio layout index as an integer"""
        return int(self.value['index'])

    @staticmethod
    def fromLabel(label: str):
        try:
            return [a for a in AudioLayout if a.value['label'] == str(label)][0]
        except IndexError:
            return AudioLayout.LAYOUT_UNDEFINED

    # @staticmethod
    # def fromIndex(index : int):
    #     try:
    #         target_index = int(index)
    #     except (TypeError, ValueError):
    #         return AudioLayout.LAYOUT_UNDEFINED
    #     return next((a for a in AudioLayout if a.value['index'] == target_index),
    #                 AudioLayout.LAYOUT_UNDEFINED)

    @staticmethod
    def fromIndex(index: int):
        try:
            return [a for a in AudioLayout if a.value['index'] == int(index)][0]
        except (IndexError, TypeError, ValueError):
            return AudioLayout.LAYOUT_UNDEFINED

    @staticmethod
    def identify(streamObj):
        """Maps an ffprobe audio stream object to an AudioLayout"""

        FFPROBE_LAYOUT_KEY = 'channel_layout'
        FFPROBE_CHANNELS_KEY = 'channels'
        FFPROBE_CODEC_TYPE_KEY = 'codec_type'

        if (type(streamObj) is not dict
                or FFPROBE_CODEC_TYPE_KEY not in streamObj.keys()
                or streamObj[FFPROBE_CODEC_TYPE_KEY] != TrackType.AUDIO.label()):
            raise Exception('Not an ffprobe audio stream object')

        if FFPROBE_LAYOUT_KEY in streamObj.keys():
            matchingLayouts = [l for l in AudioLayout if l.label() == streamObj[FFPROBE_LAYOUT_KEY]]
            if matchingLayouts:
                return matchingLayouts[0]

        if (FFPROBE_CHANNELS_KEY in streamObj.keys()
                and int(streamObj[FFPROBE_CHANNELS_KEY]) == 6):
            return AudioLayout.LAYOUT_6CH

        return AudioLayout.LAYOUT_UNDEFINED
@@ -1,142 +0,0 @@
import os, json


class ConfigurationController():

    CONFIG_FILENAME = 'ffx.json'
    DATABASE_FILENAME = 'ffx.db'
    LOG_FILENAME = 'ffx.log'

    DATABASE_PATH_CONFIG_KEY = 'databasePath'
    LOG_DIRECTORY_CONFIG_KEY = 'logDirectory'
    OUTPUT_FILENAME_TEMPLATE_KEY = 'outputFilenameTemplate'


    def __init__(self):

        self.__homeDir = os.path.expanduser("~")
        self.__localVarDir = os.path.join(self.__homeDir, '.local', 'var')
        self.__localEtcDir = os.path.join(self.__homeDir, '.local', 'etc')

        self.__configurationData = {}

        # .local/etc/ffx.json
        self.__configFilePath = os.path.join(self.__localEtcDir, ConfigurationController.CONFIG_FILENAME)
        if os.path.isfile(self.__configFilePath):
            with open(self.__configFilePath, 'r') as configurationFile:
                self.__configurationData = json.load(configurationFile)

        if ConfigurationController.DATABASE_PATH_CONFIG_KEY in self.__configurationData.keys():
            self.__databaseFilePath = self.__configurationData[ConfigurationController.DATABASE_PATH_CONFIG_KEY]
            os.makedirs(os.path.dirname(self.__databaseFilePath), exist_ok=True)
        else:
            ffxVarDir = os.path.join(self.__localVarDir, 'ffx')
            os.makedirs(ffxVarDir, exist_ok=True)
            self.__databaseFilePath = os.path.join(ffxVarDir, ConfigurationController.DATABASE_FILENAME)

        if ConfigurationController.LOG_DIRECTORY_CONFIG_KEY in self.__configurationData.keys():
            self.__logDir = self.__configurationData[ConfigurationController.LOG_DIRECTORY_CONFIG_KEY]
        else:
            self.__logDir = os.path.join(self.__localVarDir, 'log')
        os.makedirs(self.__logDir, exist_ok=True)


    def getHomeDirectory(self):
        return self.__homeDir

    def getLogFilePath(self):
        return os.path.join(self.__logDir, ConfigurationController.LOG_FILENAME)

    def getDatabaseFilePath(self):
        return self.__databaseFilePath


    def getData(self):
        return self.__configurationData


    #
    #
    #
    # def addPattern(self, patternDescriptor):
    #
    #     try:
    #
    #         s = self.Session()
    #         q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']),
    #                                     Pattern.pattern == str(patternDescriptor['pattern']))
    #
    #         if not q.count():
    #             pattern = Pattern(show_id = int(patternDescriptor['show_id']),
    #                               pattern = str(patternDescriptor['pattern']))
    #             s.add(pattern)
    #             s.commit()
    #             return pattern.getId()
    #         else:
    #             return 0
    #
    #     except Exception as ex:
    #         raise click.ClickException(f"PatternController.addPattern(): {repr(ex)}")
    #     finally:
    #         s.close()
    #
    #
    # def updatePattern(self, patternId, patternDescriptor):
    #
    #     try:
    #         s = self.Session()
    #         q = s.query(Pattern).filter(Pattern.id == int(patternId))
    #
    #         if q.count():
    #
    #             pattern = q.first()
    #
    #             pattern.show_id = int(patternDescriptor['show_id'])
    #             pattern.pattern = str(patternDescriptor['pattern'])
    #
    #             s.commit()
    #             return True
    #
    #         else:
    #             return False
    #
    #     except Exception as ex:
    #         raise click.ClickException(f"PatternController.updatePattern(): {repr(ex)}")
    #     finally:
    #         s.close()
    #
    #
    #
    # def findPattern(self, patternDescriptor):
    #
    #     try:
    #         s = self.Session()
    #         q = s.query(Pattern).filter(Pattern.show_id == int(patternDescriptor['show_id']), Pattern.pattern == str(patternDescriptor['pattern']))
    #
    #         if q.count():
    #             pattern = q.first()
    #             return int(pattern.id)
    #         else:
    #             return None
    #
    #     except Exception as ex:
    #         raise click.ClickException(f"PatternController.findPattern(): {repr(ex)}")
    #     finally:
    #         s.close()
    #
    #
    # def getPattern(self, patternId : int):
    #
    #     if type(patternId) is not int:
    #         raise ValueError(f"PatternController.getPattern(): Argument patternId is required to be of type int")
    #
    #     try:
    #         s = self.Session()
    #         q = s.query(Pattern).filter(Pattern.id == int(patternId))
    #
    #         return q.first() if q.count() else None
    #
    #     except Exception as ex:
    #         raise click.ClickException(f"PatternController.getPattern(): {repr(ex)}")
    #     finally:
    #         s.close()
    #
@@ -1,15 +0,0 @@
VERSION = '0.2.3'
DATABASE_VERSION = 2

DEFAULT_QUALITY = 32
DEFAULT_AV1_PRESET = 5

DEFAULT_STEREO_BANDWIDTH = "112"
DEFAULT_AC3_BANDWIDTH = "256"
DEFAULT_DTS_BANDWIDTH = "320"
DEFAULT_7_1_BANDWIDTH = "384"

DEFAULT_cut_start = 60
DEFAULT_cut_length = 180

DEFAULT_OUTPUT_FILENAME_TEMPLATE = '{{ ffx_show_name }} - {{ ffx_index }}{{ ffx_index_separator }}{{ ffx_episode_name }}{{ ffx_indicator_separator }}{{ ffx_indicator }}'
@@ -1,102 +0,0 @@
import os, click

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from ffx.model.show import Base
from ffx.model.property import Property

from ffx.constants import DATABASE_VERSION


DATABASE_VERSION_KEY = 'database_version'


class DatabaseVersionException(Exception):
    def __init__(self, errorMessage):
        super().__init__(errorMessage)


def databaseContext(databasePath: str = ''):

    databaseContext = {}

    if databasePath is None:
        # sqlite:///:memory:
        databasePath = ':memory:'
    elif not databasePath:
        homeDir = os.path.expanduser("~")
        ffxVarDir = os.path.join(homeDir, '.local', 'var', 'ffx')
        if not os.path.exists(ffxVarDir):
            os.makedirs(ffxVarDir)
        databasePath = os.path.join(ffxVarDir, 'ffx.db')

    databaseContext['url'] = f"sqlite:///{databasePath}"
    databaseContext['engine'] = create_engine(databaseContext['url'])
    databaseContext['session'] = sessionmaker(bind=databaseContext['engine'])

    Base.metadata.create_all(databaseContext['engine'])

    # isSyncronuous = False
    # while not isSyncronuous:
    # while True:
    #     try:
    #         with databaseContext['database_engine'].connect() as connection:
    #             connection.execute(sqlalchemy.text('PRAGMA foreign_keys=ON;'))
    #             # isSyncronuous = True
    #             break
    #     except sqlite3.OperationalError:
    #         time.sleep(0.1)

    ensureDatabaseVersion(databaseContext)

    return databaseContext


def ensureDatabaseVersion(databaseContext):

    currentDatabaseVersion = getDatabaseVersion(databaseContext)
    if currentDatabaseVersion:
        if currentDatabaseVersion != DATABASE_VERSION:
            raise DatabaseVersionException(f"Current database version ({currentDatabaseVersion}) does not match required ({DATABASE_VERSION})")
    else:
        setDatabaseVersion(databaseContext, DATABASE_VERSION)


def getDatabaseVersion(databaseContext):

    try:
        Session = databaseContext['session']
        s = Session()
        q = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY)

        return int(q.first().value) if q.count() else 0

    except Exception as ex:
        raise click.ClickException(f"getDatabaseVersion(): {repr(ex)}")
    finally:
        s.close()


def setDatabaseVersion(databaseContext, databaseVersion: int):

    try:
        Session = databaseContext['session']
        s = Session()

        q = s.query(Property).filter(Property.key == DATABASE_VERSION_KEY)

        dbVersion = int(databaseVersion)

        versionProperty = q.first()
        if versionProperty:
            versionProperty.value = str(dbVersion)
        else:
            versionProperty = Property(key = DATABASE_VERSION_KEY,
                                       value = str(dbVersion))
            s.add(versionProperty)
        s.commit()

    except Exception as ex:
        raise click.ClickException(f"setDatabaseVersion(): {repr(ex)}")
    finally:
        s.close()
@@ -1,815 +0,0 @@
#! /usr/bin/python3

import os, click, time, logging, shutil

from ffx.configuration_controller import ConfigurationController

from ffx.file_properties import FileProperties

from ffx.ffx_app import FfxApp
from ffx.ffx_controller import FfxController
from ffx.tmdb_controller import TmdbController

from ffx.database import databaseContext

from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor
from ffx.show_descriptor import ShowDescriptor

from ffx.track_type import TrackType
from ffx.video_encoder import VideoEncoder
from ffx.track_disposition import TrackDisposition
from ffx.track_codec import TrackCodec

from ffx.process import executeProcess
from ffx.helper import filterFilename, substituteTmdbFilename
from ffx.helper import getEpisodeFileBasename

from ffx.constants import DEFAULT_STEREO_BANDWIDTH, DEFAULT_AC3_BANDWIDTH, DEFAULT_DTS_BANDWIDTH, DEFAULT_7_1_BANDWIDTH

from ffx.filter.quality_filter import QualityFilter
from ffx.filter.preset_filter import PresetFilter

from ffx.filter.crop_filter import CropFilter
from ffx.filter.nlmeans_filter import NlmeansFilter
from ffx.filter.deinterlace_filter import DeinterlaceFilter

from ffx.constants import VERSION

from ffx.shifted_season_controller import ShiftedSeasonController


@click.group()
@click.pass_context
@click.option('--database-file', type=str, default='', help='Path to database file')
@click.option('-v', '--verbose', type=int, default=0, help='Set verbosity of output')
@click.option("--dry-run", is_flag=True, default=False)
def ffx(ctx, database_file, verbose, dry_run):
    """FFX"""

    ctx.obj = {}

    ctx.obj['config'] = ConfigurationController()

    ctx.obj['database'] = databaseContext(databasePath=database_file
                                          if database_file else ctx.obj['config'].getDatabaseFilePath())

    ctx.obj['dry_run'] = dry_run
    ctx.obj['verbosity'] = verbose

    # Critical 50
    # Error    40
    # Warning  30
    # Info     20
    # Debug    10
    fileLogVerbosity = max(40 - verbose * 10, 10)
    consoleLogVerbosity = max(20 - verbose * 10, 10)

    ctx.obj['logger'] = logging.getLogger('FFX')
    ctx.obj['logger'].setLevel(logging.DEBUG)

    ffxFileHandler = logging.FileHandler(ctx.obj['config'].getLogFilePath())
    ffxFileHandler.setLevel(fileLogVerbosity)
    ffxConsoleHandler = logging.StreamHandler()
    ffxConsoleHandler.setLevel(consoleLogVerbosity)

    fileFormatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ffxFileHandler.setFormatter(fileFormatter)
    consoleFormatter = logging.Formatter(
        '%(message)s')
    ffxConsoleHandler.setFormatter(consoleFormatter)

    ctx.obj['logger'].addHandler(ffxConsoleHandler)
    ctx.obj['logger'].addHandler(ffxFileHandler)


# Define a subcommand
@ffx.command()
def version():
    click.echo(VERSION)


# Another subcommand
@ffx.command()
def help():
    click.echo(f"ffx {VERSION}\n")
    click.echo(f"Usage: ffx [input file] [output file] [vp9|av1] [q=[nn[,nn,...]]] [p=nn] [a=nnn[k]] [ac3=nnn[k]] [dts=nnn[k]] [crop]")


@ffx.command()
@click.pass_context
@click.argument('filename', nargs=1)
def inspect(ctx, filename):

    ctx.obj['command'] = 'inspect'
    ctx.obj['arguments'] = {}
    ctx.obj['arguments']['filename'] = filename

    app = FfxApp(ctx.obj)
    app.run()


def getUnmuxSequence(trackDescriptor: TrackDescriptor, sourcePath, targetPrefix, targetDirectory = ''):

    # executable and input file
    commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]

    trackType = trackDescriptor.getType()

    targetPathBase = os.path.join(targetDirectory, targetPrefix) if targetDirectory else targetPrefix

    # mapping
    commandTokens += ['-map',
                      f"0:{trackType.indicator()}:{trackDescriptor.getSubIndex()}",
                      '-c',
                      'copy']

    trackCodec = trackDescriptor.getCodec()

    # output format
    codecFormat = trackCodec.format()
    if codecFormat is not None:
        commandTokens += ['-f', codecFormat]

    # output filename
    commandTokens += [f"{targetPathBase}.{trackCodec.extension()}"]

    return commandTokens


@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option("-o", "--output-directory", type=str, default='')
@click.option("-s", "--subtitles-only", is_flag=True, default=False)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
def unmux(ctx,
          paths,
          label,
          output_directory,
          subtitles_only,
          nice,
          cpu):

    existingSourcePaths = [p for p in paths if os.path.isfile(p)]
    ctx.obj['logger'].debug(f"\nUnmuxing {len(existingSourcePaths)} files")

    ctx.obj['resource_limits'] = {}
    ctx.obj['resource_limits']['niceness'] = nice
    ctx.obj['resource_limits']['cpu_percent'] = cpu

    for sourcePath in existingSourcePaths:

        fp = FileProperties(ctx.obj, sourcePath)

        try:
            sourceMediaDescriptor = fp.getMediaDescriptor()

            season = fp.getSeason()
            episode = fp.getEpisode()

            # TODO: Adapt recognition for all formats
            targetLabel = label if label else fp.getFileBasename()
            targetIndicator = f"_S{season}E{episode}" if label and season != -1 and episode != -1 else ''

            if label and not targetIndicator:
                ctx.obj['logger'].warning(f"Skipping file {fp.getFilename()}: Label set but no indicator recognized")
                continue
            else:
                ctx.obj['logger'].info(f"\nUnmuxing file {fp.getFilename()}\n")

            # for trackDescriptor in sourceMediaDescriptor.getAllTrackDescriptors():
            for trackDescriptor in sourceMediaDescriptor.getTrackDescriptors():

                if trackDescriptor.getType() == TrackType.SUBTITLE or not subtitles_only:

                    # SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
                    targetPrefix = f"{targetLabel}{targetIndicator}_{trackDescriptor.getIndex()}_{trackDescriptor.getLanguage().threeLetter()}"

                    td: TrackDisposition
                    for td in sorted(trackDescriptor.getDispositionSet(), key=lambda d: d.index()):
                        targetPrefix += f"_{td.indicator()}"

                    unmuxSequence = getUnmuxSequence(trackDescriptor, sourcePath, targetPrefix, targetDirectory = output_directory)

                    if unmuxSequence:
                        if not ctx.obj['dry_run']:

                            # TODO #425: Codec Enum
                            ctx.obj['logger'].info(f"Unmuxing stream {trackDescriptor.getIndex()} into file {targetPrefix}.{trackDescriptor.getCodec().extension()}")

                            ctx.obj['logger'].debug(f"Executing unmuxing sequence")

                            out, err, rc = executeProcess(unmuxSequence, context = ctx.obj)
                            if rc:
                                ctx.obj['logger'].error(f"Unmuxing of stream {trackDescriptor.getIndex()} failed with error ({rc}) {err}")
                    else:
                        ctx.obj['logger'].warning(f"Skipping stream with unknown codec")
        except Exception as ex:
            ctx.obj['logger'].warning(f"Skipping file {sourcePath} ({ex})")


@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
def cropdetect(ctx,
               paths,
               nice,
               cpu):

    existingSourcePaths = [p for p in paths if os.path.isfile(p)]
    ctx.obj['logger'].debug(f"\nDetecting crop parameters for {len(existingSourcePaths)} files")

    ctx.obj['resource_limits'] = {}
    ctx.obj['resource_limits']['niceness'] = nice
    ctx.obj['resource_limits']['cpu_percent'] = cpu

    for sourcePath in existingSourcePaths:

        try:
            fp = FileProperties(ctx.obj, sourcePath)
            cropParams = fp.findCropParams()

            click.echo(cropParams)

        except Exception as ex:
            ctx.obj['logger'].warning(f"Skipping file {sourcePath} ({ex})")


@ffx.command()
@click.pass_context
def shows(ctx):

    ctx.obj['command'] = 'shows'

    app = FfxApp(ctx.obj)
    app.run()


def checkUniqueDispositions(context, mediaDescriptor: MediaDescriptor):

    # Check for multiple default or forced dispositions if not set by user input or database requirements.
    #
    # Query the user for the correct sub indices, then configure the flags in the track descriptors
    # associated with the media descriptor accordingly. The correct tokens are then created downstream.
    if len([v for v in mediaDescriptor.getVideoTracks() if v.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
        if context['no_prompt']:
            raise click.ClickException('More than one default video stream detected and no prompt set')
        defaultVideoTrackSubIndex = click.prompt("More than one default video stream detected! Please select stream", type=int)
        mediaDescriptor.setDefaultSubTrack(TrackType.VIDEO, defaultVideoTrackSubIndex)

    if len([v for v in mediaDescriptor.getVideoTracks() if v.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
        if context['no_prompt']:
            raise click.ClickException('More than one forced video stream detected and no prompt set')
        forcedVideoTrackSubIndex = click.prompt("More than one forced video stream detected! Please select stream", type=int)
        mediaDescriptor.setForcedSubTrack(TrackType.VIDEO, forcedVideoTrackSubIndex)

    if len([a for a in mediaDescriptor.getAudioTracks() if a.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
        if context['no_prompt']:
            raise click.ClickException('More than one default audio stream detected and no prompt set')
        defaultAudioTrackSubIndex = click.prompt("More than one default audio stream detected! Please select stream", type=int)
        mediaDescriptor.setDefaultSubTrack(TrackType.AUDIO, defaultAudioTrackSubIndex)

    if len([a for a in mediaDescriptor.getAudioTracks() if a.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
        if context['no_prompt']:
            raise click.ClickException('More than one forced audio stream detected and no prompt set')
        forcedAudioTrackSubIndex = click.prompt("More than one forced audio stream detected! Please select stream", type=int)
        mediaDescriptor.setForcedSubTrack(TrackType.AUDIO, forcedAudioTrackSubIndex)

    if len([s for s in mediaDescriptor.getSubtitleTracks() if s.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
        if context['no_prompt']:
            raise click.ClickException('More than one default subtitle stream detected and no prompt set')
        defaultSubtitleTrackSubIndex = click.prompt("More than one default subtitle stream detected! Please select stream", type=int)
        mediaDescriptor.setDefaultSubTrack(TrackType.SUBTITLE, defaultSubtitleTrackSubIndex)

    if len([s for s in mediaDescriptor.getSubtitleTracks() if s.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
        if context['no_prompt']:
            raise click.ClickException('More than one forced subtitle stream detected and no prompt set')
        forcedSubtitleTrackSubIndex = click.prompt("More than one forced subtitle stream detected! Please select stream", type=int)
        mediaDescriptor.setForcedSubTrack(TrackType.SUBTITLE, forcedSubtitleTrackSubIndex)


@ffx.command()
@click.pass_context
@click.argument('paths', nargs=-1)
@click.option('-l', '--label', type=str, default='', help='Label to be used as filename prefix')
@click.option('-v', '--video-encoder', type=str, default=FfxController.DEFAULT_VIDEO_ENCODER, help="Target video encoder (vp9, av1 or h264)", show_default=True)
@click.option('-q', '--quality', type=str, default="", help="Quality settings to be used with the VP9/H264 encoder")
@click.option('-p', '--preset', type=str, default="", help="Quality preset to be used with the AV1 encoder")
@click.option('-a', '--stereo-bitrate', type=int, default=DEFAULT_STEREO_BANDWIDTH, help="Bitrate in kbit/s to be used to encode stereo audio streams", show_default=True)
@click.option('--ac3', type=int, default=DEFAULT_AC3_BANDWIDTH, help="Bitrate in kbit/s to be used to encode 5.1 audio streams", show_default=True)
@click.option('--dts', type=int, default=DEFAULT_DTS_BANDWIDTH, help="Bitrate in kbit/s to be used to encode 6.1 audio streams", show_default=True)
@click.option('--subtitle-directory', type=str, default='', help='Load subtitles from here')
@click.option('--subtitle-prefix', type=str, default='', help='Subtitle filename prefix')
@click.option('--language', type=str, multiple=True, help='Set stream language. Use format <stream index>:<3 letter iso code>')
@click.option('--title', type=str, multiple=True, help='Set stream title. Use format <stream index>:<title>')
@click.option('--default-video', type=int, default=-1, help='Index of default video stream')
@click.option('--forced-video', type=int, default=-1, help='Index of forced video stream')
@click.option('--default-audio', type=int, default=-1, help='Index of default audio stream')
@click.option('--forced-audio', type=int, default=-1, help='Index of forced audio stream')
@click.option('--default-subtitle', type=int, default=-1, help='Index of default subtitle stream')
@click.option('--forced-subtitle', type=int, default=-1, help='Index of forced subtitle stream')
@click.option('--rearrange-streams', type=str, default="", help='Rearrange output stream order. Use a comma-separated list of integers')
@click.option("--crop", is_flag=False, flag_value="auto", default="none")
@click.option("--cut", is_flag=False, flag_value="default", default="none")
@click.option("--output-directory", type=str, default='')
@click.option("--deinterlace", is_flag=False, flag_value="default", default="none")
@click.option("--denoise", is_flag=False, flag_value="default", default="none")
@click.option("--denoise-use-hw", is_flag=True, default=False)
@click.option('--denoise-strength', type=str, default='', help='Denoising strength, more blurring vs. more details.')
@click.option('--denoise-patch-size', type=str, default='', help='Subimage size to apply filtering on the luminosity plane. Reduces broader noise patterns but costly.')
@click.option('--denoise-chroma-patch-size', type=str, default='', help='Subimage size to apply filtering on the chroma planes.')
@click.option('--denoise-research-window', type=str, default='', help='Range to search for comparable patches on the luminosity plane. Better filtering but costly.')
@click.option('--denoise-chroma-research-window', type=str, default='', help='Range to search for comparable patches on the chroma planes.')
@click.option('--show', type=int, default=-1, help='Set TMDB show identifier')
@click.option('--season', type=int, default=-1, help='Set season of show')
@click.option('--episode', type=int, default=-1, help='Set episode of show')
@click.option("--no-tmdb", is_flag=True, default=False)
@click.option("--no-pattern", is_flag=True, default=False)
@click.option("--dont-pass-dispositions", is_flag=True, default=False)
@click.option("--no-prompt", is_flag=True, default=False)
@click.option("--no-signature", is_flag=True, default=False)
@click.option("--keep-mkvmerge-metadata", is_flag=True, default=False)
@click.option('--nice', type=int, default=99, help='Niceness of started processes')
@click.option('--cpu', type=int, default=0, help='Limit CPU for started processes to percent')
@click.option('--rename-only', is_flag=True, default=False, help='Only renaming, no recoding')
def convert(ctx,
            paths,
            label,
            video_encoder,
            quality,
            preset,
            stereo_bitrate,
            ac3,
            dts,
            subtitle_directory,
            subtitle_prefix,
            language,
            title,
            default_video,
            forced_video,
            default_audio,
            forced_audio,
            default_subtitle,
            forced_subtitle,
            rearrange_streams,
            crop,
            cut,
            output_directory,
            deinterlace,
            denoise,
            denoise_use_hw,
            denoise_strength,
            denoise_patch_size,
            denoise_chroma_patch_size,
            denoise_research_window,
            denoise_chroma_research_window,
            show,
            season,
            episode,
            no_tmdb,
            no_pattern,
            dont_pass_dispositions,
            no_prompt,
            no_signature,
            keep_mkvmerge_metadata,
            nice,
            cpu,
            rename_only):
    """Batch conversion of audio/video files into a format suitable for web playback, e.g. Jellyfin.

    Files found under PATHS will be converted according to the parameters.
    Filename extensions will be changed appropriately.
    Suffixes will be appended to the filename in case of multiple created files
    or if the filename has not changed."""

    startTime = time.perf_counter()

    context = ctx.obj

    context['video_encoder'] = VideoEncoder.fromLabel(video_encoder)

    # HINT: quick and dirty override for h264, TODO: improve
    targetFormat = '' if context['video_encoder'] == VideoEncoder.H264 else FfxController.DEFAULT_FILE_FORMAT
    targetExtension = 'mkv' if context['video_encoder'] == VideoEncoder.H264 else FfxController.DEFAULT_FILE_EXTENSION

    context['use_tmdb'] = not no_tmdb
    context['use_pattern'] = not no_pattern
    context['no_prompt'] = no_prompt
    context['no_signature'] = no_signature
    context['keep_mkvmerge_metadata'] = keep_mkvmerge_metadata

    context['resource_limits'] = {}
    context['resource_limits']['niceness'] = nice
    context['resource_limits']['cpu_percent'] = cpu

    context['import_subtitles'] = (subtitle_directory and subtitle_prefix)
    if context['import_subtitles']:
        context['subtitle_directory'] = subtitle_directory
        context['subtitle_prefix'] = subtitle_prefix

    existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]

    # CLI overrides

    cliOverrides = {}

    if language:
        cliOverrides['languages'] = {}
        for overLang in language:
            olTokens = overLang.split(':')
            if len(olTokens) == 2:
                try:
                    cliOverrides['languages'][int(olTokens[0])] = olTokens[1]
                except ValueError:
                    ctx.obj['logger'].warning(f"Ignoring non-integer language index {olTokens[0]}")
                    continue

    if title:
        cliOverrides['titles'] = {}
        for overTitle in title:
            otTokens = overTitle.split(':')
            if len(otTokens) == 2:
                try:
                    cliOverrides['titles'][int(otTokens[0])] = otTokens[1]
                except ValueError:
                    ctx.obj['logger'].warning(f"Ignoring non-integer title index {otTokens[0]}")
                    continue

    if default_video != -1:
        cliOverrides['default_video'] = default_video
    if forced_video != -1:
        cliOverrides['forced_video'] = forced_video
    if default_audio != -1:
        cliOverrides['default_audio'] = default_audio
    if forced_audio != -1:
        cliOverrides['forced_audio'] = forced_audio
    if default_subtitle != -1:
        cliOverrides['default_subtitle'] = default_subtitle
    if forced_subtitle != -1:
        cliOverrides['forced_subtitle'] = forced_subtitle

    if show != -1 or season != -1 or episode != -1:
        if len(existingSourcePaths) > 1:
            context['logger'].warning(f"Ignoring TMDB show, season, episode overrides, not supported for multiple source files")
        else:
            cliOverrides['tmdb'] = {}
            if show != -1:
                cliOverrides['tmdb']['show'] = show
            if season != -1:
                cliOverrides['tmdb']['season'] = season
            if episode != -1:
                cliOverrides['tmdb']['episode'] = episode

    if cliOverrides:
        context['overrides'] = cliOverrides

    if rearrange_streams:
        try:
            cliOverrides['stream_order'] = [int(si) for si in rearrange_streams.split(",")]
        except ValueError:
            errorMessage = "Non-integer in rearrange stream parameter"
            ctx.obj['logger'].error(errorMessage)
            raise click.Abort()

    ctx.obj['logger'].debug(f"\nVideo encoder: {video_encoder}")

    context['bitrates'] = {}
    context['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
    context['bitrates']['ac3'] = str(ac3) if str(ac3).endswith('k') else f"{ac3}k"
    context['bitrates']['dts'] = str(dts) if str(dts).endswith('k') else f"{dts}k"

    ctx.obj['logger'].debug(f"Stereo bitrate: {context['bitrates']['stereo']}")
    ctx.obj['logger'].debug(f"AC3 bitrate: {context['bitrates']['ac3']}")
    ctx.obj['logger'].debug(f"DTS bitrate: {context['bitrates']['dts']}")

    # Process cut parameters
    context['perform_cut'] = (cut != 'none')
    if context['perform_cut']:
        cutTokens = cut.split(',')
        if cutTokens and len(cutTokens) == 2:
            context['cut_start'] = int(cutTokens[0])
            context['cut_length'] = int(cutTokens[1])
            ctx.obj['logger'].debug(f"Cut start={context['cut_start']} length={context['cut_length']}")

    tc = TmdbController() if context['use_tmdb'] else None

    qualityKwargs = {QualityFilter.QUALITY_KEY: str(quality)}
    qf = QualityFilter(**qualityKwargs)

    if context['video_encoder'] == VideoEncoder.AV1 and preset:
        presetKwargs = {PresetFilter.PRESET_KEY: preset}
        PresetFilter(**presetKwargs)

    cf = None
    # if crop != 'none':
    if crop == 'auto':
        cropKwargs = {}
        cf = CropFilter(**cropKwargs)

    denoiseKwargs = {}
    if denoise_strength:
        denoiseKwargs[NlmeansFilter.STRENGTH_KEY] = denoise_strength
    if denoise_patch_size:
        denoiseKwargs[NlmeansFilter.PATCH_SIZE_KEY] = denoise_patch_size
    if denoise_chroma_patch_size:
        denoiseKwargs[NlmeansFilter.CHROMA_PATCH_SIZE_KEY] = denoise_chroma_patch_size
    if denoise_research_window:
        denoiseKwargs[NlmeansFilter.RESEARCH_WINDOW_KEY] = denoise_research_window
    if denoise_chroma_research_window:
        denoiseKwargs[NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY] = denoise_chroma_research_window
    if denoise != 'none' or denoiseKwargs:
        NlmeansFilter(**denoiseKwargs)

    if deinterlace != 'none':
        DeinterlaceFilter()

    chainYield = list(qf.getChainYield())

    ctx.obj['logger'].info(f"\nRunning {len(existingSourcePaths) * len(chainYield)} jobs")

    jobIndex = 0

    for sourcePath in existingSourcePaths:

        # Separate base directory, basename and extension for the current source file
        sourceDirectory = os.path.dirname(sourcePath)
        sourceFilename = os.path.basename(sourcePath)
        sourcePathTokens = sourceFilename.split('.')

        sourceFileBasename = '.'.join(sourcePathTokens[:-1])
        sourceFilenameExtension = sourcePathTokens[-1]

        ctx.obj['logger'].info(f"\nProcessing file {sourcePath}")

        targetSuffices = {}

        mediaFileProperties = FileProperties(context, sourcePath)

        # if not cf is None:
        #
        cropArguments = {} if cf is None else mediaFileProperties.findCropArguments()
        #
        #     ctx.obj['logger'].info(f"\nSetting crop arguments: output width: {cropArguments[CropFilter.OUTPUT_WIDTH_KEY]} "
        #                            + f"height: {cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]} "
        #                            + f"offset x: {cropArguments[CropFilter.OFFSET_X_KEY]} "
        #                            + f"y: {cropArguments[CropFilter.OFFSET_Y_KEY]}")
        #
        #     cf.setArguments(**cropArguments)

        ssc = ShiftedSeasonController(context)

        showId = mediaFileProperties.getShowId()

        # HINT: -1 if not set
        if 'tmdb' in cliOverrides.keys() and 'season' in cliOverrides['tmdb']:
            showSeason = cliOverrides['tmdb']['season']
        else:
            showSeason = mediaFileProperties.getSeason()

        if 'tmdb' in cliOverrides.keys() and 'episode' in cliOverrides['tmdb']:
            showEpisode = cliOverrides['tmdb']['episode']
        else:
            showEpisode = mediaFileProperties.getEpisode()

        ctx.obj['logger'].debug(f"Season={showSeason} Episode={showEpisode}")

        sourceMediaDescriptor = mediaFileProperties.getMediaDescriptor()

        # HINT: This is None if the filename did not match anything in the database
        currentPattern = mediaFileProperties.getPattern() if context['use_pattern'] else None

        ctx.obj['logger'].debug(f"Pattern matching: {'No' if currentPattern is None else 'Yes'}")

        # Set up the FfxController depending on whether pattern matching is enabled and a pattern was matched
        if currentPattern is None:

            checkUniqueDispositions(context, sourceMediaDescriptor)
            currentShowDescriptor = None

            if context['import_subtitles']:
                sourceMediaDescriptor.importSubtitles(context['subtitle_directory'],
                                                      context['subtitle_prefix'],
                                                      showSeason,
                                                      showEpisode)

            if cliOverrides:
                sourceMediaDescriptor.applyOverrides(cliOverrides)

            fc = FfxController(context, sourceMediaDescriptor)

        else:
            targetMediaDescriptor = currentPattern.getMediaDescriptor(ctx.obj)
            checkUniqueDispositions(context, targetMediaDescriptor)
            currentShowDescriptor = currentPattern.getShowDescriptor(ctx.obj)

            # Check if source and target track descriptors match
            sourceTrackDescriptorList = sourceMediaDescriptor.getTrackDescriptors()
            targetTrackDescriptorList = targetMediaDescriptor.getTrackDescriptors()

            for ttd in targetTrackDescriptorList:

                tti = ttd.getIndex()
                ttsi = ttd.getSourceIndex()

                stList = [st for st in sourceTrackDescriptorList if st.getIndex() == ttsi]
                std = stList[0] if stList else None

                if std is None:
                    raise click.ClickException(f"Target track #{tti} referring to non-existent source track #{ttsi}")

                ttType = ttd.getType()
                stType = std.getType()

                if ttType != stType:
                    raise click.ClickException(f"Target track #{tti} type ({ttType.label()}) not matching source track #{ttsi} type ({stType.label()})")

            if context['import_subtitles']:
                targetMediaDescriptor.importSubtitles(context['subtitle_directory'],
                                                      context['subtitle_prefix'],
                                                      showSeason,
                                                      showEpisode)

            # ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
            ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")

            if cliOverrides:
                targetMediaDescriptor.applyOverrides(cliOverrides)

            # ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getAllTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getAllTrackDescriptors()]}")
            ctx.obj['logger'].debug(f"tmd subindices: {[t.getIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getSubIndex() for t in targetMediaDescriptor.getTrackDescriptors()]} {[t.getDispositionFlag(TrackDisposition.DEFAULT) for t in targetMediaDescriptor.getTrackDescriptors()]}")

            ctx.obj['logger'].debug(f"Input mapping tokens (2nd pass): {targetMediaDescriptor.getInputMappingTokens()}")

            fc = FfxController(context, targetMediaDescriptor, sourceMediaDescriptor)

        indexSeasonDigits = currentShowDescriptor.getIndexSeasonDigits() if currentPattern is not None else ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
        indexEpisodeDigits = currentShowDescriptor.getIndexEpisodeDigits() if currentPattern is not None else ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
        indicatorSeasonDigits = currentShowDescriptor.getIndicatorSeasonDigits() if currentPattern is not None else ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
        indicatorEpisodeDigits = currentShowDescriptor.getIndicatorEpisodeDigits() if currentPattern is not None else ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS

        # Shift season and episode if defined for this show
        if ('tmdb' not in cliOverrides.keys() and showId != -1
                and showSeason != -1 and showEpisode != -1):
            shiftedShowSeason, shiftedShowEpisode = ssc.shiftSeason(showId,
                                                                    season=showSeason,
                                                                    episode=showEpisode)
        else:
            shiftedShowSeason = showSeason
            shiftedShowEpisode = showEpisode

        # Assemble target filename depending on whether TMDB lookup is enabled
        # HINT: -1 if not set
        showId = cliOverrides['tmdb']['show'] if 'tmdb' in cliOverrides.keys() and 'show' in cliOverrides['tmdb'] else (-1 if currentShowDescriptor is None else currentShowDescriptor.getId())

        if context['use_tmdb'] and showId != -1 and shiftedShowSeason != -1 and shiftedShowEpisode != -1:

            ctx.obj['logger'].debug(f"Querying TMDB for show_id={showId} season={shiftedShowSeason} episode={shiftedShowEpisode}")

            if currentPattern is None:
                sName, showYear = tc.getShowNameAndYear(showId)
                showName = filterFilename(sName)
                showFilenamePrefix = f"{showName} ({str(showYear)})"
            else:
                showFilenamePrefix = currentShowDescriptor.getFilenamePrefix()

            tmdbEpisodeResult = tc.queryEpisode(showId, shiftedShowSeason, shiftedShowEpisode)

            ctx.obj['logger'].debug(f"tmdbEpisodeResult={tmdbEpisodeResult}")

            if tmdbEpisodeResult:
                substitutedEpisodeName = filterFilename(substituteTmdbFilename(tmdbEpisodeResult['name']))
                sourceFileBasename = getEpisodeFileBasename(showFilenamePrefix,
                                                            substitutedEpisodeName,
                                                            shiftedShowSeason,
                                                            shiftedShowEpisode,
                                                            indexSeasonDigits,
                                                            indexEpisodeDigits,
                                                            indicatorSeasonDigits,
                                                            indicatorEpisodeDigits,
                                                            context=ctx.obj)

        if label:
            if shiftedShowSeason > -1 and shiftedShowEpisode > -1:
                targetSuffices['se'] = f"S{shiftedShowSeason:0{indicatorSeasonDigits}d}E{shiftedShowEpisode:0{indicatorEpisodeDigits}d}"
            elif shiftedShowEpisode > -1:
                targetSuffices['se'] = f"E{shiftedShowEpisode:0{indicatorEpisodeDigits}d}"
        else:
            if 'se' in targetSuffices.keys():
                del targetSuffices['se']

        ctx.obj['logger'].debug(f"fileBasename={sourceFileBasename}")

        for chainIteration in chainYield:

            ctx.obj['logger'].debug(f"\nchain iteration: {chainIteration}\n")

            chainVariant = '-'.join([fy['variant'] for fy in chainIteration])

            ctx.obj['logger'].debug(f"\nRunning job {jobIndex} file={sourcePath} variant={chainVariant}")
            jobIndex += 1

            ctx.obj['logger'].debug(f"label={label if label else 'Falsy'}")
            ctx.obj['logger'].debug(f"sourceFileBasename={sourceFileBasename}")

            targetFileBasename = sourceFileBasename if context['use_tmdb'] and not label else label

            targetFilenameTokens = [targetFileBasename]

            if 'se' in targetSuffices.keys():
                targetFilenameTokens += [targetSuffices['se']]

            for filterYield in chainIteration:
                targetFilenameTokens += filterYield['suffices']

            targetFilename = f"{'_'.join(targetFilenameTokens)}.{sourceFilenameExtension if rename_only else targetExtension}"

            if sourceFilename == targetFilename:
                targetFilename = f"out_{targetFilename}"

            targetPath = os.path.join(output_directory, targetFilename) if output_directory else targetFilename

            ctx.obj['logger'].info(f"Creating file {targetFilename}")

            if rename_only:
                shutil.copyfile(sourcePath, targetPath)
            else:
                fc.runJob(sourcePath,
                          targetPath,
                          targetFormat,
                          chainIteration,
                          cropArguments,
                          currentPattern)

    endTime = time.perf_counter()
    ctx.obj['logger'].info(f"\nDONE\nTime elapsed {endTime - startTime}")


if __name__ == '__main__':
    ffx()
@@ -1,38 +0,0 @@
from textual.app import App

from .shows_screen import ShowsScreen
from .media_details_screen import MediaDetailsScreen


class FfxApp(App):

    TITLE = "FFX"

    BINDINGS = [
        ("q", "quit()", "Quit"),
        ("h", "switch_mode('help')", "Help"),
    ]


    def __init__(self, context: dict = None):
        super().__init__()

        # Data 'input' variable (avoid a shared mutable default argument)
        self.context = context if context is not None else {}


    def on_mount(self) -> None:

        if 'command' in self.context.keys():

            if self.context['command'] == 'shows':
                self.push_screen(ShowsScreen())

            if self.context['command'] == 'inspect':
                self.push_screen(MediaDetailsScreen())


    def getContext(self):
        """Data 'output' method"""
        return self.context
@ -1,381 +0,0 @@
|
||||
import os, click
|
||||
|
||||
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
|
||||
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
from ffx.audio_layout import AudioLayout
|
||||
from ffx.track_type import TrackType
|
||||
from ffx.track_codec import TrackCodec
|
||||
from ffx.video_encoder import VideoEncoder
|
||||
from ffx.process import executeProcess
|
||||
|
||||
from ffx.constants import DEFAULT_cut_start, DEFAULT_cut_length
|
||||
|
||||
from ffx.filter.quality_filter import QualityFilter
|
||||
from ffx.filter.preset_filter import PresetFilter
|
||||
from ffx.filter.crop_filter import CropFilter
|
||||
|
||||
from ffx.model.pattern import Pattern
|
||||
|
||||
|
||||
class FfxController():
|
||||
|
||||
COMMAND_TOKENS = ['ffmpeg', '-y']
|
||||
NULL_TOKENS = ['-f', 'null', '/dev/null'] # -f null /dev/null
|
||||
|
||||
TEMP_FILE_NAME = "ffmpeg2pass-0.log"
|
||||
|
||||
DEFAULT_VIDEO_ENCODER = VideoEncoder.VP9.label()
|
||||
|
||||
DEFAULT_FILE_FORMAT = 'webm'
|
||||
DEFAULT_FILE_EXTENSION = 'webm'
|
||||
|
||||
INPUT_FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
||||
|
||||
CHANNEL_MAP_5_1 = 'FL-FL|FR-FR|FC-FC|LFE-LFE|SL-BL|SR-BR:5.1'
|
||||
|
||||
# SIGNATURE_TAGS = {'RECODED_WITH': 'FFX'}
|
||||
|
||||
def __init__(self,
|
||||
context : dict,
|
||||
targetMediaDescriptor : MediaDescriptor,
|
||||
sourceMediaDescriptor : MediaDescriptor = None):
|
||||
|
||||
self.__context = context
|
||||
|
||||
self.__targetMediaDescriptor = targetMediaDescriptor
|
||||
self.__mdcs = MediaDescriptorChangeSet(context,
|
||||
targetMediaDescriptor,
|
||||
sourceMediaDescriptor)
|
||||
|
||||
self.__logger = context['logger']
|
||||
|
||||
|
||||
def generateAV1Tokens(self, quality, preset, subIndex : int = 0):
|
||||
|
||||
return [f"-c:v:{int(subIndex)}", 'libsvtav1',
|
||||
'-svtav1-params', f"crf={quality}:preset={preset}:tune=0:enable-overlays=1:scd=1:scm=0",
|
||||
'-pix_fmt', 'yuv420p10le']
|
||||
|
||||
|
||||
# -c:v libx264 -preset slow -crf 17
|
||||
def generateH264Tokens(self, quality, subIndex : int = 0):
|
||||
|
||||
return [f"-c:v:{int(subIndex)}", 'libx264',
|
||||
"-preset", "slow",
|
||||
'-crf', str(quality)]
|
||||
|
||||
|
||||
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 1 -speed 4 -frame-parallel 0 -g 9999 -aq-mode 0
|
||||
def generateVP9Pass1Tokens(self, quality, subIndex : int = 0):
|
||||
|
||||
return [f"-c:v:{int(subIndex)}",
|
||||
'libvpx-vp9',
|
||||
'-row-mt', '1',
|
||||
'-crf', str(quality),
|
||||
'-pass', '1',
|
||||
'-speed', '4',
|
||||
'-frame-parallel', '0',
|
||||
'-g', '9999',
|
||||
'-aq-mode', '0']
|
||||
|
||||
# -c:v:0 libvpx-vp9 -row-mt 1 -crf 32 -pass 2 -frame-parallel 0 -g 9999 -aq-mode 0 -auto-alt-ref 1 -lag-in-frames 25
|
||||
def generateVP9Pass2Tokens(self, quality, subIndex : int = 0):
|
||||
|
||||
return [f"-c:v:{int(subIndex)}",
|
||||
'libvpx-vp9',
|
||||
'-row-mt', '1',
|
||||
'-crf', str(quality),
|
||||
'-pass', '2',
|
||||
'-frame-parallel', '0',
|
||||
'-g', '9999',
|
||||
'-aq-mode', '0',
|
||||
'-auto-alt-ref', '1',
|
||||
'-lag-in-frames', '25']
|
||||
|
||||
def generateVideoCopyTokens(self, subIndex):
|
||||
return [f"-c:v:{int(subIndex)}",
|
||||
'copy']
|
||||
|
||||
|
||||
def generateCropTokens(self):
|
||||
|
||||
if 'cut_start' in self.__context.keys() and 'cut_length' in self.__context.keys():
|
||||
cropStart = int(self.__context['cut_start'])
|
||||
cropLength = int(self.__context['cut_length'])
|
||||
else:
|
||||
cropStart = DEFAULT_cut_start
|
||||
cropLength = DEFAULT_cut_length
|
||||
|
||||
return ['-ss', str(cropStart), '-t', str(cropLength)]
|
||||
|
||||
|
||||
def generateOutputTokens(self, filePathBase, format = '', ext = ''):
|
||||
|
||||
self.__logger.debug(f"FfxController.generateOutputTokens(): base='{filePathBase}' format='{format}' ext='{ext}'")
|
||||
|
||||
outputFilePath = f"{filePathBase}{('.'+str(ext)) if ext else ''}"
|
||||
if format:
|
||||
return ['-f', format, outputFilePath]
|
||||
else:
|
||||
return [outputFilePath]
|
||||
|
||||
|
||||
def generateAudioEncodingTokens(self):
|
||||
"""Generates ffmpeg options audio streams including channel remapping, codec and bitrate"""
|
||||
|
||||
audioTokens = []
|
||||
|
||||
# targetAudioTrackDescriptors = [td for td in self.__targetMediaDescriptor.getAllTrackDescriptors() if td.getType() == TrackType.AUDIO]
|
||||
targetAudioTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.AUDIO)
|
||||
|
||||
trackSubIndex = 0
|
||||
for trackDescriptor in targetAudioTrackDescriptors:
|
||||
|
||||
trackAudioLayout = trackDescriptor.getAudioLayout()
|
||||
|
||||
if trackAudioLayout == AudioLayout.LAYOUT_6_1:
|
||||
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||
'libopus',
|
||||
f"-filter:a:{trackSubIndex}",
|
||||
'channelmap=channel_layout=6.1',
|
||||
f"-b:a:{trackSubIndex}",
|
||||
self.__context['bitrates']['dts']]
|
||||
|
||||
if trackAudioLayout == AudioLayout.LAYOUT_5_1:
|
||||
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||
'libopus',
|
||||
f"-filter:a:{trackSubIndex}",
|
||||
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
|
||||
f"-b:a:{trackSubIndex}",
|
||||
self.__context['bitrates']['ac3']]
|
||||
|
||||
if trackAudioLayout == AudioLayout.LAYOUT_STEREO:
|
||||
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||
'libopus',
|
||||
f"-b:a:{trackSubIndex}",
|
||||
self.__context['bitrates']['stereo']]
|
||||
|
||||
if trackAudioLayout == AudioLayout.LAYOUT_6CH:
|
||||
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||
'libopus',
|
||||
f"-filter:a:{trackSubIndex}",
|
||||
f"channelmap={FfxController.CHANNEL_MAP_5_1}",
|
||||
f"-b:a:{trackSubIndex}",
|
||||
self.__context['bitrates']['ac3']]
|
||||
|
||||
# -ac 5 ?
|
||||
if trackAudioLayout == AudioLayout.LAYOUT_5_0:
|
||||
audioTokens += [f"-c:a:{trackSubIndex}",
|
||||
'libopus',
|
||||
f"-filter:a:{trackSubIndex}",
|
||||
'channelmap=channel_layout=5.0',
|
||||
f"-b:a:{trackSubIndex}",
|
||||
self.__context['bitrates']['ac3']]
|
||||
|
||||
trackSubIndex += 1
|
||||
return audioTokens
|
||||
|
||||
|
||||
def runJob(self,
|
||||
sourcePath,
|
||||
targetPath,
|
||||
targetFormat: str = '',
|
||||
chainIteration: list = [],
|
||||
cropArguments: dict = {},
|
||||
currentPattern: Pattern = None):
|
||||
# quality: int = DEFAULT_QUALITY,
|
||||
# preset: int = DEFAULT_AV1_PRESET):
|
||||
|
||||
|
||||
videoEncoder: VideoEncoder = self.__context.get('video_encoder', VideoEncoder.VP9)
|
||||
|
||||
|
||||
qualityFilters = [fy for fy in chainIteration if fy['identifier'] == 'quality']
|
||||
presetFilters = [fy for fy in chainIteration if fy['identifier'] == 'preset']
|
||||
|
||||
cropFilters = [fy for fy in chainIteration if fy['identifier'] == 'crop']
|
||||
denoiseFilters = [fy for fy in chainIteration if fy['identifier'] == 'nlmeans']
|
||||
deinterlaceFilters = [fy for fy in chainIteration if fy['identifier'] == 'bwdif']
|
||||
|
||||
|
||||
if qualityFilters and (quality := qualityFilters[0]['parameters']['quality']):
|
||||
self.__logger.info(f"Setting quality {quality} from filter")
|
||||
elif (quality := currentPattern.quality):
|
||||
self.__logger.info(f"Setting quality {quality} from pattern default")
|
||||
else:
|
||||
quality = (QualityFilter.DEFAULT_H264_QUALITY
|
||||
if (videoEncoder == VideoEncoder.H264)
|
||||
else QualityFilter.DEFAULT_VP9_QUALITY)
|
||||
self.__logger.info(f"Setting quality {quality} from default")
|
||||
|
||||
|
||||
preset = presetFilters[0]['parameters']['preset'] if presetFilters else PresetFilter.DEFAULT_PRESET
|
||||
|
||||
|
||||
filterParamTokens = []
|
||||
|
||||
if cropArguments:
|
||||
|
||||
cropParams = (f"crop="
|
||||
+ f"{cropArguments[CropFilter.OUTPUT_WIDTH_KEY]}"
|
||||
+ f":{cropArguments[CropFilter.OUTPUT_HEIGHT_KEY]}"
|
||||
+ f":{cropArguments[CropFilter.OFFSET_X_KEY]}"
|
||||
+ f":{cropArguments[CropFilter.OFFSET_Y_KEY]}")
|
||||
|
||||
filterParamTokens.append(cropParams)
|
||||
|
||||
filterParamTokens.extend(denoiseFilters[0]['tokens'] if denoiseFilters else [])
|
||||
filterParamTokens.extend(deinterlaceFilters[0]['tokens'] if deinterlaceFilters else [])
|
||||
|
||||
deinterlaceFilters
|
||||
|
||||
filterTokens = ['-vf', ', '.join(filterParamTokens)] if filterParamTokens else []
|
||||
|
||||
|
||||
commandTokens = FfxController.COMMAND_TOKENS + ['-i', sourcePath]
|
||||
|
||||
if videoEncoder == VideoEncoder.AV1:
|
||||
|
||||
commandSequence = (commandTokens
|
||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
||||
+ self.__mdcs.generateDispositionTokens())
|
||||
|
||||
# Optional tokens
|
||||
commandSequence += self.__mdcs.generateMetadataTokens()
|
||||
commandSequence += filterTokens
|
||||
|
||||
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||
#HINT: Attached thumbnails are not supported by .webm container format
|
||||
if td.getCodec != TrackCodec.PNG:
|
||||
commandSequence += self.generateAV1Tokens(int(quality), int(preset))
|
||||
|
||||
commandSequence += self.generateAudioEncodingTokens()
|
||||
|
||||
if self.__context['perform_cut']:
|
||||
commandSequence += self.generateCropTokens()
|
||||
|
||||
commandSequence += self.generateOutputTokens(targetPath,
|
||||
targetFormat)
|
||||
|
||||
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
|
||||
|
||||
if not self.__context['dry_run']:
|
||||
executeProcess(commandSequence, context = self.__context)
|
||||
|
||||
|
||||
if videoEncoder == VideoEncoder.H264:
|
||||
|
||||
commandSequence = (commandTokens
|
||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
||||
+ self.__mdcs.generateDispositionTokens())
|
||||
|
||||
# Optional tokens
|
||||
commandSequence += self.__mdcs.generateMetadataTokens()
|
||||
commandSequence += filterTokens
|
||||
|
||||
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
|
||||
#HINT: Attached thumbnails are not supported by .webm container format
|
||||
if td.getCodec != TrackCodec.PNG:
|
||||
commandSequence += self.generateH264Tokens(int(quality))
|
||||
|
||||
commandSequence += self.generateAudioEncodingTokens()
|
||||
|
||||
if self.__context['perform_cut']:
|
||||
commandSequence += self.generateCropTokens()
|
||||
|
||||
commandSequence += self.generateOutputTokens(targetPath,
|
||||
targetFormat)
|
||||
|
||||
self.__logger.debug(f"FfxController.runJob(): Running command sequence")
|
||||
|
||||
if not self.__context['dry_run']:
|
||||
executeProcess(commandSequence, context = self.__context)
|
||||
|
||||
|
||||
|
||||
if videoEncoder == VideoEncoder.VP9:
|
||||
|
||||
commandSequence1 = (commandTokens
|
||||
+ self.__targetMediaDescriptor.getInputMappingTokens(only_video=True))
|
||||
|
||||
# Optional tokens
#NOTE: Filters and similar options need to run on the first pass as well,
#      since the required bitrate for the second pass is determined and recorded here
# TODO: Results seem to be slightly better with the first pass omitted;
#       confirm, or find better filter settings for 2-pass encoding
# commandSequence1 += self.__context['denoiser'].generatefilterTokens()
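# Illustrative sketch only (the real tokens come from generateVP9Pass1Tokens /
# generateVP9Pass2Tokens, which are not part of this diff): a libvpx-vp9 two-pass
# run is usually shaped like
#   pass 1: ffmpeg -i in.mkv -c:v libvpx-vp9 -crf 32 -b:v 0 -pass 1 -an -f null /dev/null
#   pass 2: ffmpeg -i in.mkv -c:v libvpx-vp9 -crf 32 -b:v 0 -pass 2 -c:a libopus out.webm
# where pass 1 only analyzes the video and writes a stats file that pass 2 consumes.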
|
||||
|
||||
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
    #HINT: Attached thumbnails are not supported by .webm container format
    if td.getCodec() != TrackCodec.PNG:
        commandSequence1 += self.generateVP9Pass1Tokens(int(quality))
|
||||
|
||||
if self.__context['perform_cut']:
|
||||
commandSequence1 += self.generateCropTokens()
|
||||
|
||||
commandSequence1 += FfxController.NULL_TOKENS
|
||||
|
||||
if os.path.exists(FfxController.TEMP_FILE_NAME):
|
||||
os.remove(FfxController.TEMP_FILE_NAME)
|
||||
|
||||
self.__logger.debug(f"FfxController.runJob(): Running command sequence 1")
|
||||
|
||||
if not self.__context['dry_run']:
|
||||
executeProcess(commandSequence1, context = self.__context)
|
||||
|
||||
commandSequence2 = (commandTokens
|
||||
+ self.__targetMediaDescriptor.getImportFileTokens()
|
||||
+ self.__targetMediaDescriptor.getInputMappingTokens()
|
||||
+ self.__mdcs.generateDispositionTokens())
|
||||
|
||||
# Optional tokens
|
||||
commandSequence2 += self.__mdcs.generateMetadataTokens()
|
||||
commandSequence2 += filterTokens
|
||||
|
||||
for td in self.__targetMediaDescriptor.getTrackDescriptors(trackType=TrackType.VIDEO):
    #HINT: Attached thumbnails are not supported by .webm container format
    if td.getCodec() != TrackCodec.PNG:
        commandSequence2 += self.generateVP9Pass2Tokens(int(quality))
|
||||
|
||||
commandSequence2 += self.generateAudioEncodingTokens()
|
||||
|
||||
if self.__context['perform_cut']:
|
||||
commandSequence2 += self.generateCropTokens()
|
||||
|
||||
commandSequence2 += self.generateOutputTokens(targetPath,
|
||||
targetFormat)
|
||||
|
||||
self.__logger.debug(f"FfxController.runJob(): Running command sequence 2")
|
||||
|
||||
if not self.__context['dry_run']:
|
||||
out, err, rc = executeProcess(commandSequence2, context = self.__context)
|
||||
if rc:
|
||||
raise click.ClickException(f"Command resulted in error: rc={rc} error={err}")
|
||||
|
||||
|
||||
|
||||
def createEmptyFile(self,
|
||||
path: str = 'empty.mkv',
|
||||
sizeX: int = 1280,
|
||||
sizeY: int = 720,
|
||||
rate: int = 25,
|
||||
length: int = 10):
|
||||
|
||||
# Copy the class-level token list so '+=' does not mutate FfxController.COMMAND_TOKENS
commandTokens = list(FfxController.COMMAND_TOKENS)

commandTokens += ['-f',
                  'lavfi',
                  '-i',
                  f"color=size={sizeX}x{sizeY}:rate={rate}:color=black",
                  '-f',
                  'lavfi',
                  '-i',
                  'anullsrc=channel_layout=stereo:sample_rate=44100',
                  '-t',
                  str(length),
                  path]
|
||||
|
||||
out, err, rc = executeProcess(commandTokens, context = self.__context)
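# Minimal sketch of the ffmpeg call createEmptyFile() builds (assuming COMMAND_TOKENS
# starts with something like ['ffmpeg', '-y']; that constant is not part of this diff):
#   ffmpeg -y -f lavfi -i color=size=1280x720:rate=25:color=black \
#          -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=44100 \
#          -t 10 empty.mkv
# i.e. a black test clip with a silent stereo audio track.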
|
||||
@ -1,124 +0,0 @@
|
||||
#! /usr/bin/python3
|
||||
|
||||
import os, logging, click
|
||||
|
||||
from ffx.configuration_controller import ConfigurationController
|
||||
|
||||
from ffx.file_properties import FileProperties
|
||||
from ffx.ffx_controller import FfxController
|
||||
|
||||
from ffx.test.helper import createMediaTestFile
|
||||
|
||||
from ffx.test.scenario import Scenario
|
||||
from ffx.tmdb_controller import TMDB_API_KEY_NOT_PRESENT_EXCEPTION
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.pass_context
|
||||
@click.option('-v', '--verbose', type=int, default=0, help='Set verbosity of output')
|
||||
@click.option("--dry-run", is_flag=True, default=False)
|
||||
def ffx(ctx, verbose, dry_run):
|
||||
"""FFX"""
|
||||
|
||||
ctx.obj = {}
|
||||
|
||||
ctx.obj['config'] = ConfigurationController()
|
||||
|
||||
ctx.obj['database'] = None
|
||||
ctx.obj['dry_run'] = dry_run
|
||||
|
||||
ctx.obj['verbosity'] = verbose
|
||||
|
||||
# Critical 50
|
||||
# Error 40
|
||||
# Warning 30
|
||||
# Info 20
|
||||
# Debug 10
|
||||
fileLogVerbosity = max(40 - verbose * 10, 10)
|
||||
consoleLogVerbosity = max(20 - verbose * 10, 10)
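# Worked example of the formulas above: with -v 1 the file handler ends up at
# WARNING (max(40 - 10, 10) = 30) and the console at DEBUG (max(20 - 10, 10) = 10);
# both are clamped so they never drop below DEBUG (10).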
|
||||
|
||||
ctx.obj['logger'] = logging.getLogger('FFX Tests')
|
||||
ctx.obj['logger'].setLevel(logging.DEBUG)
|
||||
|
||||
ctx.obj['report_logger'] = logging.getLogger('FFX Test Result')
|
||||
ctx.obj['report_logger'].setLevel(logging.INFO)
|
||||
|
||||
ffxFileHandler = logging.FileHandler(ctx.obj['config'].getLogFilePath())
|
||||
ffxFileHandler.setLevel(fileLogVerbosity)
|
||||
ffxConsoleHandler = logging.StreamHandler()
|
||||
ffxConsoleHandler.setLevel(consoleLogVerbosity)
|
||||
|
||||
if os.path.isfile('ffx_test_report.log'):
|
||||
os.unlink('ffx_test_report.log')
|
||||
ffxTestReportFileHandler = logging.FileHandler('ffx_test_report.log')
|
||||
|
||||
fileFormatter = logging.Formatter(
|
||||
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||
ffxFileHandler.setFormatter(fileFormatter)
|
||||
consoleFormatter = logging.Formatter(
|
||||
'%(message)s')
|
||||
ffxConsoleHandler.setFormatter(consoleFormatter)
|
||||
reportFormatter = logging.Formatter(
|
||||
'%(message)s')
|
||||
ffxTestReportFileHandler.setFormatter(reportFormatter)
|
||||
|
||||
ctx.obj['logger'].addHandler(ffxConsoleHandler)
|
||||
ctx.obj['logger'].addHandler(ffxFileHandler)
|
||||
|
||||
ctx.obj['report_logger'].addHandler(ffxConsoleHandler)
|
||||
ctx.obj['report_logger'].addHandler(ffxTestReportFileHandler)
|
||||
|
||||
|
||||
# Another subcommand
|
||||
@ffx.command()
|
||||
@click.pass_context
|
||||
@click.option('--scenario', type=str, default='', help='Only run tests from this scenario')
|
||||
@click.option('--variant', type=str, default='', help='Only run variants beginning like this')
|
||||
@click.option('--limit', type=int, default=0, help='Only run this number of tests')
|
||||
def run(ctx, scenario, variant, limit):
|
||||
"""Run ffx test sequences"""
|
||||
|
||||
ctx.obj['logger'].info('Starting FFX test runs')
|
||||
ctx.obj['test_passed_counter'] = 0
|
||||
ctx.obj['test_failed_counter'] = 0
|
||||
|
||||
ctx.obj['test_variant'] = variant
|
||||
ctx.obj['test_limit'] = limit
|
||||
|
||||
for si in Scenario.list():
|
||||
|
||||
try:
|
||||
SCEN = Scenario.getClassReference(si)
|
||||
scen = SCEN(ctx.obj)
|
||||
|
||||
if scenario and scenario != scen.getScenario():
|
||||
continue
|
||||
|
||||
ctx.obj['logger'].debug(f"Running scenario {si}")
|
||||
|
||||
scen.run()
|
||||
|
||||
except TMDB_API_KEY_NOT_PRESENT_EXCEPTION:
    # SCEN is the class object returned by Scenario.getClassReference(), so __name__
    # (not __class__.__name__, which would name the metaclass) identifies the scenario
    ctx.obj['logger'].info(f"TMDB_API_KEY not set: Skipping {SCEN.__name__}")
|
||||
|
||||
ctx.obj['logger'].info(f"\n{ctx.obj['test_passed_counter']} tests passed")
|
||||
ctx.obj['logger'].info(f"{ctx.obj['test_failed_counter']} test failed")
|
||||
ctx.obj['logger'].info('\nDone.')
|
||||
|
||||
|
||||
@ffx.command()
|
||||
@click.pass_context
|
||||
@click.argument('paths', nargs=-1)
|
||||
def dupe(ctx, paths):
|
||||
|
||||
existingSourcePaths = [p for p in paths if os.path.isfile(p) and p.split('.')[-1] in FfxController.INPUT_FILE_EXTENSIONS]
|
||||
|
||||
for sourcePath in existingSourcePaths:
|
||||
|
||||
sourceFileProperties = FileProperties(ctx.obj, sourcePath)
|
||||
sourceMediaDescriptor = sourceFileProperties.getMediaDescriptor()
|
||||
|
||||
createMediaTestFile(sourceMediaDescriptor, baseName='dupe')
|
||||
|
||||
if __name__ == '__main__':
|
||||
ffx()
|
||||
@ -1,248 +0,0 @@
|
||||
import os, re, json
|
||||
|
||||
from .media_descriptor import MediaDescriptor
|
||||
from .pattern_controller import PatternController
|
||||
|
||||
from ffx.filter.crop_filter import CropFilter
|
||||
|
||||
from .process import executeProcess
|
||||
|
||||
from ffx.model.pattern import Pattern
|
||||
|
||||
|
||||
class FileProperties():
|
||||
|
||||
FILE_EXTENSIONS = ['mkv', 'mp4', 'avi', 'flv', 'webm']
|
||||
|
||||
SE_INDICATOR_PATTERN = '([sS][0-9]+[eE][0-9]+)'
|
||||
SEASON_EPISODE_INDICATOR_MATCH = '[sS]([0-9]+)[eE]([0-9]+)'
|
||||
EPISODE_INDICATOR_MATCH = '[eE]([0-9]+)'
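# For example, "Show.S01E02.mkv" matches SE_INDICATOR_PATTERN with group "S01E02",
# which SEASON_EPISODE_INDICATOR_MATCH then splits into season "01" and episode "02";
# EPISODE_INDICATOR_MATCH covers names that carry only an "E02"-style indicator.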
|
||||
|
||||
CROPDETECT_PATTERN = 'crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+$'
|
||||
|
||||
DEFAULT_INDEX_DIGITS = 3
|
||||
|
||||
def __init__(self, context, sourcePath):
|
||||
|
||||
self.context = context
|
||||
|
||||
self.__logger = context['logger']
|
||||
|
||||
# Separate basedir, basename and extension for current source file
|
||||
self.__sourcePath = sourcePath
|
||||
|
||||
self.__sourceDirectory = os.path.dirname(self.__sourcePath)
|
||||
self.__sourceFilename = os.path.basename(self.__sourcePath)
|
||||
|
||||
sourcePathTokens = self.__sourceFilename.split('.')
|
||||
|
||||
if sourcePathTokens[-1] in FileProperties.FILE_EXTENSIONS:
|
||||
self.__sourceFileBasename = '.'.join(sourcePathTokens[:-1])
|
||||
self.__sourceFilenameExtension = sourcePathTokens[-1]
|
||||
else:
|
||||
self.__sourceFileBasename = self.__sourceFilename
|
||||
self.__sourceFilenameExtension = ''
|
||||
|
||||
self.__pc = PatternController(context)
|
||||
|
||||
# Checking if database contains matching pattern
|
||||
matchResult = self.__pc.matchFilename(self.__sourceFilename)
|
||||
|
||||
self.__logger.debug(f"FileProperties.__init__(): Match result: {matchResult}")
|
||||
|
||||
self.__pattern: Pattern = matchResult['pattern'] if matchResult else None
|
||||
|
||||
if matchResult:
|
||||
databaseMatchedGroups = matchResult['match'].groups()
|
||||
self.__logger.debug(f"FileProperties.__init__(): Matched groups: {databaseMatchedGroups}")
|
||||
|
||||
seIndicator = databaseMatchedGroups[0]
|
||||
|
||||
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, seIndicator)
|
||||
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, seIndicator)
|
||||
|
||||
else:
|
||||
self.__logger.debug(f"FileProperties.__init__(): Checking file name for indicator {self.__sourceFilename}")
|
||||
|
||||
se_match = re.search(FileProperties.SEASON_EPISODE_INDICATOR_MATCH, self.__sourceFilename)
|
||||
e_match = re.search(FileProperties.EPISODE_INDICATOR_MATCH, self.__sourceFilename)
|
||||
|
||||
if se_match is not None:
|
||||
self.__season = int(se_match.group(1))
|
||||
self.__episode = int(se_match.group(2))
|
||||
elif e_match is not None:
|
||||
self.__season = -1
|
||||
self.__episode = int(e_match.group(1))
|
||||
else:
|
||||
self.__season = -1
|
||||
self.__episode = -1
|
||||
|
||||
|
||||
def getFormatData(self):
|
||||
"""
|
||||
"format": {
|
||||
"filename": "Downloads/nagatoro_s02/nagatoro_s01e02.mkv",
|
||||
"nb_streams": 18,
|
||||
"nb_programs": 0,
|
||||
"nb_stream_groups": 0,
|
||||
"format_name": "matroska,webm",
|
||||
"format_long_name": "Matroska / WebM",
|
||||
"start_time": "0.000000",
|
||||
"duration": "1420.063000",
|
||||
"size": "1489169824",
|
||||
"bit_rate": "8389316",
|
||||
"probe_score": 100,
|
||||
"tags": {
|
||||
"PUBLISHER": "Crunchyroll",
|
||||
"ENCODER": "Lavf58.29.100"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
# ffprobe -hide_banner -show_format -of json
|
||||
ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffprobe",
|
||||
"-hide_banner",
|
||||
"-show_format",
|
||||
"-of", "json",
|
||||
self.__sourcePath]) #,
|
||||
#context = self.context)
|
||||
|
||||
if 'Invalid data found when processing input' in ffprobeError:
|
||||
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
||||
|
||||
if returnCode != 0:
|
||||
raise Exception(f"ffprobe returned with error {returnCode}")
|
||||
|
||||
return json.loads(ffprobeOutput)['format']
|
||||
|
||||
|
||||
def getStreamData(self):
|
||||
"""Returns ffprobe stream data as array with elements according to the following example
|
||||
{
|
||||
"index": 4,
|
||||
"codec_name": "hdmv_pgs_subtitle",
|
||||
"codec_long_name": "HDMV Presentation Graphic Stream subtitles",
|
||||
"codec_type": "subtitle",
|
||||
"codec_tag_string": "[0][0][0][0]",
|
||||
"codec_tag": "0x0000",
|
||||
"r_frame_rate": "0/0",
|
||||
"avg_frame_rate": "0/0",
|
||||
"time_base": "1/1000",
|
||||
"start_pts": 0,
|
||||
"start_time": "0.000000",
|
||||
"duration_ts": 1421035,
|
||||
"duration": "1421.035000",
|
||||
"disposition": {
|
||||
"default": 1,
|
||||
"dub": 0,
|
||||
"original": 0,
|
||||
"comment": 0,
|
||||
"lyrics": 0,
|
||||
"karaoke": 0,
|
||||
"forced": 0,
|
||||
"hearing_impaired": 0,
|
||||
"visual_impaired": 0,
|
||||
"clean_effects": 0,
|
||||
"attached_pic": 0,
|
||||
"timed_thumbnails": 0,
|
||||
"non_diegetic": 0,
|
||||
"captions": 0,
|
||||
"descriptions": 0,
|
||||
"metadata": 0,
|
||||
"dependent": 0,
|
||||
"still_image": 0
|
||||
},
|
||||
"tags": {
|
||||
"language": "ger",
|
||||
"title": "German Full"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
# ffprobe -hide_banner -show_streams -of json
|
||||
ffprobeOutput, ffprobeError, returnCode = executeProcess(["ffprobe",
|
||||
"-hide_banner",
|
||||
"-show_streams",
|
||||
"-of", "json",
|
||||
self.__sourcePath]) #,
|
||||
#context = self.context)
|
||||
|
||||
if 'Invalid data found when processing input' in ffprobeError:
|
||||
raise Exception(f"File {self.__sourcePath} does not contain valid stream data")
|
||||
|
||||
|
||||
if returnCode != 0:
|
||||
raise Exception(f"ffprobe returned with error {returnCode}")
|
||||
|
||||
|
||||
return json.loads(ffprobeOutput)['streams']
|
||||
|
||||
|
||||
|
||||
def findCropArguments(self):
    """Runs ffmpeg cropdetect on a sample of the source file and returns the most
    frequently detected crop geometry as a CropFilter argument dict."""

    # ffmpeg -i <input.file> -vf cropdetect -f null -
    ffmpegOutput, ffmpegError, returnCode = executeProcess(["ffmpeg", "-i",
                                                            self.__sourcePath,
                                                            "-vf", "cropdetect",
                                                            "-ss", "60",
                                                            "-t", "180",
                                                            "-f", "null", "-"
                                                            ])

    errorLines = ffmpegError.split('\n')

    # Count how often each detected crop geometry appears
    crops = {}
    for el in errorLines:

        cropdetect_match = re.search(FileProperties.CROPDETECT_PATTERN, el)

        if cropdetect_match is not None:
            cropParam = str(cropdetect_match.group(0))

            crops[cropParam] = crops.get(cropParam, 0) + 1

    if crops:
        # Pick the most frequent crop string (by count, not lexicographically)
        cropString = max(crops, key=crops.get)

        cropTokens = cropString.split('=')
        cropValueTokens = cropTokens[1]
        cropValues = cropValueTokens.split(':')

        return {
            CropFilter.OUTPUT_WIDTH_KEY: cropValues[0],
            CropFilter.OUTPUT_HEIGHT_KEY: cropValues[1],
            CropFilter.OFFSET_X_KEY: cropValues[2],
            CropFilter.OFFSET_Y_KEY: cropValues[3]
        }
    else:
        return {}
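# For reference, ffmpeg's cropdetect reports lines ending in e.g. "crop=1920:800:0:140"
# on stderr; CROPDETECT_PATTERN extracts exactly that trailing token, and the loop
# above tallies how often each geometry is seen before picking the most frequent one.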
|
||||
|
||||
|
||||
def getMediaDescriptor(self):
|
||||
return MediaDescriptor.fromFfprobe(self.context, self.getFormatData(), self.getStreamData())
|
||||
|
||||
|
||||
def getShowId(self) -> int:
|
||||
"""Result is -1 if the filename did not match anything in database"""
|
||||
return self.__pattern.getShowId() if self.__pattern is not None else -1
|
||||
|
||||
def getPattern(self) -> Pattern:
|
||||
"""Result is None if the filename did not match anything in database"""
|
||||
return self.__pattern
|
||||
|
||||
|
||||
def getSeason(self) -> int:
|
||||
return int(self.__season)
|
||||
|
||||
def getEpisode(self) -> int:
|
||||
return int(self.__episode)
|
||||
|
||||
|
||||
def getFilename(self):
|
||||
return self.__sourceFilename
|
||||
|
||||
def getFileBasename(self):
|
||||
return self.__sourceFileBasename
|
||||
@ -1,51 +0,0 @@
|
||||
import itertools
|
||||
|
||||
from .filter import Filter
|
||||
|
||||
|
||||
class CropFilter(Filter):
|
||||
|
||||
IDENTIFIER = 'crop'
|
||||
|
||||
OUTPUT_WIDTH_KEY = 'output_width'
|
||||
OUTPUT_HEIGHT_KEY = 'output_height'
|
||||
OFFSET_X_KEY = 'x_offset'
|
||||
OFFSET_Y_KEY = 'y_offset'
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY, 0))
|
||||
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY, 0))
|
||||
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY, 0))
|
||||
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY, 0))
|
||||
|
||||
super().__init__(self)
|
||||
|
||||
def setArguments(self, **kwargs):
|
||||
self.__outputWidth = int(kwargs.get(CropFilter.OUTPUT_WIDTH_KEY))
|
||||
self.__outputHeight = int(kwargs.get(CropFilter.OUTPUT_HEIGHT_KEY))
|
||||
self.__offsetX = int(kwargs.get(CropFilter.OFFSET_X_KEY,))
|
||||
self.__offsetY = int(kwargs.get(CropFilter.OFFSET_Y_KEY,))
|
||||
|
||||
def getPayload(self):
|
||||
|
||||
payload = {'identifier': CropFilter.IDENTIFIER,
|
||||
'parameters': {
|
||||
CropFilter.OUTPUT_WIDTH_KEY: self.__outputWidth,
|
||||
CropFilter.OUTPUT_HEIGHT_KEY: self.__outputHeight,
|
||||
CropFilter.OFFSET_X_KEY: self.__offsetX,
|
||||
CropFilter.OFFSET_Y_KEY: self.__offsetY
|
||||
},
|
||||
'suffices': [],
|
||||
'variant': f"C{self.__outputWidth}-{self.__outputHeight}-{self.__offsetX}-{self.__offsetY}",
|
||||
'tokens': ['crop='
|
||||
+ f"{self.__outputWidth}"
|
||||
+ f":{self.__outputHeight}"
|
||||
+ f":{self.__offsetX}"
|
||||
+ f":{self.__offsetY}"]}
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def getYield(self):
|
||||
yield self.getPayload()
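# Example: CropFilter(output_width=1920, output_height=800, x_offset=0, y_offset=140)
# produces a payload whose 'tokens' entry is ['crop=1920:800:0:140'], ready to be
# joined into an ffmpeg -vf filter chain.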
|
||||
@ -1,140 +0,0 @@
|
||||
import itertools
|
||||
|
||||
from .filter import Filter
|
||||
|
||||
|
||||
class DeinterlaceFilter(Filter):
|
||||
|
||||
IDENTIFIER = 'bwdif'
|
||||
|
||||
# DEFAULT_STRENGTH: float = 2.8
|
||||
# DEFAULT_PATCH_SIZE: int = 13
|
||||
# DEFAULT_CHROMA_PATCH_SIZE: int = 9
|
||||
# DEFAULT_RESEARCH_WINDOW: int = 23
|
||||
# DEFAULT_CHROMA_RESEARCH_WINDOW: int= 17
|
||||
|
||||
# STRENGTH_KEY = 'strength'
|
||||
# PATCH_SIZE_KEY = 'patch_size'
|
||||
# CHROMA_PATCH_SIZE_KEY = 'chroma_patch_size'
|
||||
# RESEARCH_WINDOW_KEY = 'research_window'
|
||||
# CHROMA_RESEARCH_WINDOW_KEY = 'chroma_research_window'
|
||||
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
# self.__useHardware = kwargs.get('use_hardware', False)
|
||||
|
||||
# self.__strengthList = []
|
||||
# strength = kwargs.get(NlmeansFilter.STRENGTH_KEY, '')
|
||||
# if strength:
|
||||
# strengthTokens = strength.split(',')
|
||||
# for st in strengthTokens:
|
||||
# try:
|
||||
# strengthValue = float(st)
|
||||
# except:
|
||||
# raise ValueError('NlmeansFilter: Strength value has to be of type float')
|
||||
# if strengthValue < 1.0 or strengthValue > 30.0:
|
||||
# raise ValueError('NlmeansFilter: Strength value has to be between 1.0 and 30.0')
|
||||
# self.__strengthList.append(strengthValue)
|
||||
# else:
|
||||
# self.__strengthList = [NlmeansFilter.DEFAULT_STRENGTH]
|
||||
|
||||
# self.__patchSizeList = []
|
||||
# patchSize = kwargs.get(NlmeansFilter.PATCH_SIZE_KEY, '')
|
||||
# if patchSize:
|
||||
# patchSizeTokens = patchSize.split(',')
|
||||
# for pst in patchSizeTokens:
|
||||
# try:
|
||||
# patchSizeValue = int(pst)
|
||||
# except:
|
||||
# raise ValueError('NlmeansFilter: Patch size value has to be of type int')
|
||||
# if patchSizeValue < 0 or patchSizeValue > 99:
|
||||
# raise ValueError('NlmeansFilter: Patch size value has to be between 0 and 99')
|
||||
# if patchSizeValue % 2 == 0:
|
||||
# raise ValueError('NlmeansFilter: Patch size value has to an odd number')
|
||||
# self.__patchSizeList.append(patchSizeValue)
|
||||
# else:
|
||||
# self.__patchSizeList = [NlmeansFilter.DEFAULT_PATCH_SIZE]
|
||||
|
||||
# self.__chromaPatchSizeList = []
|
||||
# chromaPatchSize = kwargs.get(NlmeansFilter.CHROMA_PATCH_SIZE_KEY, '')
|
||||
# if chromaPatchSize:
|
||||
# chromaPatchSizeTokens = chromaPatchSize.split(',')
|
||||
# for cpst in chromaPatchSizeTokens:
|
||||
# try:
|
||||
# chromaPatchSizeValue = int(pst)
|
||||
# except:
|
||||
# raise ValueError('NlmeansFilter: Chroma patch size value has to be of type int')
|
||||
# if chromaPatchSizeValue < 0 or chromaPatchSizeValue > 99:
|
||||
# raise ValueError('NlmeansFilter: Chroma patch value has to be between 0 and 99')
|
||||
# if chromaPatchSizeValue % 2 == 0:
|
||||
# raise ValueError('NlmeansFilter: Chroma patch value has to an odd number')
|
||||
# self.__chromaPatchSizeList.append(chromaPatchSizeValue)
|
||||
# else:
|
||||
# self.__chromaPatchSizeList = [NlmeansFilter.DEFAULT_CHROMA_PATCH_SIZE]
|
||||
|
||||
# self.__researchWindowList = []
|
||||
# researchWindow = kwargs.get(NlmeansFilter.RESEARCH_WINDOW_KEY, '')
|
||||
# if researchWindow:
|
||||
# researchWindowTokens = researchWindow.split(',')
|
||||
# for rwt in researchWindowTokens:
|
||||
# try:
|
||||
# researchWindowValue = int(rwt)
|
||||
# except:
|
||||
# raise ValueError('NlmeansFilter: Research window value has to be of type int')
|
||||
# if researchWindowValue < 0 or researchWindowValue > 99:
|
||||
# raise ValueError('NlmeansFilter: Research window value has to be between 0 and 99')
|
||||
# if researchWindowValue % 2 == 0:
|
||||
# raise ValueError('NlmeansFilter: Research window value has to an odd number')
|
||||
# self.__researchWindowList.append(researchWindowValue)
|
||||
# else:
|
||||
# self.__researchWindowList = [NlmeansFilter.DEFAULT_RESEARCH_WINDOW]
|
||||
|
||||
# self.__chromaResearchWindowList = []
|
||||
# chromaResearchWindow = kwargs.get(NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY, '')
|
||||
# if chromaResearchWindow:
|
||||
# chromaResearchWindowTokens = chromaResearchWindow.split(',')
|
||||
# for crwt in chromaResearchWindowTokens:
|
||||
# try:
|
||||
# chromaResearchWindowValue = int(crwt)
|
||||
# except:
|
||||
# raise ValueError('NlmeansFilter: Chroma research window value has to be of type int')
|
||||
# if chromaResearchWindowValue < 0 or chromaResearchWindowValue > 99:
|
||||
# raise ValueError('NlmeansFilter: Chroma research window value has to be between 0 and 99')
|
||||
# if chromaResearchWindowValue % 2 == 0:
|
||||
# raise ValueError('NlmeansFilter: Chroma research window value has to an odd number')
|
||||
# self.__chromaResearchWindowList.append(chromaResearchWindowValue)
|
||||
# else:
|
||||
# self.__chromaResearchWindowList = [NlmeansFilter.DEFAULT_CHROMA_RESEARCH_WINDOW]
|
||||
|
||||
super().__init__(self)
|
||||
|
||||
|
||||
def getPayload(self):
|
||||
|
||||
# strength = iteration[0]
|
||||
# patchSize = iteration[1]
|
||||
# chromaPatchSize = iteration[2]
|
||||
# researchWindow = iteration[3]
|
||||
# chromaResearchWindow = iteration[4]
|
||||
|
||||
suffices = []
|
||||
|
||||
# filterName = 'nlmeans_opencl' if self.__useHardware else 'nlmeans'
|
||||
|
||||
payload = {'identifier': DeinterlaceFilter.IDENTIFIER,
|
||||
'parameters': {},
|
||||
'suffices': suffices,
|
||||
'variant': f"DEINT",
|
||||
'tokens': ['bwdif=mode=1']}
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def getYield(self):
|
||||
# for it in itertools.product(self.__strengthList,
|
||||
# self.__patchSizeList,
|
||||
# self.__chromaPatchSizeList,
|
||||
# self.__researchWindowList,
|
||||
# self.__chromaResearchWindowList):
|
||||
yield self.getPayload()
|
||||
@ -1,17 +0,0 @@
|
||||
import itertools
|
||||
|
||||
|
||||
class Filter():
|
||||
|
||||
filterChain: list = []
|
||||
|
||||
def __init__(self, filter):
|
||||
|
||||
self.filterChain.append(filter)
|
||||
|
||||
def getFilterChain(self):
|
||||
return self.filterChain
|
||||
|
||||
def getChainYield(self):
|
||||
for fy in itertools.product(*[f.getYield() for f in self.filterChain]):
|
||||
yield fy
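# Rough usage sketch (the calling code is not part of this diff): each concrete filter
# registers itself on the shared, class-level filterChain via super().__init__(self),
# so after building e.g. QualityFilter(quality='30,34') and PresetFilter(preset='5'),
# any of the instances can drive the iteration over all parameter combinations:
#   for chainIteration in qualityFilter.getChainYield():
#       variant = '-'.join(p['variant'] for p in chainIteration)  # "Q30-P5", then "Q34-P5"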
|
||||
@ -1,162 +0,0 @@
|
||||
import itertools
|
||||
|
||||
from .filter import Filter
|
||||
|
||||
|
||||
class NlmeansFilter(Filter):
|
||||
|
||||
IDENTIFIER = 'nlmeans'
|
||||
|
||||
DEFAULT_STRENGTH: float = 2.8
|
||||
DEFAULT_PATCH_SIZE: int = 13
|
||||
DEFAULT_CHROMA_PATCH_SIZE: int = 9
|
||||
DEFAULT_RESEARCH_WINDOW: int = 23
|
||||
DEFAULT_CHROMA_RESEARCH_WINDOW: int= 17
|
||||
|
||||
STRENGTH_KEY = 'strength'
|
||||
PATCH_SIZE_KEY = 'patch_size'
|
||||
CHROMA_PATCH_SIZE_KEY = 'chroma_patch_size'
|
||||
RESEARCH_WINDOW_KEY = 'research_window'
|
||||
CHROMA_RESEARCH_WINDOW_KEY = 'chroma_research_window'
|
||||
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
self.__useHardware = kwargs.get('use_hardware', False)
|
||||
|
||||
self.__strengthList = []
|
||||
strength = kwargs.get(NlmeansFilter.STRENGTH_KEY, '')
|
||||
if strength:
|
||||
strengthTokens = strength.split(',')
|
||||
for st in strengthTokens:
|
||||
try:
|
||||
strengthValue = float(st)
|
||||
except:
|
||||
raise ValueError('NlmeansFilter: Strength value has to be of type float')
|
||||
if strengthValue < 1.0 or strengthValue > 30.0:
|
||||
raise ValueError('NlmeansFilter: Strength value has to be between 1.0 and 30.0')
|
||||
self.__strengthList.append(strengthValue)
|
||||
else:
|
||||
self.__strengthList = [NlmeansFilter.DEFAULT_STRENGTH]
|
||||
|
||||
self.__patchSizeList = []
|
||||
patchSize = kwargs.get(NlmeansFilter.PATCH_SIZE_KEY, '')
|
||||
if patchSize:
|
||||
patchSizeTokens = patchSize.split(',')
|
||||
for pst in patchSizeTokens:
|
||||
try:
|
||||
patchSizeValue = int(pst)
|
||||
except:
|
||||
raise ValueError('NlmeansFilter: Patch size value has to be of type int')
|
||||
if patchSizeValue < 0 or patchSizeValue > 99:
|
||||
raise ValueError('NlmeansFilter: Patch size value has to be between 0 and 99')
|
||||
if patchSizeValue % 2 == 0:
|
||||
raise ValueError('NlmeansFilter: Patch size value has to be an odd number')
|
||||
self.__patchSizeList.append(patchSizeValue)
|
||||
else:
|
||||
self.__patchSizeList = [NlmeansFilter.DEFAULT_PATCH_SIZE]
|
||||
|
||||
self.__chromaPatchSizeList = []
|
||||
chromaPatchSize = kwargs.get(NlmeansFilter.CHROMA_PATCH_SIZE_KEY, '')
|
||||
if chromaPatchSize:
|
||||
chromaPatchSizeTokens = chromaPatchSize.split(',')
|
||||
for cpst in chromaPatchSizeTokens:
|
||||
try:
|
||||
chromaPatchSizeValue = int(cpst)
|
||||
except:
|
||||
raise ValueError('NlmeansFilter: Chroma patch size value has to be of type int')
|
||||
if chromaPatchSizeValue < 0 or chromaPatchSizeValue > 99:
|
||||
raise ValueError('NlmeansFilter: Chroma patch size value has to be between 0 and 99')
|
||||
if chromaPatchSizeValue % 2 == 0:
|
||||
raise ValueError('NlmeansFilter: Chroma patch size value has to be an odd number')
|
||||
self.__chromaPatchSizeList.append(chromaPatchSizeValue)
|
||||
else:
|
||||
self.__chromaPatchSizeList = [NlmeansFilter.DEFAULT_CHROMA_PATCH_SIZE]
|
||||
|
||||
self.__researchWindowList = []
|
||||
researchWindow = kwargs.get(NlmeansFilter.RESEARCH_WINDOW_KEY, '')
|
||||
if researchWindow:
|
||||
researchWindowTokens = researchWindow.split(',')
|
||||
for rwt in researchWindowTokens:
|
||||
try:
|
||||
researchWindowValue = int(rwt)
|
||||
except:
|
||||
raise ValueError('NlmeansFilter: Research window value has to be of type int')
|
||||
if researchWindowValue < 0 or researchWindowValue > 99:
|
||||
raise ValueError('NlmeansFilter: Research window value has to be between 0 and 99')
|
||||
if researchWindowValue % 2 == 0:
|
||||
raise ValueError('NlmeansFilter: Research window value has to be an odd number')
|
||||
self.__researchWindowList.append(researchWindowValue)
|
||||
else:
|
||||
self.__researchWindowList = [NlmeansFilter.DEFAULT_RESEARCH_WINDOW]
|
||||
|
||||
self.__chromaResearchWindowList = []
|
||||
chromaResearchWindow = kwargs.get(NlmeansFilter.CHROMA_RESEARCH_WINDOW_KEY, '')
|
||||
if chromaResearchWindow:
|
||||
chromaResearchWindowTokens = chromaResearchWindow.split(',')
|
||||
for crwt in chromaResearchWindowTokens:
|
||||
try:
|
||||
chromaResearchWindowValue = int(crwt)
|
||||
except:
|
||||
raise ValueError('NlmeansFilter: Chroma research window value has to be of type int')
|
||||
if chromaResearchWindowValue < 0 or chromaResearchWindowValue > 99:
|
||||
raise ValueError('NlmeansFilter: Chroma research window value has to be between 0 and 99')
|
||||
if chromaResearchWindowValue % 2 == 0:
|
||||
raise ValueError('NlmeansFilter: Chroma research window value has to be an odd number')
|
||||
self.__chromaResearchWindowList.append(chromaResearchWindowValue)
|
||||
else:
|
||||
self.__chromaResearchWindowList = [NlmeansFilter.DEFAULT_CHROMA_RESEARCH_WINDOW]
|
||||
|
||||
super().__init__(self)
|
||||
|
||||
|
||||
def getPayload(self, iteration):
|
||||
|
||||
strength = iteration[0]
|
||||
patchSize = iteration[1]
|
||||
chromaPatchSize = iteration[2]
|
||||
researchWindow = iteration[3]
|
||||
chromaResearchWindow = iteration[4]
|
||||
|
||||
suffices = []
|
||||
|
||||
if len(self.__strengthList) > 1:
|
||||
suffices += [f"ds{strength}"]
|
||||
if len(self.__patchSizeList) > 1:
|
||||
suffices += [f"dp{patchSize}"]
|
||||
if len(self.__chromaPatchSizeList) > 1:
|
||||
suffices += [f"dpc{chromaPatchSize}"]
|
||||
if len(self.__researchWindowList) > 1:
|
||||
suffices += [f"dr{researchWindow}"]
|
||||
if len(self.__chromaResearchWindowList) > 1:
|
||||
suffices += [f"drc{chromaResearchWindow}"]
|
||||
|
||||
filterName = 'nlmeans_opencl' if self.__useHardware else 'nlmeans'
|
||||
|
||||
payload = {'identifier': NlmeansFilter.IDENTIFIER,
|
||||
'parameters': {
|
||||
'strength': strength,
|
||||
'patch_size': patchSize,
|
||||
'chroma_patch_size': chromaPatchSize,
|
||||
'research_window': researchWindow,
|
||||
'chroma_research_window': chromaResearchWindow
|
||||
},
|
||||
'suffices': suffices,
|
||||
'variant': f"DS{strength}-DP{patchSize}-DPC{chromaPatchSize}"
|
||||
+ f"-DR{researchWindow}-DRC{chromaResearchWindow}",
|
||||
'tokens': [f"{filterName}=s={strength}"
|
||||
+ f":p={patchSize}"
|
||||
+ f":pc={chromaPatchSize}"
|
||||
+ f":r={researchWindow}"
|
||||
+ f":rc={chromaResearchWindow}"]}
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def getYield(self):
|
||||
for it in itertools.product(self.__strengthList,
|
||||
self.__patchSizeList,
|
||||
self.__chromaPatchSizeList,
|
||||
self.__researchWindowList,
|
||||
self.__chromaResearchWindowList):
|
||||
yield self.getPayload(it)
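# With the defaults above, the software path yields the single token
#   "nlmeans=s=2.8:p=13:pc=9:r=23:rc=17"
# (s = denoising strength, p/pc = luma/chroma patch size, r/rc = luma/chroma research
# window), which is valid ffmpeg nlmeans filter syntax.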
|
||||
@ -1,54 +0,0 @@
|
||||
import itertools
|
||||
|
||||
from .filter import Filter
|
||||
|
||||
|
||||
class PresetFilter(Filter):
|
||||
|
||||
IDENTIFIER = 'preset'
|
||||
|
||||
DEFAULT_PRESET = 5
|
||||
|
||||
PRESET_KEY = 'preset'
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
self.__presetsList = []
|
||||
presets = str(kwargs.get(PresetFilter.PRESET_KEY, ''))
|
||||
if presets:
|
||||
presetTokens = presets.split(',')
|
||||
for q in presetTokens:
|
||||
try:
|
||||
presetValue = int(q)
|
||||
except:
|
||||
raise ValueError('PresetFilter: Preset value has to be of type int')
|
||||
if presetValue < 0 or presetValue > 13:
|
||||
raise ValueError('PresetFilter: Preset value has to be between 0 and 13')
|
||||
self.__presetsList.append(presetValue)
|
||||
else:
|
||||
self.__presetsList = [PresetFilter.DEFAULT_PRESET]
|
||||
|
||||
super().__init__(self)
|
||||
|
||||
|
||||
def getPayload(self, preset):
|
||||
|
||||
suffices = []
|
||||
|
||||
if len(self.__presetsList) > 1:
|
||||
suffices += [f"p{preset}"]
|
||||
|
||||
payload = {'identifier': PresetFilter.IDENTIFIER,
|
||||
'parameters': {
|
||||
'preset': preset
|
||||
},
|
||||
'suffices': suffices,
|
||||
'variant': f"P{preset}",
|
||||
'tokens': []}
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def getYield(self):
|
||||
for q in self.__presetsList:
|
||||
yield self.getPayload(q)
|
||||
@ -1,62 +0,0 @@
|
||||
import click
|
||||
|
||||
from .filter import Filter
|
||||
|
||||
from ffx.video_encoder import VideoEncoder
|
||||
|
||||
|
||||
class QualityFilter(Filter):
|
||||
|
||||
IDENTIFIER = 'quality'
|
||||
|
||||
DEFAULT_VP9_QUALITY = 32
|
||||
DEFAULT_H264_QUALITY = 17
|
||||
|
||||
QUALITY_KEY = 'quality'
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
context = click.get_current_context().obj
|
||||
|
||||
|
||||
self.__qualitiesList = []
|
||||
qualities = kwargs.get(QualityFilter.QUALITY_KEY, '')
|
||||
if qualities:
|
||||
qualityTokens = qualities.split(',')
|
||||
for q in qualityTokens:
|
||||
try:
|
||||
qualityValue = int(q)
|
||||
except:
|
||||
raise ValueError('QualityFilter: Quality value has to be of type int')
|
||||
if qualityValue < 0 or qualityValue > 63:
|
||||
raise ValueError('QualityFilter: Quality value has to be between 0 and 63')
|
||||
self.__qualitiesList.append(qualityValue)
|
||||
else:
|
||||
|
||||
self.__qualitiesList = [None]
|
||||
|
||||
|
||||
super().__init__(self)
|
||||
|
||||
|
||||
def getPayload(self, quality):
|
||||
|
||||
suffices = []
|
||||
|
||||
if len(self.__qualitiesList) > 1:
|
||||
suffices += [f"q{quality}"]
|
||||
|
||||
payload = {'identifier': QualityFilter.IDENTIFIER,
|
||||
'parameters': {
|
||||
'quality': quality
|
||||
},
|
||||
'suffices': suffices,
|
||||
'variant': f"Q{quality}",
|
||||
'tokens': []}
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def getYield(self):
|
||||
for q in self.__qualitiesList:
|
||||
yield self.getPayload(q)
|
||||
@ -1,6 +0,0 @@
|
||||
from .filter import Filter
|
||||
|
||||
class ScaleFilter(Filter):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(self)
|
||||
@ -1,13 +0,0 @@
|
||||
from textual.app import ComposeResult
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Footer, Placeholder
|
||||
|
||||
class HelpScreen(Screen):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
context = self.app.getContext()
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
yield Placeholder("Help Screen")
|
||||
yield Footer()
|
||||
|
||||
@ -1,107 +0,0 @@
|
||||
from enum import Enum
|
||||
import difflib
|
||||
|
||||
class IsoLanguage(Enum):
|
||||
|
||||
AFRIKAANS = {"name": "Afrikaans", "iso639_1": "af", "iso639_2": ["afr"]}
|
||||
ALBANIAN = {"name": "Albanian", "iso639_1": "sq", "iso639_2": ["alb"]}
|
||||
ARABIC = {"name": "Arabic", "iso639_1": "ar", "iso639_2": ["ara"]}
|
||||
ARMENIAN = {"name": "Armenian", "iso639_1": "hy", "iso639_2": ["arm"]}
|
||||
AZERBAIJANI = {"name": "Azerbaijani", "iso639_1": "az", "iso639_2": ["aze"]}
|
||||
BASQUE = {"name": "Basque", "iso639_1": "eu", "iso639_2": ["baq"]}
|
||||
BELARUSIAN = {"name": "Belarusian", "iso639_1": "be", "iso639_2": ["bel"]}
|
||||
BOKMAL = {"name": "Bokmål", "iso639_1": "nb", "iso639_2": ["nob"]} # Norwegian Bokmål
|
||||
BULGARIAN = {"name": "Bulgarian", "iso639_1": "bg", "iso639_2": ["bul"]}
|
||||
CATALAN = {"name": "Catalan", "iso639_1": "ca", "iso639_2": ["cat"]}
|
||||
CHINESE = {"name": "Chinese", "iso639_1": "zh", "iso639_2": ["zho", "chi"]}
|
||||
CROATIAN = {"name": "Croatian", "iso639_1": "hr", "iso639_2": ["hrv"]}
|
||||
CZECH = {"name": "Czech", "iso639_1": "cs", "iso639_2": ["cze"]}
|
||||
DANISH = {"name": "Danish", "iso639_1": "da", "iso639_2": ["dan"]}
|
||||
DUTCH = {"name": "Dutch", "iso639_1": "nl", "iso639_2": ["nld", "dut"]}
|
||||
ENGLISH = {"name": "English", "iso639_1": "en", "iso639_2": ["eng"]}
|
||||
ESTONIAN = {"name": "Estonian", "iso639_1": "et", "iso639_2": ["est"]}
|
||||
FILIPINO = {"name": "Filipino", "iso639_1": "tl", "iso639_2": ["fil"]} # Tagalog
|
||||
FINNISH = {"name": "Finnish", "iso639_1": "fi", "iso639_2": ["fin"]}
|
||||
FRENCH = {"name": "French", "iso639_1": "fr", "iso639_2": ["fra", "fre"]}
|
||||
GEORGIAN = {"name": "Georgian", "iso639_1": "ka", "iso639_2": ["geo"]}
|
||||
GERMAN = {"name": "German", "iso639_1": "de", "iso639_2": ["deu", "ger"]}
|
||||
GREEK = {"name": "Greek", "iso639_1": "el", "iso639_2": ["gre"]}
|
||||
HEBREW = {"name": "Hebrew", "iso639_1": "he", "iso639_2": ["heb"]}
|
||||
HINDI = {"name": "Hindi", "iso639_1": "hi", "iso639_2": ["hin"]}
|
||||
HUNGARIAN = {"name": "Hungarian", "iso639_1": "hu", "iso639_2": ["hun"]}
|
||||
ICELANDIC = {"name": "Icelandic", "iso639_1": "is", "iso639_2": ["ice"]}
|
||||
INDONESIAN = {"name": "Indonesian", "iso639_1": "id", "iso639_2": ["ind"]}
|
||||
IRISH = {"name": "Irish", "iso639_1": "ga", "iso639_2": ["gle"]}
|
||||
ITALIAN = {"name": "Italian", "iso639_1": "it", "iso639_2": ["ita"]}
|
||||
JAPANESE = {"name": "Japanese", "iso639_1": "ja", "iso639_2": ["jpn"]}
|
||||
KAZAKH = {"name": "Kazakh", "iso639_1": "kk", "iso639_2": ["kaz"]}
|
||||
KOREAN = {"name": "Korean", "iso639_1": "ko", "iso639_2": ["kor"]}
|
||||
LATIN = {"name": "Latin", "iso639_1": "la", "iso639_2": ["lat"]}
|
||||
LATVIAN = {"name": "Latvian", "iso639_1": "lv", "iso639_2": ["lav"]}
|
||||
LITHUANIAN = {"name": "Lithuanian", "iso639_1": "lt", "iso639_2": ["lit"]}
|
||||
MACEDONIAN = {"name": "Macedonian", "iso639_1": "mk", "iso639_2": ["mac"]}
|
||||
MALAY = {"name": "Malay", "iso639_1": "ms", "iso639_2": ["may"]}
|
||||
MALTESE = {"name": "Maltese", "iso639_1": "mt", "iso639_2": ["mlt"]}
|
||||
NORWEGIAN = {"name": "Norwegian", "iso639_1": "no", "iso639_2": ["nor"]}
|
||||
PERSIAN = {"name": "Persian", "iso639_1": "fa", "iso639_2": ["per"]}
|
||||
POLISH = {"name": "Polish", "iso639_1": "pl", "iso639_2": ["pol"]}
|
||||
PORTUGUESE = {"name": "Portuguese", "iso639_1": "pt", "iso639_2": ["por"]}
|
||||
ROMANIAN = {"name": "Romanian", "iso639_1": "ro", "iso639_2": ["rum"]}
|
||||
RUSSIAN = {"name": "Russian", "iso639_1": "ru", "iso639_2": ["rus"]}
|
||||
NORTHERN_SAMI = {"name": "Northern Sami", "iso639_1": "se", "iso639_2": ["sme"]}
|
||||
SAMOAN = {"name": "Samoan", "iso639_1": "sm", "iso639_2": ["smo"]}
|
||||
SANGO = {"name": "Sango", "iso639_1": "sg", "iso639_2": ["sag"]}
|
||||
SANSKRIT = {"name": "Sanskrit", "iso639_1": "sa", "iso639_2": ["san"]}
|
||||
SARDINIAN = {"name": "Sardinian", "iso639_1": "sc", "iso639_2": ["srd"]}
|
||||
SERBIAN = {"name": "Serbian", "iso639_1": "sr", "iso639_2": ["srp"]}
|
||||
SHONA = {"name": "Shona", "iso639_1": "sn", "iso639_2": ["sna"]}
|
||||
SINDHI = {"name": "Sindhi", "iso639_1": "sd", "iso639_2": ["snd"]}
|
||||
SINHALA = {"name": "Sinhala", "iso639_1": "si", "iso639_2": ["sin"]}
|
||||
SLOVAK = {"name": "Slovak", "iso639_1": "sk", "iso639_2": ["slo", "slk"]}
|
||||
SLOVENIAN = {"name": "Slovenian", "iso639_1": "sl", "iso639_2": ["slv"]}
|
||||
SOMALI = {"name": "Somali", "iso639_1": "so", "iso639_2": ["som"]}
|
||||
SOUTHERN_SOTHO = {"name": "Southern Sotho", "iso639_1": "st", "iso639_2": ["sot"]}
|
||||
SPANISH = {"name": "Spanish", "iso639_1": "es", "iso639_2": ["spa"]}
|
||||
SUNDANESE = {"name": "Sundanese", "iso639_1": "su", "iso639_2": ["sun"]}
|
||||
SWAHILI = {"name": "Swahili", "iso639_1": "sw", "iso639_2": ["swa"]}
|
||||
SWATI = {"name": "Swati", "iso639_1": "ss", "iso639_2": ["ssw"]}
|
||||
SWEDISH = {"name": "Swedish", "iso639_1": "sv", "iso639_2": ["swe"]}
|
||||
TAGALOG = {"name": "Tagalog", "iso639_1": "tl", "iso639_2": ["tgl"]}
|
||||
TAMIL = {"name": "Tamil", "iso639_1": "ta", "iso639_2": ["tam"]}
|
||||
THAI = {"name": "Thai", "iso639_1": "th", "iso639_2": ["tha"]}
|
||||
TURKISH = {"name": "Turkish", "iso639_1": "tr", "iso639_2": ["tur"]}
|
||||
UKRAINIAN = {"name": "Ukrainian", "iso639_1": "uk", "iso639_2": ["ukr"]}
|
||||
URDU = {"name": "Urdu", "iso639_1": "ur", "iso639_2": ["urd"]}
|
||||
VIETNAMESE = {"name": "Vietnamese", "iso639_1": "vi", "iso639_2":[ "vie"]}
|
||||
WELSH = {"name": "Welsh", "iso639_1": "cy", "iso639_2": ["wel"]}
|
||||
|
||||
UNDEFINED = {"name": "undefined", "iso639_1": "xx", "iso639_2": ["und"]}
|
||||
|
||||
|
||||
@staticmethod
|
||||
def find(label : str):
|
||||
|
||||
closestMatches = difflib.get_close_matches(label, [l.value["name"] for l in IsoLanguage], n=1)
|
||||
|
||||
if closestMatches:
|
||||
foundLangs = [l for l in IsoLanguage if l.value['name'] == closestMatches[0]]
|
||||
return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
|
||||
else:
|
||||
return IsoLanguage.UNDEFINED
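# For example, IsoLanguage.find("Germann") still resolves to IsoLanguage.GERMAN,
# because difflib.get_close_matches() performs fuzzy matching against the language names.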
|
||||
|
||||
@staticmethod
|
||||
def findThreeLetter(threeLetter : str):
    foundLangs = [l for l in IsoLanguage if str(threeLetter) in l.value['iso639_2']]
    return foundLangs[0] if foundLangs else IsoLanguage.UNDEFINED
|
||||
|
||||
|
||||
def label(self):
|
||||
return str(self.value['name'])
|
||||
|
||||
def twoLetter(self):
|
||||
return str(self.value['iso639_1'])
|
||||
|
||||
def threeLetter(self):
|
||||
return str(self.value['iso639_2'][0])
|
||||
|
||||
|
||||
@ -1,48 +0,0 @@
|
||||
import click
|
||||
|
||||
from ffx.model.pattern import Pattern
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from ffx.tag_controller import TagController
|
||||
from ffx.track_controller import TrackController
|
||||
|
||||
class MediaController():
|
||||
|
||||
def __init__(self, context):
|
||||
|
||||
self.context = context
|
||||
self.Session = self.context['database']['session'] # convenience
|
||||
|
||||
self.__logger = context['logger']
|
||||
|
||||
self.__tc = TrackController(context = context)
|
||||
self.__tac = TagController(context = context)
|
||||
|
||||
def setPatternMediaDescriptor(self, mediaDescriptor: MediaDescriptor, patternId: int):
|
||||
|
||||
try:
|
||||
|
||||
pid = int(patternId)
|
||||
|
||||
s = self.Session()
|
||||
q = s.query(Pattern).filter(Pattern.id == pid)
|
||||
|
||||
if q.count():
|
||||
pattern = q.first()
|
||||
|
||||
for mediaTagKey, mediaTagValue in mediaDescriptor.getTags().items():
|
||||
self.__tac.updateMediaTag(pid, mediaTagKey, mediaTagValue)
|
||||
# for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
|
||||
for trackDescriptor in mediaDescriptor.getTrackDescriptors():
|
||||
self.__tc.addTrack(trackDescriptor, patternId = pid)
|
||||
|
||||
s.commit()
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
except Exception as ex:
|
||||
self.__logger.error(f"MediaController.setPatternMediaDescriptor(): {repr(ex)}")
|
||||
raise click.ClickException(f"MediaController.setPatternMediaDescriptor(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
@ -1,512 +0,0 @@
|
||||
import os, re, click, logging
|
||||
|
||||
from typing import List, Self
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
from ffx.iso_language import IsoLanguage
|
||||
|
||||
from ffx.track_disposition import TrackDisposition
|
||||
from ffx.track_codec import TrackCodec
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
|
||||
|
||||
class MediaDescriptor:
|
||||
"""This class represents the structural content of a media file including streams and metadata"""
|
||||
|
||||
CONTEXT_KEY = "context"
|
||||
|
||||
TAGS_KEY = "tags"
|
||||
TRACKS_KEY = "tracks"
|
||||
|
||||
TRACK_DESCRIPTOR_LIST_KEY = "track_descriptors"
|
||||
CLEAR_TAGS_FLAG_KEY = "clear_tags"
|
||||
|
||||
FFPROBE_DISPOSITION_KEY = "disposition"
|
||||
FFPROBE_TAGS_KEY = "tags"
|
||||
FFPROBE_CODEC_TYPE_KEY = "codec_type"
|
||||
|
||||
#407 remove as well
|
||||
EXCLUDED_MEDIA_TAGS = ["creation_time"]
|
||||
|
||||
SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH = '[sS]([0-9]+)[eE]([0-9]+)_([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
||||
STREAM_LANGUAGE_DISPOSITIONS_MATCH = '([0-9]+)_([a-z]{3})(?:_([A-Z]{3}))*'
|
||||
|
||||
SUBTITLE_FILE_EXTENSION = 'vtt'
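# For example, a file named "myshow_S01E02_0_ger_DEF.vtt" would be picked up by
# searchSubtitleFiles() below as season 1, episode 2, stream index 0, language "ger",
# plus whatever disposition TrackDisposition.fromIndicator() maps "DEF" to (that enum
# is not part of this diff).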
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
if MediaDescriptor.CONTEXT_KEY in kwargs.keys():
|
||||
if type(kwargs[MediaDescriptor.CONTEXT_KEY]) is not dict:
|
||||
raise TypeError(
|
||||
f"MediaDescriptor.__init__(): Argument {MediaDescriptor.CONTEXT_KEY} is required to be of type dict"
|
||||
)
|
||||
self.__context = kwargs[MediaDescriptor.CONTEXT_KEY]
|
||||
self.__logger = self.__context['logger']
|
||||
else:
|
||||
self.__context = {}
|
||||
self.__logger = logging.getLogger('FFX')
|
||||
self.__logger.addHandler(logging.NullHandler())
|
||||
|
||||
if MediaDescriptor.TAGS_KEY in kwargs.keys():
|
||||
if type(kwargs[MediaDescriptor.TAGS_KEY]) is not dict:
|
||||
raise TypeError(
|
||||
f"MediaDescriptor.__init__(): Argument {MediaDescriptor.TAGS_KEY} is required to be of type dict"
|
||||
)
|
||||
self.__mediaTags = kwargs[MediaDescriptor.TAGS_KEY]
|
||||
else:
|
||||
self.__mediaTags = {}
|
||||
|
||||
if MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY in kwargs.keys():
|
||||
if (
|
||||
type(kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]) is not list
|
||||
): # Use List typehint for TrackDescriptor as well if it works
|
||||
raise TypeError(
|
||||
f"MediaDescriptor.__init__(): Argument {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} is required to be of type list"
|
||||
)
|
||||
for d in kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]:
|
||||
if type(d) is not TrackDescriptor:
|
||||
raise TypeError(
|
||||
f"TrackDesciptor.__init__(): All elements of argument list {MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY} are required to be of type TrackDescriptor"
|
||||
)
|
||||
self.__trackDescriptors = kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY]
|
||||
else:
|
||||
self.__trackDescriptors = []
|
||||
|
||||
def setTrackLanguage(self, language: str, index: int, trackType: TrackType = None):
|
||||
|
||||
trackLanguage = IsoLanguage.findThreeLetter(language)
|
||||
if trackLanguage == IsoLanguage.UNDEFINED:
|
||||
self.__logger.warning('MediaDescriptor.setTrackLanguage(): Parameter language does not contain a registered '
                      + 'ISO 639 3-letter language code, skipping language assignment for '
                      + ('' if trackType is None else trackType.label() + ' ') + f"track {index}")
|
||||
|
||||
trackList = self.getTrackDescriptors(trackType=trackType)
|
||||
|
||||
if index < 0 or index > len(trackList) - 1:
    self.__logger.warning(f"MediaDescriptor.setTrackLanguage(): Parameter index ({index}) is "
                          + f"out of range of the {'' if trackType is None else trackType.label() + ' '}track list")
    return
|
||||
|
||||
td: TrackDescriptor = trackList[index]
|
||||
td.setLanguage(trackLanguage)
|
||||
|
||||
return
|
||||
|
||||
|
||||
def setTrackTitle(self, title: str, index: int, trackType: TrackType = None):
|
||||
|
||||
trackList = self.getTrackDescriptors(trackType=trackType)
|
||||
|
||||
if index < 0 or index > len(trackList) - 1:
|
||||
self.__logger.error(f"MediaDescriptor.setTrackTitle(): Parameter index ({index}) is "
|
||||
+ f"out of range of {'' if trackType is None else trackType.label()}track list")
|
||||
raise click.Abort()
|
||||
|
||||
td: TrackDescriptor = trackList[index]
|
||||
td.setTitle(title)
|
||||
|
||||
|
||||
def setDefaultSubTrack(self, trackType: TrackType, subIndex: int):
|
||||
# for t in self.getAllTrackDescriptors():
|
||||
for t in self.getTrackDescriptors():
|
||||
if t.getType() == trackType:
|
||||
t.setDispositionFlag(
|
||||
TrackDisposition.DEFAULT, t.getSubIndex() == int(subIndex)
|
||||
)
|
||||
|
||||
def setForcedSubTrack(self, trackType: TrackType, subIndex: int):
|
||||
# for t in self.getAllTrackDescriptors():
|
||||
for t in self.getTrackDescriptors():
|
||||
if t.getType() == trackType:
|
||||
t.setDispositionFlag(
|
||||
TrackDisposition.FORCED, t.getSubIndex() == int(subIndex)
|
||||
)
|
||||
|
||||
def checkConfiguration(self):
|
||||
|
||||
videoTracks = self.getVideoTracks()
|
||||
audioTracks = self.getAudioTracks()
|
||||
subtitleTracks = self.getSubtitleTracks()
|
||||
|
||||
if len([v for v in videoTracks if v.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
||||
raise ValueError('More than one default video track')
|
||||
if len([a for a in audioTracks if a.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
||||
raise ValueError('More than one default audio track')
|
||||
if len([s for s in subtitleTracks if s.getDispositionFlag(TrackDisposition.DEFAULT)]) > 1:
|
||||
raise ValueError('More than one default subtitle track')
|
||||
|
||||
if len([v for v in videoTracks if v.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
||||
raise ValueError('More than one forced video track')
|
||||
if len([a for a in audioTracks if a.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
||||
raise ValueError('More than one forced audio track')
|
||||
if len([s for s in subtitleTracks if s.getDispositionFlag(TrackDisposition.FORCED)]) > 1:
|
||||
raise ValueError('More than one forced subtitle track')
|
||||
|
||||
trackDescriptors = videoTracks + audioTracks + subtitleTracks
|
||||
sourceIndices = [
|
||||
t.getSourceIndex() for t in trackDescriptors
|
||||
]
|
||||
if len(set(sourceIndices)) < len(trackDescriptors):
|
||||
raise ValueError('Multiple streams originating from the same source stream')
|
||||
|
||||
|
||||
def applyOverrides(self, overrides: dict):
|
||||
|
||||
if 'languages' in overrides.keys():
|
||||
for trackIndex in overrides['languages'].keys():
|
||||
self.setTrackLanguage(overrides['languages'][trackIndex], trackIndex)
|
||||
|
||||
if 'titles' in overrides.keys():
|
||||
for trackIndex in overrides['titles'].keys():
|
||||
self.setTrackTitle(overrides['titles'][trackIndex], trackIndex)
|
||||
|
||||
if 'forced_video' in overrides.keys():
|
||||
sti = int(overrides['forced_video'])
|
||||
self.setForcedSubTrack(TrackType.VIDEO, sti)
|
||||
self.setDefaultSubTrack(TrackType.VIDEO, sti)
|
||||
|
||||
elif 'default_video' in overrides.keys():
|
||||
sti = int(overrides['default_video'])
|
||||
self.setDefaultSubTrack(TrackType.VIDEO, sti)
|
||||
|
||||
if 'forced_audio' in overrides.keys():
|
||||
sti = int(overrides['forced_audio'])
|
||||
self.setForcedSubTrack(TrackType.AUDIO, sti)
|
||||
self.setDefaultSubTrack(TrackType.AUDIO, sti)
|
||||
|
||||
elif 'default_audio' in overrides.keys():
|
||||
sti = int(overrides['default_audio'])
|
||||
self.setDefaultSubTrack(TrackType.AUDIO, sti)
|
||||
|
||||
if 'forced_subtitle' in overrides.keys():
|
||||
sti = int(overrides['forced_subtitle'])
|
||||
self.setForcedSubTrack(TrackType.SUBTITLE, sti)
|
||||
self.setDefaultSubTrack(TrackType.SUBTITLE, sti)
|
||||
|
||||
elif 'default_subtitle' in overrides.keys():
|
||||
sti = int(overrides['default_subtitle'])
|
||||
self.setDefaultSubTrack(TrackType.SUBTITLE, sti)
|
||||
|
||||
if 'stream_order' in overrides.keys():
|
||||
self.rearrangeTrackDescriptors(overrides['stream_order'])
|
||||
|
||||
|
||||
def applySourceIndices(self, sourceMediaDescriptor: Self):
|
||||
# sourceTrackDescriptors = sourceMediaDescriptor.getAllTrackDescriptors()
|
||||
sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors()
|
||||
|
||||
numTrackDescriptors = len(self.__trackDescriptors)
|
||||
if len(sourceTrackDescriptors) != numTrackDescriptors:
|
||||
raise ValueError('MediaDescriptor.applySourceIndices(): Number of track descriptors does not match')
|
||||
|
||||
for trackIndex in range(numTrackDescriptors):
|
||||
self.__trackDescriptors[trackIndex].setSourceIndex(sourceTrackDescriptors[trackIndex].getSourceIndex())
|
||||
|
||||
|
||||
def rearrangeTrackDescriptors(self, newOrder: List[int]):
    if len(newOrder) != len(self.__trackDescriptors):
        raise ValueError('Length of list with reordered indices does not match number of track descriptors')
    # Collect the descriptors in the requested order (must be a list, not a dict)
    reorderedTrackDescriptors = []
    for oldIndex in newOrder:
        reorderedTrackDescriptors.append(self.__trackDescriptors[oldIndex])
    self.__trackDescriptors = reorderedTrackDescriptors
    self.reindexSubIndices()
    self.reindexIndices()
|
||||
|
||||
|
||||
@classmethod
|
||||
def fromFfprobe(cls, context, formatData, streamData):
|
||||
|
||||
kwargs = {}
|
||||
|
||||
kwargs[MediaDescriptor.CONTEXT_KEY] = context
|
||||
|
||||
if MediaDescriptor.FFPROBE_TAGS_KEY in formatData.keys():
|
||||
kwargs[MediaDescriptor.TAGS_KEY] = formatData[
|
||||
MediaDescriptor.FFPROBE_TAGS_KEY
|
||||
]
|
||||
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = []
|
||||
|
||||
# TODO: Possibly obsolete
|
||||
subIndexCounters = {}
|
||||
|
||||
for streamObj in streamData:
|
||||
|
||||
ffprobeCodecType = streamObj[MediaDescriptor.FFPROBE_CODEC_TYPE_KEY]
|
||||
trackType = TrackType.fromLabel(ffprobeCodecType)
|
||||
|
||||
if trackType != TrackType.UNKNOWN:
|
||||
|
||||
if trackType not in subIndexCounters.keys():
|
||||
subIndexCounters[trackType] = 0
|
||||
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY].append(
|
||||
TrackDescriptor.fromFfprobe(
|
||||
streamObj, subIndex=subIndexCounters[trackType]
|
||||
)
|
||||
)
|
||||
subIndexCounters[trackType] += 1
|
||||
|
||||
return cls(**kwargs)
|
||||
|
||||
def getTags(self):
|
||||
return self.__mediaTags
|
||||
|
||||
|
||||
def sortSubIndices(
|
||||
self, descriptors: List[TrackDescriptor]
|
||||
) -> List[TrackDescriptor]:
|
||||
subIndex = 0
|
||||
for d in descriptors:
|
||||
d.setSubIndex(subIndex)
|
||||
subIndex += 1
|
||||
return descriptors
|
||||
|
||||
def reindexSubIndices(self, trackDescriptors: list = []):
|
||||
tdList = trackDescriptors if trackDescriptors else self.__trackDescriptors
|
||||
subIndexCounter = {}
|
||||
for td in tdList:
|
||||
trackType = td.getType()
|
||||
if trackType not in subIndexCounter.keys():
|
||||
subIndexCounter[trackType] = 0
|
||||
td.setSubIndex(subIndexCounter[trackType])
|
||||
subIndexCounter[trackType] += 1
|
||||
|
||||
def sortIndices(
|
||||
self, descriptors: List[TrackDescriptor]
|
||||
) -> List[TrackDescriptor]:
|
||||
index = 0
|
||||
for d in descriptors:
|
||||
d.setIndex(index)
|
||||
index += 1
|
||||
return descriptors
|
||||
|
||||
def reindexIndices(self, trackDescriptors: list = []):
|
||||
tdList = trackDescriptors if trackDescriptors else self.__trackDescriptors
|
||||
for trackIndex in range(len(tdList)):
|
||||
tdList[trackIndex].setIndex(trackIndex)
|
||||
|
||||
|
||||
# def getAllTrackDescriptors(self):
|
||||
# """Returns all track descriptors sorted by type: video, audio then subtitles"""
|
||||
# return self.getVideoTracks() + self.getAudioTracks() + self.getSubtitleTracks()
|
||||
|
||||
|
||||
def getTrackDescriptors(self,
|
||||
trackType: TrackType = None) -> List[TrackDescriptor]:
|
||||
|
||||
if trackType is None:
|
||||
return self.__trackDescriptors
|
||||
|
||||
descriptorList = []
|
||||
for td in self.__trackDescriptors:
|
||||
if td.getType() == trackType:
|
||||
descriptorList.append(td)
|
||||
|
||||
return descriptorList
|
||||
|
||||
|
||||
def getVideoTracks(self) -> List[TrackDescriptor]:
|
||||
return [v for v in self.__trackDescriptors if v.getType() == TrackType.VIDEO]
|
||||
|
||||
def getAudioTracks(self) -> List[TrackDescriptor]:
|
||||
return [a for a in self.__trackDescriptors if a.getType() == TrackType.AUDIO]
|
||||
|
||||
def getSubtitleTracks(self) -> List[TrackDescriptor]:
|
||||
return [
|
||||
s
|
||||
for s in self.__trackDescriptors
|
||||
if s.getType() == TrackType.SUBTITLE
|
||||
]
|
||||
|
||||
|
||||
def getImportFileTokens(self, use_sub_index: bool = True):
|
||||
"""Generate ffmpeg import options for external stream files"""
|
||||
|
||||
importFileTokens = []
|
||||
|
||||
td: TrackDescriptor
|
||||
for td in self.__trackDescriptors:
|
||||
|
||||
importedFilePath = td.getExternalSourceFilePath()
|
||||
|
||||
if importedFilePath:
|
||||
|
||||
self.__logger.info(f"Substituting subtitle stream #{td.getIndex()} "
|
||||
+ f"({td.getType().label()}:{td.getSubIndex()}) "
|
||||
+ f"with import from file {td.getExternalSourceFilePath()}")
|
||||
|
||||
importFileTokens += [
|
||||
"-i",
|
||||
importedFilePath,
|
||||
]
|
||||
|
||||
return importFileTokens
|
||||
|
||||
|
||||
def getInputMappingTokens(self, use_sub_index: bool = True, only_video: bool = False):
|
||||
"""Tracks must be reordered for source index order"""
|
||||
|
||||
inputMappingTokens = []
|
||||
|
||||
sortedTrackDescriptors = sorted(self.__trackDescriptors, key=lambda d: d.getIndex())
|
||||
|
||||
# raise click.ClickException(' '.join([f"\nindex={td.getIndex()} subIndex={td.getSubIndex()} srcIndex={td.getSourceIndex()} type={td.getType().label()}" for td in self.__trackDescriptors]))
|
||||
|
||||
filePointer = 1
|
||||
for trackIndex in range(len(sortedTrackDescriptors)):
|
||||
|
||||
td: TrackDescriptor = sortedTrackDescriptors[trackIndex]
|
||||
|
||||
#HINT: Attached thumbnails are not supported by .webm container format
|
||||
if td.getCodec() != TrackCodec.PNG:
|
||||
|
||||
stdi = sortedTrackDescriptors[td.getSourceIndex()].getIndex()
|
||||
stdsi = sortedTrackDescriptors[td.getSourceIndex()].getSubIndex()
|
||||
|
||||
trackType = td.getType()
|
||||
|
||||
if (trackType == TrackType.VIDEO or not only_video):
|
||||
|
||||
importedFilePath = td.getExternalSourceFilePath()
|
||||
|
||||
if use_sub_index:
|
||||
|
||||
if importedFilePath:
|
||||
|
||||
inputMappingTokens += [
|
||||
"-map",
|
||||
f"{filePointer}:{trackType.indicator()}:0",
|
||||
]
|
||||
filePointer += 1
|
||||
|
||||
else:
|
||||
|
||||
if not td.getCodec() in [TrackCodec.PGS, TrackCodec.VOBSUB]:
|
||||
inputMappingTokens += [
|
||||
"-map",
|
||||
f"0:{trackType.indicator()}:{stdsi}",
|
||||
]
|
||||
|
||||
else:
|
||||
if not td.getCodec() in [TrackCodec.PGS, TrackCodec.VOBSUB]:
|
||||
inputMappingTokens += ["-map", f"0:{stdi}"]
|
||||
|
||||
return inputMappingTokens
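# Illustrative result (an assumption, since TrackType.indicator() is not part of this
# diff but presumably returns 'v'/'a'/'s'): for one video track, one audio track and
# one subtitle track imported from an external file, the tokens come out roughly as
#   ['-map', '0:v:0', '-map', '0:a:0', '-map', '1:s:0']
# with filePointer counting up once per imported file.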
|
||||
|
||||
|
||||
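# A minimal usage sketch (assumed, not defined in this class): the import and
# mapping helpers are meant to be spliced into a single ffmpeg argument list;
# mediaDescriptor, sourceFilePath and outputFilePath are illustrative names only.
#
#     tokens = (["ffmpeg", "-i", sourceFilePath]
#               + mediaDescriptor.getImportFileTokens()
#               + mediaDescriptor.getInputMappingTokens(use_sub_index=True)
#               + [outputFilePath])
#     subprocess.run(tokens, check=True)
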
def searchSubtitleFiles(self, searchDirectory, prefix):

    sesld_match = re.compile(f"{prefix}_{MediaDescriptor.SEASON_EPISODE_STREAM_LANGUAGE_DISPOSITIONS_MATCH}")
    sld_match = re.compile(f"{prefix}_{MediaDescriptor.STREAM_LANGUAGE_DISPOSITIONS_MATCH}")

    subtitleFileDescriptors = []

    for subtitleFilename in os.listdir(searchDirectory):
        if subtitleFilename.startswith(prefix) and subtitleFilename.endswith(
            "." + MediaDescriptor.SUBTITLE_FILE_EXTENSION
        ):

            sesld_result = sesld_match.search(subtitleFilename)
            sld_result = None if sesld_result is not None else sld_match.search(subtitleFilename)

            if sesld_result is not None:

                subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
                if os.path.isfile(subtitleFilePath):

                    subtitleFileDescriptor = {}
                    subtitleFileDescriptor["path"] = subtitleFilePath
                    subtitleFileDescriptor["season"] = int(sesld_result.group(1))
                    subtitleFileDescriptor["episode"] = int(sesld_result.group(2))
                    subtitleFileDescriptor["index"] = int(sesld_result.group(3))
                    subtitleFileDescriptor["language"] = sesld_result.group(4)

                    dispSet = set()
                    dispCaptGroups = sesld_result.groups()
                    numCaptGroups = len(dispCaptGroups)
                    if numCaptGroups > 4:
                        for groupIndex in range(numCaptGroups - 4):
                            disp = TrackDisposition.fromIndicator(dispCaptGroups[groupIndex + 4])
                            if disp is not None:
                                dispSet.add(disp)
                    subtitleFileDescriptor["disposition_set"] = dispSet

                    subtitleFileDescriptors.append(subtitleFileDescriptor)

            if sld_result is not None:

                subtitleFilePath = os.path.join(searchDirectory, subtitleFilename)
                if os.path.isfile(subtitleFilePath):

                    subtitleFileDescriptor = {}
                    subtitleFileDescriptor["path"] = subtitleFilePath
                    subtitleFileDescriptor["index"] = int(sld_result.group(1))
                    subtitleFileDescriptor["language"] = sld_result.group(2)

                    dispSet = set()
                    dispCaptGroups = sld_result.groups()
                    numCaptGroups = len(dispCaptGroups)
                    if numCaptGroups > 2:
                        for groupIndex in range(numCaptGroups - 2):
                            disp = TrackDisposition.fromIndicator(dispCaptGroups[groupIndex + 2])
                            if disp is not None:
                                dispSet.add(disp)
                    subtitleFileDescriptor["disposition_set"] = dispSet

                    subtitleFileDescriptors.append(subtitleFileDescriptor)

    self.__logger.debug(f"searchSubtitleFiles(): Available subtitle files {subtitleFileDescriptors}")

    return subtitleFileDescriptors

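# Shape of each descriptor returned by searchSubtitleFiles(); the keys follow
# the assignments above, the concrete values are illustrative assumptions.
# Files matching the season/episode pattern also carry "season"/"episode";
# the plain stream pattern only yields "index" and "language":
#
#     {
#         "path": subtitleFilePath,
#         "season": 1,
#         "episode": 2,
#         "index": 0,
#         "language": "eng",
#         "disposition_set": {TrackDisposition.DEFAULT},
#     }
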
def importSubtitles(self, searchDirectory, prefix, season: int = -1, episode: int = -1):

    # click.echo(f"Season: {season} Episode: {episode}")
    self.__logger.debug(f"importSubtitles(): Season: {season} Episode: {episode}")

    availableFileSubtitleDescriptors = self.searchSubtitleFiles(searchDirectory, prefix)

    self.__logger.debug(f"importSubtitles(): availableFileSubtitleDescriptors: {availableFileSubtitleDescriptors}")

    subtitleTracks = self.getSubtitleTracks()

    self.__logger.debug(f"importSubtitles(): subtitleTracks: {[s.getIndex() for s in subtitleTracks]}")

    matchingSubtitleFileDescriptors = (
        sorted(
            [
                d
                for d in availableFileSubtitleDescriptors
                if ((season == -1 and episode == -1)
                    or (d["season"] == int(season) and d["episode"] == int(episode)))
            ],
            key=lambda d: d["index"],
        )
        if availableFileSubtitleDescriptors
        else []
    )

    self.__logger.debug(f"importSubtitles(): matchingSubtitleFileDescriptors: {matchingSubtitleFileDescriptors}")

    for msfd in matchingSubtitleFileDescriptors:
        matchingSubtitleTrackDescriptor = [s for s in subtitleTracks if s.getIndex() == msfd["index"]]
        if matchingSubtitleTrackDescriptor:
            # click.echo(f"Found matching subtitle file {msfd["path"]}\n")
            self.__logger.debug(f"importSubtitles(): Found matching subtitle file {msfd['path']}")
            matchingSubtitleTrackDescriptor[0].setExternalSourceFilePath(msfd["path"])

            # TODO: Check if useful
            # matchingSubtitleTrackDescriptor[0].setDispositionSet(msfd["disposition_set"])

def getConfiguration(self, label: str = ''):
    yield f"--- {label if label else 'MediaDescriptor '+str(id(self))} {' '.join([str(k)+'='+str(v) for k,v in self.__mediaTags.items()])}"
    # for td in self.getAllTrackDescriptors():
    for td in self.getTrackDescriptors():
        yield (f"{td.getIndex()}:{td.getType().indicator()}:{td.getSubIndex()} "
               + '|'.join([d.indicator() for d in td.getDispositionSet()])
               + ' ' + ' '.join([str(k)+'='+str(v) for k,v in td.getTags().items()]))

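# Consumption sketch (assumed usage): getConfiguration() is a generator that
# yields one summary line for the container tags followed by one line per
# track, so callers can stream it to a logger or stdout:
#
#     for line in mediaDescriptor.getConfiguration('before'):
#         logger.debug(line)
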
@ -1,302 +0,0 @@
|
||||
import click
|
||||
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
|
||||
from ffx.helper import dictDiff, setDiff, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
|
||||
|
||||
from ffx.track_codec import TrackCodec
|
||||
from ffx.track_disposition import TrackDisposition
|
||||
|
||||
|
||||
class MediaDescriptorChangeSet():
|
||||
|
||||
TAGS_KEY = "tags"
|
||||
TRACKS_KEY = "tracks"
|
||||
DISPOSITION_SET_KEY = "disposition_set"
|
||||
|
||||
TRACK_DESCRIPTOR_KEY = "track_descriptor"
|
||||
|
||||
|
||||
def __init__(self, context,
|
||||
targetMediaDescriptor: MediaDescriptor = None,
|
||||
sourceMediaDescriptor: MediaDescriptor = None):
|
||||
|
||||
self.__context = context
|
||||
self.__logger = context['logger']
|
||||
|
||||
self.__configurationData = self.__context['config'].getData()
|
||||
|
||||
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
|
||||
|
||||
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
|
||||
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
|
||||
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
|
||||
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
|
||||
if 'streams' in metadataConfiguration.keys()
|
||||
and 'remove' in metadataConfiguration['streams'].keys() else [])
|
||||
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
|
||||
if 'streams' in metadataConfiguration.keys()
|
||||
and 'ignore' in metadataConfiguration['streams'].keys() else [])
|
||||
|
||||
|
||||
self.__targetTrackDescriptors = targetMediaDescriptor.getTrackDescriptors() if targetMediaDescriptor is not None else []
|
||||
self.__sourceTrackDescriptors = sourceMediaDescriptor.getTrackDescriptors() if sourceMediaDescriptor is not None else []
|
||||
|
||||
targetMediaTags = targetMediaDescriptor.getTags() if targetMediaDescriptor is not None else {}
|
||||
sourceMediaTags = sourceMediaDescriptor.getTags() if sourceMediaDescriptor is not None else {}
|
||||
|
||||
|
||||
self.__changeSetObj = {}
|
||||
|
||||
#if targetMediaDescriptor is not None:
|
||||
|
||||
|
||||
|
||||
#!!#
|
||||
tagsDiff = dictDiff(sourceMediaTags,
|
||||
targetMediaTags,
|
||||
ignoreKeys=self.__ignoreGlobalKeys,
|
||||
removeKeys=self.__removeGlobalKeys)
|
||||
|
||||
if tagsDiff:
|
||||
self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiff
|
||||
|
||||
|
||||
|
||||
self.__numTargetTracks = len(self.__targetTrackDescriptors)
|
||||
|
||||
# Current track configuration (of file)
|
||||
|
||||
self.__numSourceTracks = len(self.__sourceTrackDescriptors)
|
||||
|
||||
maxNumOfTracks = max(self.__numSourceTracks, self.__numTargetTracks)
|
||||
|
||||
trackCompareResult = {}
|
||||
|
||||
|
||||
for trackIndex in range(maxNumOfTracks):
|
||||
|
||||
correspondingSourceTrackDescriptors = [st for st in self.__sourceTrackDescriptors if st.getIndex() == trackIndex]
|
||||
correspondingTargetTrackDescriptors = [tt for tt in self.__targetTrackDescriptors if tt.getIndex() == trackIndex]
|
||||
|
||||
# Track present in target but not in source
|
||||
if (not correspondingSourceTrackDescriptors
|
||||
and correspondingTargetTrackDescriptors):
|
||||
|
||||
if DIFF_ADDED_KEY not in trackCompareResult.keys():
|
||||
trackCompareResult[DIFF_ADDED_KEY] = {}
|
||||
|
||||
trackCompareResult[DIFF_ADDED_KEY][trackIndex] = correspondingTargetTrackDescriptors[0]
|
||||
continue
|
||||
|
||||
# Track present in source but not in target
|
||||
if (correspondingSourceTrackDescriptors
|
||||
and not correspondingTargetTrackDescriptors):
|
||||
|
||||
if DIFF_REMOVED_KEY not in trackCompareResult.keys():
|
||||
trackCompareResult[DIFF_REMOVED_KEY] = {}
|
||||
|
||||
trackCompareResult[DIFF_REMOVED_KEY][trackIndex] = correspondingSourceTrackDescriptors[0]
|
||||
continue
|
||||
|
||||
if (correspondingSourceTrackDescriptors
|
||||
and correspondingTargetTrackDescriptors):
|
||||
|
||||
# if correspondingTargetTrackDescriptors[0].getIndex() == 3:
|
||||
# raise click.ClickException(f"{correspondingSourceTrackDescriptors[0].getDispositionSet()} {correspondingTargetTrackDescriptors[0].getDispositionSet()}")
|
||||
|
||||
|
||||
trackDiff = self.compareTracks(correspondingTargetTrackDescriptors[0],
|
||||
correspondingSourceTrackDescriptors[0])
|
||||
|
||||
if trackDiff:
|
||||
if DIFF_CHANGED_KEY not in trackCompareResult.keys():
|
||||
trackCompareResult[DIFF_CHANGED_KEY] = {}
|
||||
|
||||
trackCompareResult[DIFF_CHANGED_KEY][trackIndex] = trackDiff
|
||||
|
||||
|
||||
if trackCompareResult:
|
||||
self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY] = trackCompareResult
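# Resulting shape of self.__changeSetObj (keys are only present where a
# difference was found; the nesting mirrors the assignments above):
#
#     {
#         MediaDescriptorChangeSet.TAGS_KEY:   <dictDiff result for the global tags>,
#         MediaDescriptorChangeSet.TRACKS_KEY: {
#             DIFF_ADDED_KEY:   {trackIndex: TrackDescriptor, ...},
#             DIFF_REMOVED_KEY: {trackIndex: TrackDescriptor, ...},
#             DIFF_CHANGED_KEY: {trackIndex: <compareTracks() result>, ...},
#         },
#     }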
|
||||
|
||||
|
||||
def compareTracks(self,
|
||||
targetTrackDescriptor: TrackDescriptor = None,
|
||||
sourceTrackDescriptor: TrackDescriptor = None):
|
||||
|
||||
sourceTrackTags = sourceTrackDescriptor.getTags() if sourceTrackDescriptor is not None else {}
|
||||
targetTrackTags = targetTrackDescriptor.getTags() if targetTrackDescriptor is not None else {}
|
||||
|
||||
trackCompareResult = {}
|
||||
|
||||
tagsDiffResult = dictDiff(sourceTrackTags,
|
||||
targetTrackTags,
|
||||
ignoreKeys=self.__ignoreTrackKeys,
|
||||
removeKeys=self.__removeTrackKeys)
|
||||
|
||||
if tagsDiffResult:
|
||||
trackCompareResult[MediaDescriptorChangeSet.TAGS_KEY] = tagsDiffResult
|
||||
|
||||
sourceDispositionSet = sourceTrackDescriptor.getDispositionSet() if sourceTrackDescriptor is not None else set()
|
||||
targetDispositionSet = targetTrackDescriptor.getDispositionSet() if targetTrackDescriptor is not None else set()
|
||||
|
||||
# if targetTrackDescriptor.getIndex() == 3:
|
||||
# raise click.ClickException(f"{sourceDispositionSet} {targetDispositionSet}")
|
||||
|
||||
dispositionDiffResult = setDiff(sourceDispositionSet, targetDispositionSet)
|
||||
|
||||
if dispositionDiffResult:
|
||||
trackCompareResult[MediaDescriptorChangeSet.DISPOSITION_SET_KEY] = dispositionDiffResult
|
||||
|
||||
return trackCompareResult
|
||||
|
||||
|
||||
def generateDispositionTokens(self):
|
||||
"""
|
||||
#Example: -disposition:s:0 default -disposition:s:1 0
|
||||
"""
|
||||
dispositionTokens = []
|
||||
|
||||
# if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
|
||||
#
|
||||
# if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
# addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
|
||||
# trackDescriptor: TrackDescriptor
|
||||
# for trackDescriptor in addedTracks.values():
|
||||
#
|
||||
# dispositionSet = trackDescriptor.getDispositionSet()
|
||||
#
|
||||
# if dispositionSet:
|
||||
# dispositionTokens += [f"-disposition:{trackDescriptor.getType().indicator()}:{trackDescriptor.getSubIndex()}",
|
||||
# '+'.join([d.label() for d in dispositionSet])]
|
||||
#
|
||||
# if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
# changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
|
||||
# trackDiffObj: dict
|
||||
#
|
||||
#
|
||||
# for trackIndex, trackDiffObj in changedTracks.items():
|
||||
#
|
||||
# if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
|
||||
#
|
||||
# dispositionDiffObj: dict = trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY]
|
||||
#
|
||||
# addedDispositions = dispositionDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in dispositionDiffObj.keys() else set()
|
||||
# removedDispositions = dispositionDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in dispositionDiffObj.keys() else set()
|
||||
# unchangedDispositions = dispositionDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in dispositionDiffObj.keys() else set()
|
||||
#
|
||||
# targetDispositions = addedDispositions | unchangedDispositions
|
||||
#
|
||||
# trackDescriptor = self.__targetTrackDescriptors[trackIndex]
|
||||
# streamIndicator = trackDescriptor.getType().indicator()
|
||||
# subIndex = trackDescriptor.getSubIndex()
|
||||
#
|
||||
# if targetDispositions:
|
||||
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
|
||||
# # if not targetDispositions and removedDispositions:
|
||||
# else:
|
||||
# dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
|
||||
for ttd in self.__targetTrackDescriptors:
|
||||
|
||||
targetDispositions = ttd.getDispositionSet()
|
||||
streamIndicator = ttd.getType().indicator()
|
||||
subIndex = ttd.getSubIndex()
|
||||
|
||||
if targetDispositions:
|
||||
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '+'.join([d.label() for d in targetDispositions])]
|
||||
# if not targetDispositions and removedDispositions:
|
||||
else:
|
||||
dispositionTokens += [f"-disposition:{streamIndicator}:{subIndex}", '0']
|
||||
|
||||
return dispositionTokens
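# Example of the generated argument list (mirrors the docstring example; the
# stream indices are illustrative): a first subtitle track carrying DEFAULT
# and a second one without any disposition yield
#
#     ["-disposition:s:0", "default", "-disposition:s:1", "0"]
#
# Multiple dispositions on one track are joined with '+', e.g. "default+forced"
# (assuming those are the corresponding TrackDisposition labels).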
|
||||
|
||||
|
||||
def generateMetadataTokens(self):
|
||||
|
||||
metadataTokens = []
|
||||
|
||||
if MediaDescriptorChangeSet.TAGS_KEY in self.__changeSetObj.keys():
|
||||
|
||||
addedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
|
||||
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||
removedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
|
||||
if DIFF_REMOVED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||
changedMediaTags = (self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
|
||||
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||
|
||||
outputMediaTags = addedMediaTags | changedMediaTags
|
||||
|
||||
if (not 'no_signature' in self.__context.keys()
|
||||
or not self.__context['no_signature']):
|
||||
outputMediaTags = outputMediaTags | self.__signatureTags
|
||||
|
||||
# outputMediaTags = {k:v for k,v in outputMediaTags.items() if k not in self.__removeGlobalKeys}
|
||||
|
||||
for tagKey, tagValue in outputMediaTags.items():
|
||||
metadataTokens += [f"-metadata:g",
|
||||
f"{tagKey}={tagValue}"]
|
||||
|
||||
for tagKey, tagValue in changedMediaTags.items():
|
||||
metadataTokens += [f"-metadata:g",
|
||||
f"{tagKey}={tagValue}"]
|
||||
|
||||
for removeKey in removedMediaTags.keys():
|
||||
metadataTokens += [f"-metadata:g",
|
||||
f"{removeKey}="]
|
||||
|
||||
|
||||
if MediaDescriptorChangeSet.TRACKS_KEY in self.__changeSetObj.keys():
|
||||
|
||||
if DIFF_ADDED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
addedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY]
|
||||
trackDescriptor: TrackDescriptor
|
||||
for trackDescriptor in addedTracks.values():
|
||||
for tagKey, tagValue in trackDescriptor.getTags().items():
|
||||
if not tagKey in self.__removeTrackKeys:
|
||||
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||
+ f":{trackDescriptor.getSubIndex()}",
|
||||
f"{tagKey}={tagValue}"]
|
||||
|
||||
if DIFF_CHANGED_KEY in self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
changedTracks: dict = self.__changeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
|
||||
trackDiffObj: dict
|
||||
for trackIndex, trackDiffObj in changedTracks.items():
|
||||
|
||||
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
|
||||
|
||||
tagsDiffObj = trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY]
|
||||
|
||||
addedTrackTags = tagsDiffObj[DIFF_ADDED_KEY] if DIFF_ADDED_KEY in tagsDiffObj.keys() else {}
|
||||
changedTrackTags = tagsDiffObj[DIFF_CHANGED_KEY] if DIFF_CHANGED_KEY in tagsDiffObj.keys() else {}
|
||||
unchangedTrackTags = tagsDiffObj[DIFF_UNCHANGED_KEY] if DIFF_UNCHANGED_KEY in tagsDiffObj.keys() else {}
|
||||
removedTrackTags = tagsDiffObj[DIFF_REMOVED_KEY] if DIFF_REMOVED_KEY in tagsDiffObj.keys() else {}
|
||||
|
||||
outputTrackTags = addedTrackTags | changedTrackTags
|
||||
|
||||
trackDescriptor = self.__targetTrackDescriptors[trackIndex]
|
||||
|
||||
for tagKey, tagValue in outputTrackTags.items():
|
||||
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||
+ f":{trackDescriptor.getSubIndex()}",
|
||||
f"{tagKey}={tagValue}"]
|
||||
|
||||
for removeKey in removedTrackTags.keys():
|
||||
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||
+ f":{trackDescriptor.getSubIndex()}",
|
||||
f"{removeKey}="]
|
||||
|
||||
#HINT: In case of loading a track from an external file
|
||||
# no tags from source are present for the track so
|
||||
# the unchanged tracks are passed to the output file as well
|
||||
if trackDescriptor.getExternalSourceFilePath():
|
||||
for tagKey, tagValue in unchangedTrackTags.items():
|
||||
metadataTokens += [f"-metadata:s:{trackDescriptor.getType().indicator()}"
|
||||
+ f":{trackDescriptor.getSubIndex()}",
|
||||
f"{tagKey}={tagValue}"]
|
||||
|
||||
return metadataTokens
|
||||
|
||||
|
||||
def getChangeSetObj(self):
|
||||
return self.__changeSetObj
|
||||
@ -1,757 +0,0 @@
|
||||
import os, click, re
|
||||
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Header, Footer, Static, Button, Input, DataTable
|
||||
from textual.containers import Grid
|
||||
|
||||
from ffx.audio_layout import AudioLayout
|
||||
|
||||
from .pattern_controller import PatternController
|
||||
from .show_controller import ShowController
|
||||
from .track_controller import TrackController
|
||||
from .tag_controller import TagController
|
||||
|
||||
from .show_details_screen import ShowDetailsScreen
|
||||
from .pattern_details_screen import PatternDetailsScreen
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
from ffx.track_codec import TrackCodec
|
||||
from ffx.model.track import Track
|
||||
|
||||
from ffx.track_disposition import TrackDisposition
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.show_descriptor import ShowDescriptor
|
||||
|
||||
from textual.widgets._data_table import CellDoesNotExist
|
||||
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
from ffx.file_properties import FileProperties
|
||||
|
||||
from ffx.media_descriptor_change_set import MediaDescriptorChangeSet
|
||||
|
||||
from ffx.helper import formatRichColor, DIFF_ADDED_KEY, DIFF_CHANGED_KEY, DIFF_REMOVED_KEY, DIFF_UNCHANGED_KEY
|
||||
|
||||
|
||||
# Screen[dict[int, str, int]]
|
||||
class MediaDetailsScreen(Screen):
|
||||
|
||||
CSS = """
|
||||
|
||||
Grid {
|
||||
grid-size: 5 8;
|
||||
grid-rows: 8 2 2 2 2 8 2 2 8;
|
||||
grid-columns: 15 25 90 10 105;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
padding: 1;
|
||||
}
|
||||
|
||||
DataTable .datatable--cursor {
|
||||
background: darkorange;
|
||||
color: black;
|
||||
}
|
||||
|
||||
DataTable .datatable--header {
|
||||
background: steelblue;
|
||||
color: white;
|
||||
}
|
||||
|
||||
Input {
|
||||
border: none;
|
||||
}
|
||||
Button {
|
||||
border: none;
|
||||
}
|
||||
|
||||
DataTable {
|
||||
min-height: 40;
|
||||
}
|
||||
|
||||
#toplabel {
|
||||
height: 1;
|
||||
}
|
||||
.two {
|
||||
column-span: 2;
|
||||
}
|
||||
.three {
|
||||
column-span: 3;
|
||||
}
|
||||
|
||||
.four {
|
||||
column-span: 4;
|
||||
}
|
||||
.five {
|
||||
column-span: 5;
|
||||
}
|
||||
|
||||
.triple {
|
||||
row-span: 3;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
border: solid green;
|
||||
}
|
||||
|
||||
.purple {
|
||||
tint: purple 40%;
|
||||
}
|
||||
|
||||
.yellow {
|
||||
tint: yellow 40%;
|
||||
}
|
||||
|
||||
#differences-table {
|
||||
row-span: 8;
|
||||
/* tint: magenta 40%; */
|
||||
}
|
||||
|
||||
/* #pattern_input {
|
||||
tint: red 40%;
|
||||
}*/
|
||||
"""
|
||||
|
||||
|
||||
TRACKS_TABLE_INDEX_COLUMN_LABEL = "Index"
|
||||
TRACKS_TABLE_TYPE_COLUMN_LABEL = "Type"
|
||||
TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL = "SubIndex"
|
||||
TRACKS_TABLE_CODEC_COLUMN_LABEL = "Codec"
|
||||
TRACKS_TABLE_LAYOUT_COLUMN_LABEL = "Layout"
|
||||
TRACKS_TABLE_LANGUAGE_COLUMN_LABEL = "Language"
|
||||
TRACKS_TABLE_TITLE_COLUMN_LABEL = "Title"
|
||||
TRACKS_TABLE_DEFAULT_COLUMN_LABEL = "Default"
|
||||
TRACKS_TABLE_FORCED_COLUMN_LABEL = "Forced"
|
||||
|
||||
DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL = 'Differences (file->db/output)'
|
||||
|
||||
|
||||
BINDINGS = [
|
||||
("n", "new_pattern", "New Pattern"),
|
||||
("u", "update_pattern", "Update Pattern"),
|
||||
("e", "edit_pattern", "Edit Pattern"),
|
||||
]
|
||||
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
self.context = self.app.getContext()
|
||||
self.Session = self.context['database']['session'] # convenience
|
||||
|
||||
|
||||
self.__configurationData = self.context['config'].getData()
|
||||
|
||||
metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}
|
||||
|
||||
self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
|
||||
self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
|
||||
self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
|
||||
self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
|
||||
if 'streams' in metadataConfiguration.keys()
|
||||
and 'remove' in metadataConfiguration['streams'].keys() else [])
|
||||
self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
|
||||
if 'streams' in metadataConfiguration.keys()
|
||||
and 'ignore' in metadataConfiguration['streams'].keys() else [])
|
||||
|
||||
|
||||
self.__pc = PatternController(context = self.context)
|
||||
self.__sc = ShowController(context = self.context)
|
||||
self.__tc = TrackController(context = self.context)
|
||||
self.__tac = TagController(context = self.context)
|
||||
|
||||
if not 'command' in self.context.keys() or self.context['command'] != 'inspect':
|
||||
raise click.ClickException(f"MediaDetailsScreen.__init__(): Can only perform command 'inspect'")
|
||||
|
||||
if not 'arguments' in self.context.keys() or not 'filename' in self.context['arguments'].keys() or not self.context['arguments']['filename']:
|
||||
raise click.ClickException(f"MediaDetailsScreen.__init__(): Argument 'filename' is required to be provided for command 'inspect'")
|
||||
|
||||
self.__mediaFilename = self.context['arguments']['filename']
|
||||
|
||||
if not os.path.isfile(self.__mediaFilename):
|
||||
raise click.ClickException(f"MediaDetailsScreen.__init__(): Media file {self.__mediaFilename} does not exist")
|
||||
|
||||
self.loadProperties()
|
||||
|
||||
|
||||
def removeShow(self, showId : int = -1):
|
||||
"""Remove show entry from DataTable.
|
||||
Removes the <New show> entry if showId is not set"""
|
||||
|
||||
for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
|
||||
|
||||
rowData = self.showsTable.get_row(rowKey)
|
||||
|
||||
try:
|
||||
if (showId == -1 and rowData[0] == ' '
|
||||
or showId == int(rowData[0])):
|
||||
self.showsTable.remove_row(rowKey)
|
||||
return
|
||||
except:
|
||||
continue
|
||||
|
||||
|
||||
|
||||
def getRowIndexFromShowId(self, showId : int = -1) -> int:
|
||||
"""Find the index of the row where the value in the specified column matches the target_value."""
|
||||
|
||||
for rowKey, row in self.showsTable.rows.items(): # dict[RowKey, Row]
|
||||
|
||||
rowData = self.showsTable.get_row(rowKey)
|
||||
|
||||
try:
|
||||
if ((showId == -1 and rowData[0] == ' ')
|
||||
or showId == int(rowData[0])):
|
||||
return int(self.showsTable.get_row_index(rowKey))
|
||||
except:
|
||||
continue
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def loadProperties(self):
|
||||
|
||||
self.__mediaFileProperties = FileProperties(self.context, self.__mediaFilename)
|
||||
self.__sourceMediaDescriptor = self.__mediaFileProperties.getMediaDescriptor()
|
||||
|
||||
#HINT: This is None if the filename did not match anything in database
|
||||
self.__currentPattern = self.__mediaFileProperties.getPattern()
|
||||
|
||||
# No tags present
|
||||
self.__targetMediaDescriptor = self.__currentPattern.getMediaDescriptor(self.context) if self.__currentPattern is not None else None
|
||||
|
||||
# Enumerating differences between media descriptors
|
||||
# from file (=current) vs from stored in database (=target)
|
||||
try:
|
||||
mdcs = MediaDescriptorChangeSet(self.context,
|
||||
self.__targetMediaDescriptor,
|
||||
self.__sourceMediaDescriptor)
|
||||
|
||||
self.__mediaChangeSetObj = mdcs.getChangeSetObj()
|
||||
except ValueError:
|
||||
self.__mediaChangeSetObj = {}
|
||||
|
||||
|
||||
def updateDifferences(self):
|
||||
|
||||
self.loadProperties()
|
||||
|
||||
self.differencesTable.clear()
|
||||
|
||||
|
||||
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
|
||||
|
||||
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].items():
|
||||
if tagKey not in self.__ignoreGlobalKeys:
|
||||
row = (f"add media tag: key='{tagKey}' value='{tagValue}'",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].items():
|
||||
if tagKey not in self.__ignoreGlobalKeys and tagKey not in self.__removeGlobalKeys:
|
||||
row = (f"remove media tag: key='{tagKey}' value='{tagValue}'",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||
for tagKey, tagValue in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].items():
|
||||
if tagKey not in self.__ignoreGlobalKeys:
|
||||
row = (f"change media tag: key='{tagKey}' value='{tagValue}'",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
|
||||
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
|
||||
|
||||
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
|
||||
trackDescriptor: TrackDescriptor
|
||||
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
|
||||
row = (f"add {trackDescriptor.getType().label()} track: index={trackDescriptor.getIndex()} lang={trackDescriptor.getLanguage().threeLetter()}",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
|
||||
row = (f"remove stream #{trackIndex}",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
|
||||
changedTracks: dict = self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY]
|
||||
|
||||
targetTrackDescriptors = self.__targetMediaDescriptor.getTrackDescriptors()
|
||||
|
||||
trackDiffObj: dict
|
||||
for trackIndex, trackDiffObj in changedTracks.items():
|
||||
|
||||
ttd: TrackDescriptor = targetTrackDescriptors[trackIndex]
|
||||
|
||||
|
||||
if MediaDescriptorChangeSet.TAGS_KEY in trackDiffObj.keys():
|
||||
|
||||
removedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY]
|
||||
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||
for tagKey, tagValue in removedTags.items():
|
||||
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove key={tagKey} value={tagValue}",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
addedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY]
|
||||
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||
for tagKey, tagValue in addedTags.items():
|
||||
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add key={tagKey} value={tagValue}",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
changedTags = (trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY]
|
||||
if DIFF_CHANGED_KEY in trackDiffObj[MediaDescriptorChangeSet.TAGS_KEY].keys() else {})
|
||||
for tagKey, tagValue in changedTags.items():
|
||||
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) change key={tagKey} value={tagValue}",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
|
||||
if MediaDescriptorChangeSet.DISPOSITION_SET_KEY in trackDiffObj.keys():
|
||||
|
||||
addedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_ADDED_KEY]
|
||||
if DIFF_ADDED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
|
||||
for ad in addedDispositions:
|
||||
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) add disposition={ad.label()}",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
removedDispositions = (trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY][DIFF_REMOVED_KEY]
|
||||
if DIFF_REMOVED_KEY in trackDiffObj[MediaDescriptorChangeSet.DISPOSITION_SET_KEY].keys() else set())
|
||||
for rd in removedDispositions:
|
||||
row = (f"change stream #{ttd.getIndex()} ({ttd.getType().label()}:{ttd.getSubIndex()}) remove disposition={rd.label()}",)
|
||||
self.differencesTable.add_row(*map(str, row))
|
||||
|
||||
|
||||
def on_mount(self):
|
||||
|
||||
if self.__currentPattern is None:
|
||||
row = (' ', '<New show>', ' ') # Convert each element to a string before adding
|
||||
self.showsTable.add_row(*map(str, row))
|
||||
|
||||
for show in self.__sc.getAllShows():
|
||||
row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
|
||||
self.showsTable.add_row(*map(str, row))
|
||||
|
||||
for mediaTagKey, mediaTagValue in self.__sourceMediaDescriptor.getTags().items():
|
||||
|
||||
textColor = None
|
||||
if mediaTagKey in self.__ignoreGlobalKeys:
|
||||
textColor = 'blue'
|
||||
if mediaTagKey in self.__removeGlobalKeys:
|
||||
textColor = 'red'
|
||||
|
||||
row = (formatRichColor(mediaTagKey, textColor), formatRichColor(mediaTagValue, textColor)) # Convert each element to a string before adding
|
||||
self.mediaTagsTable.add_row(*map(str, row))
|
||||
|
||||
self.updateTracks()
|
||||
|
||||
|
||||
if self.__currentPattern is not None:
|
||||
|
||||
showIdentifier = self.__currentPattern.getShowId()
|
||||
showRowIndex = self.getRowIndexFromShowId(showIdentifier)
|
||||
if showRowIndex is not None:
|
||||
self.showsTable.move_cursor(row=showRowIndex)
|
||||
|
||||
self.query_one("#pattern_input", Input).value = self.__currentPattern.getPattern()
|
||||
|
||||
self.updateDifferences()
|
||||
|
||||
else:
|
||||
|
||||
self.query_one("#pattern_input", Input).value = self.__mediaFilename
|
||||
self.highlightPattern(True)
|
||||
|
||||
|
||||
def highlightPattern(self, state : bool):
|
||||
if state:
|
||||
self.query_one("#pattern_input", Input).styles.background = 'red'
|
||||
else:
|
||||
self.query_one("#pattern_input", Input).styles.background = None
|
||||
|
||||
|
||||
def updateTracks(self):
|
||||
|
||||
self.tracksTable.clear()
|
||||
|
||||
# trackDescriptorList = self.__sourceMediaDescriptor.getAllTrackDescriptors()
|
||||
trackDescriptorList = self.__sourceMediaDescriptor.getTrackDescriptors()
|
||||
|
||||
typeCounter = {}
|
||||
|
||||
for td in trackDescriptorList:
|
||||
|
||||
trackType = td.getType()
|
||||
if not trackType in typeCounter.keys():
|
||||
typeCounter[trackType] = 0
|
||||
|
||||
dispoSet = td.getDispositionSet()
|
||||
audioLayout = td.getAudioLayout()
|
||||
row = (td.getIndex(),
|
||||
trackType.label(),
|
||||
typeCounter[trackType],
|
||||
td.getCodec().label(),
|
||||
audioLayout.label() if trackType == TrackType.AUDIO
|
||||
and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
|
||||
td.getLanguage().label(),
|
||||
td.getTitle(),
|
||||
'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
|
||||
'Yes' if TrackDisposition.FORCED in dispoSet else 'No')
|
||||
|
||||
self.tracksTable.add_row(*map(str, row))
|
||||
|
||||
typeCounter[trackType] += 1
|
||||
|
||||
|
||||
def compose(self):
|
||||
|
||||
# Create the DataTable widget
|
||||
self.showsTable = DataTable(classes="two")
|
||||
|
||||
# Define the columns with headers
|
||||
self.column_key_show_id = self.showsTable.add_column("ID", width=10)
|
||||
self.column_key_show_name = self.showsTable.add_column("Name", width=80)
|
||||
self.column_key_show_year = self.showsTable.add_column("Year", width=10)
|
||||
|
||||
self.showsTable.cursor_type = 'row'
|
||||
|
||||
|
||||
self.mediaTagsTable = DataTable(classes="two")
|
||||
|
||||
# Define the columns with headers
|
||||
self.column_key_track_tag_key = self.mediaTagsTable.add_column("Key", width=30)
|
||||
self.column_key_track_tag_value = self.mediaTagsTable.add_column("Value", width=70)
|
||||
|
||||
self.mediaTagsTable.cursor_type = 'row'
|
||||
|
||||
|
||||
self.tracksTable = DataTable(classes="two")
|
||||
|
||||
# Define the columns with headers
|
||||
self.column_key_track_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_INDEX_COLUMN_LABEL, width=5)
|
||||
self.column_key_track_type = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TYPE_COLUMN_LABEL, width=10)
|
||||
self.column_key_track_sub_index = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_SUB_INDEX_COLUMN_LABEL, width=8)
|
||||
self.column_key_track_codec = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_CODEC_COLUMN_LABEL, width=10)
|
||||
self.column_key_track_layout = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LAYOUT_COLUMN_LABEL, width=10)
|
||||
self.column_key_track_language = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_LANGUAGE_COLUMN_LABEL, width=15)
|
||||
self.column_key_track_title = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_TITLE_COLUMN_LABEL, width=48)
|
||||
self.column_key_track_default = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_DEFAULT_COLUMN_LABEL, width=8)
|
||||
self.column_key_track_forced = self.tracksTable.add_column(MediaDetailsScreen.TRACKS_TABLE_FORCED_COLUMN_LABEL, width=8)
|
||||
|
||||
self.tracksTable.cursor_type = 'row'
|
||||
|
||||
|
||||
# Create the DataTable widget
|
||||
self.differencesTable = DataTable(id='differences-table') # classes="triple"
|
||||
|
||||
# Define the columns with headers
|
||||
self.column_key_differences = self.differencesTable.add_column(MediaDetailsScreen.DIFFERENCES_TABLE_DIFFERENCES_COLUMN_LABEL, width=100)
|
||||
|
||||
self.differencesTable.cursor_type = 'row'
|
||||
|
||||
yield Header()
|
||||
|
||||
with Grid():
|
||||
|
||||
# 1
|
||||
yield Static("Show")
|
||||
yield self.showsTable
|
||||
yield Static(" ")
|
||||
yield self.differencesTable
|
||||
|
||||
# 2
|
||||
yield Static(" ", classes="four")
|
||||
|
||||
# 3
|
||||
yield Static(" ")
|
||||
yield Button("Substitute", id="pattern_button")
|
||||
yield Static(" ", classes="two")
|
||||
|
||||
# 4
|
||||
yield Static("Pattern")
|
||||
yield Input(type="text", id='pattern_input', classes="two")
|
||||
|
||||
yield Static(" ")
|
||||
|
||||
# 5
|
||||
yield Static(" ", classes="four")
|
||||
|
||||
# 6
|
||||
yield Static("Media Tags")
|
||||
yield self.mediaTagsTable
|
||||
yield Static(" ")
|
||||
|
||||
# 7
|
||||
yield Static(" ", classes="four")
|
||||
|
||||
# 8
|
||||
yield Static(" ")
|
||||
yield Button("Set Default", id="select_default_button")
|
||||
yield Button("Set Forced", id="select_forced_button")
|
||||
yield Static(" ")
|
||||
# 9
|
||||
yield Static("Streams")
|
||||
yield self.tracksTable
|
||||
yield Static(" ")
|
||||
|
||||
yield Footer()
|
||||
|
||||
|
||||
def getPatternObjFromInput(self):
|
||||
"""Returns show id and pattern as obj from corresponding inputs"""
|
||||
patternObj = {}
|
||||
try:
|
||||
patternObj['show_id'] = self.getSelectedShowDescriptor().getId()
|
||||
patternObj['pattern'] = str(self.query_one("#pattern_input", Input).value)
|
||||
except:
|
||||
return {}
|
||||
return patternObj
|
||||
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
|
||||
if event.button.id == "pattern_button":
|
||||
|
||||
pattern = self.query_one("#pattern_input", Input).value
|
||||
|
||||
patternMatch = re.search(FileProperties.SE_INDICATOR_PATTERN, pattern)
|
||||
|
||||
if patternMatch:
|
||||
self.query_one("#pattern_input", Input).value = pattern.replace(patternMatch.group(1), FileProperties.SE_INDICATOR_PATTERN)
|
||||
|
||||
|
||||
if event.button.id == "select_default_button":
|
||||
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||
self.__sourceMediaDescriptor.setDefaultSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
|
||||
self.updateTracks()
|
||||
|
||||
if event.button.id == "select_forced_button":
|
||||
selectedTrackDescriptor = self.getSelectedTrackDescriptor()
|
||||
self.__sourceMediaDescriptor.setForcedSubTrack(selectedTrackDescriptor.getType(), selectedTrackDescriptor.getSubIndex())
|
||||
self.updateTracks()
|
||||
|
||||
|
||||
def getSelectedTrackDescriptor(self):
|
||||
"""Returns a partial track descriptor"""
|
||||
try:
|
||||
|
||||
# Fetch the currently selected row when 'Enter' is pressed
|
||||
#selected_row_index = self.table.cursor_row
|
||||
row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)
|
||||
|
||||
if row_key is not None:
|
||||
selected_track_data = self.tracksTable.get_row(row_key)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self.context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = int(selected_track_data[0])
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.fromLabel(selected_track_data[1])
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = int(selected_track_data[2])
|
||||
kwargs[TrackDescriptor.CODEC_KEY] = TrackCodec.fromLabel(selected_track_data[3])
|
||||
kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = AudioLayout.fromLabel(selected_track_data[4])
|
||||
|
||||
return TrackDescriptor(**kwargs)
|
||||
else:
|
||||
return None
|
||||
|
||||
except CellDoesNotExist:
|
||||
return None
|
||||
|
||||
|
||||
def getSelectedShowDescriptor(self) -> ShowDescriptor:
|
||||
|
||||
try:
|
||||
|
||||
row_key, col_key = self.showsTable.coordinate_to_cell_key(self.showsTable.cursor_coordinate)
|
||||
|
||||
if row_key is not None:
|
||||
selected_row_data = self.showsTable.get_row(row_key)
|
||||
|
||||
try:
|
||||
kwargs = {}
|
||||
|
||||
kwargs[ShowDescriptor.ID_KEY] = int(selected_row_data[0])
|
||||
kwargs[ShowDescriptor.NAME_KEY] = str(selected_row_data[1])
|
||||
kwargs[ShowDescriptor.YEAR_KEY] = int(selected_row_data[2])
|
||||
|
||||
return ShowDescriptor(**kwargs)
|
||||
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
except CellDoesNotExist:
|
||||
return None
|
||||
|
||||
|
||||
def handle_new_pattern(self, showDescriptor: ShowDescriptor):
|
||||
""""""
|
||||
|
||||
if type(showDescriptor) is not ShowDescriptor:
|
||||
raise TypeError("MediaDetailsScreen.handle_new_pattern(): Argument 'showDescriptor' has to be of type ShowDescriptor")
|
||||
|
||||
self.removeShow()
|
||||
|
||||
showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
|
||||
if showRowIndex is None:
|
||||
show = (showDescriptor.getId(), showDescriptor.getName(), showDescriptor.getYear())
|
||||
self.showsTable.add_row(*map(str, show))
|
||||
|
||||
showRowIndex = self.getRowIndexFromShowId(showDescriptor.getId())
|
||||
if showRowIndex is not None:
|
||||
self.showsTable.move_cursor(row=showRowIndex)
|
||||
|
||||
patternObj = self.getPatternObjFromInput()
|
||||
|
||||
if patternObj:
|
||||
patternId = self.__pc.addPattern(patternObj)
|
||||
if patternId:
|
||||
self.highlightPattern(False)
|
||||
|
||||
for tagKey, tagValue in self.__sourceMediaDescriptor.getTags().items():
|
||||
|
||||
# Filter tags that make no sense to preserve
|
||||
if tagKey not in self.__ignoreGlobalKeys and not tagKey in self.__removeGlobalKeys:
|
||||
self.__tac.updateMediaTag(patternId, tagKey, tagValue)
|
||||
|
||||
# for trackDescriptor in self.__sourceMediaDescriptor.getAllTrackDescriptors():
|
||||
for trackDescriptor in self.__sourceMediaDescriptor.getTrackDescriptors():
|
||||
self.__tc.addTrack(trackDescriptor, patternId = patternId)
|
||||
|
||||
|
||||
def action_new_pattern(self):
|
||||
"""Adding new patterns
|
||||
|
||||
If the corresponding show does not exist in the DB it is added beforehand"""
|
||||
|
||||
selectedShowDescriptor = self.getSelectedShowDescriptor()
|
||||
|
||||
#HINT: Callback is invoked after this method has exited. As a workaround the callback is executed directly
|
||||
# from here with a mock-up screen result containing the necessary part of keys to perform correctly.
|
||||
if selectedShowDescriptor is None:
|
||||
self.app.push_screen(ShowDetailsScreen(), self.handle_new_pattern)
|
||||
else:
|
||||
self.handle_new_pattern(selectedShowDescriptor)
|
||||
|
||||
|
||||
def action_update_pattern(self):
|
||||
"""Updating patterns
|
||||
|
||||
When updating the database, the actions must reverse the difference (equivalent to a diff db->file)"""
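# Direction of the reversal for the global tags, illustrated (key names as
# used in the branches below):
#   DIFF_ADDED_KEY   - tag exists in the database but not in the file -> delete it from the database
#   DIFF_REMOVED_KEY - tag exists in the file but not in the database -> store the file's value in the database
#   DIFF_CHANGED_KEY - values differ                                  -> overwrite the database value with the file's value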
|
||||
|
||||
if self.__currentPattern is not None:
|
||||
patternObj = self.getPatternObjFromInput()
|
||||
if (patternObj
|
||||
and self.__currentPattern.getPattern() != patternObj['pattern']):
|
||||
return self.__pc.updatePattern(self.__currentPattern.getId(), patternObj)
|
||||
|
||||
self.loadProperties()
|
||||
|
||||
# __mediaChangeSetObj is file vs database
|
||||
if MediaDescriptorChangeSet.TAGS_KEY in self.__mediaChangeSetObj.keys():
|
||||
|
||||
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||
for addedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_ADDED_KEY].keys():
|
||||
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} addedTagKey={addedTagKey}")
|
||||
self.__tac.deleteMediaTagByKey(self.__currentPattern.getId(), addedTagKey)
|
||||
|
||||
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||
for removedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_REMOVED_KEY].keys():
|
||||
currentTags = self.__sourceMediaDescriptor.getTags()
|
||||
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} removedTagKey={removedTagKey} currentTags={currentTags[removedTagKey]}")
|
||||
self.__tac.updateMediaTag(self.__currentPattern.getId(), removedTagKey, currentTags[removedTagKey])
|
||||
|
||||
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY].keys():
|
||||
for changedTagKey in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TAGS_KEY][DIFF_CHANGED_KEY].keys():
|
||||
currentTags = self.__sourceMediaDescriptor.getTags()
|
||||
# click.ClickException(f"delete media tag patternId={self.__currentPattern.getId()} changedTagKey={changedTagKey} currentTags={currentTags[changedTagKey]}")
|
||||
self.__tac.updateMediaTag(self.__currentPattern.getId(), changedTagKey, currentTags[changedTagKey])
|
||||
|
||||
if MediaDescriptorChangeSet.TRACKS_KEY in self.__mediaChangeSetObj.keys():
|
||||
|
||||
if DIFF_ADDED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
|
||||
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_ADDED_KEY].items():
|
||||
#targetTracks = [t for t in self.__targetMediaDescriptor.getAllTrackDescriptors() if t.getIndex() == addedTrackIndex]
|
||||
# if targetTracks:
|
||||
# self.__tc.deleteTrack(targetTracks[0].getId()) # id
|
||||
# self.__tc.deleteTrack(targetTracks[0].getId())
|
||||
self.__tc.addTrack(trackDescriptor, patternId = self.__currentPattern.getId())
|
||||
|
||||
if DIFF_REMOVED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
trackDescriptor: TrackDescriptor
|
||||
for trackIndex, trackDescriptor in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_REMOVED_KEY].items():
|
||||
# Add track via inspect/update
|
||||
#self.__tc.addTrack(removedTrack, patternId = self.__currentPattern.getId())
|
||||
self.__tc.deleteTrack(trackDescriptor.getId())
|
||||
|
||||
if DIFF_CHANGED_KEY in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY].keys():
|
||||
|
||||
# [vsTracks[tp].getIndex()] = trackDiff
|
||||
for trackIndex, trackDiff in self.__mediaChangeSetObj[MediaDescriptorChangeSet.TRACKS_KEY][DIFF_CHANGED_KEY].items():
|
||||
|
||||
targetTracks = [t for t in self.__targetMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
|
||||
targetTrackId = targetTracks[0].getId() if targetTracks else None
|
||||
targetTrackIndex = targetTracks[0].getIndex() if targetTracks else None
|
||||
|
||||
changedCurrentTracks = [t for t in self.__sourceMediaDescriptor.getTrackDescriptors() if t.getIndex() == trackIndex]
|
||||
# changedCurrentTrackId #HINT: Undefined as track descriptors do not come from file with track_id
|
||||
|
||||
if TrackDescriptor.TAGS_KEY in trackDiff.keys():
|
||||
tagsDiff = trackDiff[TrackDescriptor.TAGS_KEY]
|
||||
|
||||
if DIFF_ADDED_KEY in tagsDiff.keys():
|
||||
for tagKey, tagValue in tagsDiff[DIFF_ADDED_KEY].items():
|
||||
|
||||
# if targetTracks:
|
||||
# self.__tac.deleteTrackTagByKey(targetTrackId, addedTrackTagKey)
|
||||
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
|
||||
|
||||
|
||||
if DIFF_REMOVED_KEY in tagsDiff.keys():
|
||||
for tagKey, tagValue in tagsDiff[DIFF_REMOVED_KEY].items():
|
||||
# if changedCurrentTracks:
|
||||
# self.__tac.updateTrackTag(targetTrackId, removedTrackTagKey, changedCurrentTracks[0].getTags()[removedTrackTagKey])
|
||||
self.__tac.deleteTrackTagByKey(targetTrackId, tagKey)
|
||||
|
||||
if DIFF_CHANGED_KEY in tagsDiff.keys():
|
||||
for tagKey, tagValue in tagsDiff[DIFF_CHANGED_KEY].items():
|
||||
# if changedCurrentTracks:
|
||||
# self.__tac.updateTrackTag(targetTrackId, changedTrackTagKey, changedCurrentTracks[0].getTags()[changedTrackTagKey])
|
||||
self.__tac.updateTrackTag(targetTrackId, tagKey, tagValue)
|
||||
|
||||
|
||||
if TrackDescriptor.DISPOSITION_SET_KEY in trackDiff.keys():
|
||||
changedTrackDispositionDiff = trackDiff[TrackDescriptor.DISPOSITION_SET_KEY]
|
||||
|
||||
if DIFF_ADDED_KEY in changedTrackDispositionDiff.keys():
|
||||
for changedDisposition in changedTrackDispositionDiff[DIFF_ADDED_KEY]:
|
||||
if targetTrackIndex is not None:
|
||||
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, True)
|
||||
|
||||
if DIFF_REMOVED_KEY in changedTrackDispositionDiff.keys():
|
||||
for changedDisposition in changedTrackDispositionDiff[DIFF_REMOVED_KEY]:
|
||||
if targetTrackIndex is not None:
|
||||
self.__tc.setDispositionState(self.__currentPattern.getId(), targetTrackIndex, changedDisposition, False)
|
||||
|
||||
|
||||
self.updateDifferences()
|
||||
|
||||
|
||||
|
||||
def action_edit_pattern(self):
|
||||
|
||||
patternObj = self.getPatternObjFromInput()
|
||||
|
||||
if patternObj.get('pattern'):
|
||||
|
||||
selectedPatternId = self.__pc.findPattern(patternObj)
|
||||
|
||||
if selectedPatternId is None:
|
||||
raise click.ClickException(f"MediaDetailsScreen.action_edit_pattern(): Pattern to edit has no id")
|
||||
|
||||
self.app.push_screen(PatternDetailsScreen(patternId = selectedPatternId, showId = self.getSelectedShowDescriptor().getId()), self.handle_edit_pattern) # <-
|
||||
|
||||
|
||||
def handle_edit_pattern(self, screenResult):
|
||||
self.query_one("#pattern_input", Input).value = screenResult['pattern']
|
||||
self.updateDifferences()
|
||||
|
||||
@ -1,47 +0,0 @@
|
||||
import os, sys, importlib, inspect, glob, re
|
||||
|
||||
from ffx.configuration_controller import ConfigurationController
|
||||
from ffx.database import databaseContext
|
||||
|
||||
from sqlalchemy import Engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
|
||||
|
||||
class Conversion():
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self._context = {}
|
||||
self._context['config'] = ConfigurationController()
|
||||
|
||||
self._context['database'] = databaseContext(databasePath=self._context['config'].getDatabaseFilePath())
|
||||
|
||||
self.__databaseSession: sessionmaker = self._context['database']['session']
|
||||
self.__databaseEngine: Engine = self._context['database']['engine']
|
||||
|
||||
|
||||
@staticmethod
|
||||
def list():
|
||||
|
||||
basePath = os.path.dirname(__file__)
|
||||
|
||||
filenamePattern = re.compile("conversion_([0-9]+)_([0-9]+)\\.py")
|
||||
|
||||
filenameList = [os.path.basename(fp) for fp in glob.glob(f"{ basePath }/*.py") if fp != __file__]
|
||||
|
||||
versionTupleList = [(fm.group(1), fm.group(2)) for fn in filenameList if (fm := filenamePattern.search(fn))]
|
||||
|
||||
return versionTupleList
|
||||
|
||||
|
||||
@staticmethod
|
||||
def getClassReference(versionFrom, versionTo):
|
||||
importlib.import_module(f"ffx.model.conversions.conversion_{ versionFrom }_{ versionTo }")
|
||||
for name, obj in inspect.getmembers(sys.modules[f"ffx.model.conversions.conversion_{ versionFrom }_{ versionTo }"]):
|
||||
#HINT: Excluding DispositionCombination as it seems to be included by import (?)
|
||||
if inspect.isclass(obj) and name != 'Conversion' and name.startswith('Conversion'):
|
||||
return obj
|
||||
|
||||
@staticmethod
|
||||
def getAllClassReferences():
|
||||
return [Conversion.getClassReference(verFrom, verTo) for verFrom, verTo in Conversion.list()]
|
||||
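# A minimal usage sketch (assumed, not part of this module): instantiate each
# discovered conversion and run it; this presumes every Conversion_X_Y
# subclass implements applyConversion().

def applyAllConversions():
    """Run every discovered schema conversion (illustrative helper)."""
    for conversionClass in Conversion.getAllClassReferences():
        conversionClass().applyConversion()
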
@ -1,17 +0,0 @@
|
||||
import os, sys, importlib, inspect, glob, re

from sqlalchemy import text

from .conversion import Conversion


class Conversion_2_3(Conversion):

    def __init__(self):
        super().__init__()

    def applyConversion(self):

        # HINT: Conversion stores its session/engine in name-mangled private
        # attributes, so the subclass reads them from the shared protected context.
        s = self._context['database']['session']()
        e = self._context['database']['engine']

        with e.connect() as c:
            c.execute(text("ALTER TABLE user ADD COLUMN email VARCHAR(255)"))

@ -1,7 +0,0 @@
|
||||
import os, sys, importlib, inspect, glob, re
|
||||
|
||||
from .conversion import Conversion
|
||||
|
||||
|
||||
class Conversion_3_4(Conversion):
|
||||
pass
|
||||
@ -1,28 +0,0 @@
|
||||
# from typing import List
|
||||
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Enum
|
||||
from sqlalchemy.orm import relationship, declarative_base, sessionmaker
|
||||
|
||||
from .show import Base
|
||||
|
||||
|
||||
class MediaTag(Base):
|
||||
"""
|
||||
relationship(argument, opt1, opt2, ...)
|
||||
argument is string of class or Mapped class of the target entity
|
||||
backref creates a bi-directional corresponding relationship (back_populates preferred)
|
||||
back_populates points to the corresponding relationship (the actual class attribute identifier)
|
||||
|
||||
See: https://docs.sqlalchemy.org/en/(14|20)/orm/basic_relationships.html
|
||||
"""
|
||||
|
||||
__tablename__ = 'media_tags'
|
||||
|
||||
# v1.x
|
||||
id = Column(Integer, primary_key=True)
|
||||
|
||||
key = Column(String)
|
||||
value = Column(String)
|
||||
|
||||
# v1.x
|
||||
pattern_id = Column(Integer, ForeignKey('patterns.id', ondelete="CASCADE"))
|
||||
pattern = relationship('Pattern', back_populates='media_tags')
|
||||
@ -1,78 +0,0 @@
|
||||
import click
|
||||
|
||||
from sqlalchemy import Column, Integer, String, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
|
||||
from .show import Base, Show
|
||||
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
from ffx.show_descriptor import ShowDescriptor
|
||||
|
||||
|
||||
class Pattern(Base):
|
||||
|
||||
__tablename__ = 'patterns'
|
||||
|
||||
# v1.x
|
||||
id = Column(Integer, primary_key=True)
|
||||
pattern = Column(String)
|
||||
|
||||
# v2.0
|
||||
# id: Mapped[int] = mapped_column(Integer, primary_key=True)
|
||||
# pattern: Mapped[str] = mapped_column(String, nullable=False)
|
||||
|
||||
# v1.x
|
||||
show_id = Column(Integer, ForeignKey('shows.id', ondelete="CASCADE"))
|
||||
show = relationship(Show, back_populates='patterns', lazy='joined')
|
||||
|
||||
# v2.0
|
||||
# show_id: Mapped[int] = mapped_column(ForeignKey("shows.id", ondelete="CASCADE"))
|
||||
# show: Mapped["Show"] = relationship(back_populates="patterns")
|
||||
|
||||
tracks = relationship('Track', back_populates='pattern', cascade="all, delete", lazy='joined')
|
||||
|
||||
media_tags = relationship('MediaTag', back_populates='pattern', cascade="all, delete", lazy='joined')
|
||||
|
||||
quality = Column(Integer, default=0)
|
||||
|
||||
|
||||
|
||||
def getId(self):
|
||||
return int(self.id)
|
||||
|
||||
def getShowId(self):
|
||||
return int(self.show_id)
|
||||
|
||||
def getShowDescriptor(self, context) -> ShowDescriptor:
|
||||
# click.echo(f"self.show {self.show} id={self.show_id}")
|
||||
return self.show.getDescriptor(context)
|
||||
|
||||
def getPattern(self):
|
||||
return str(self.pattern)
|
||||
|
||||
def getTags(self):
|
||||
return {str(t.key):str(t.value) for t in self.media_tags}
|
||||
|
||||
|
||||
def getMediaDescriptor(self, context):
|
||||
|
||||
kwargs = {}
|
||||
|
||||
kwargs[MediaDescriptor.CONTEXT_KEY] = context
|
||||
|
||||
kwargs[MediaDescriptor.TAGS_KEY] = self.getTags()
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = []
|
||||
|
||||
# Set ordered subindices
|
||||
subIndexCounter = {}
|
||||
for track in self.tracks:
|
||||
trackType = track.getType()
|
||||
if not trackType in subIndexCounter.keys():
|
||||
subIndexCounter[trackType] = 0
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY].append(track.getDescriptor(context, subIndex = subIndexCounter[trackType]))
|
||||
subIndexCounter[trackType] += 1
|
||||
|
||||
return MediaDescriptor(**kwargs)
|
||||
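# Sub-index bookkeeping in getMediaDescriptor(), illustrated with an assumed
# track order: for tracks typed [video, audio, audio, subtitle] the counter
# yields subIndex values [0, 0, 1, 0], i.e. the per-type stream index used by
# the ffmpeg-style "type:subIndex" selectors elsewhere in the code base.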
@ -1,16 +0,0 @@
|
||||
# from typing import List
|
||||
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Enum
|
||||
from sqlalchemy.orm import relationship, declarative_base, sessionmaker
|
||||
|
||||
from .show import Base
|
||||
|
||||
|
||||
class Property(Base):
|
||||
|
||||
__tablename__ = 'properties'
|
||||
|
||||
# v1.x
|
||||
id = Column(Integer, primary_key=True)
|
||||
|
||||
key = Column(String)
|
||||
value = Column(String)
|
||||
@ -1,71 +0,0 @@
import click

from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship

from .show import Base, Show


class ShiftedSeason(Base):

    __tablename__ = 'shifted_seasons'

    # v1.x
    id = Column(Integer, primary_key=True)

    # v2.0
    # id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # pattern: Mapped[str] = mapped_column(String, nullable=False)

    # v1.x
    show_id = Column(Integer, ForeignKey('shows.id', ondelete="CASCADE"))
    show = relationship(Show, back_populates='shifted_seasons', lazy='joined')

    # v2.0
    # show_id: Mapped[int] = mapped_column(ForeignKey("shows.id", ondelete="CASCADE"))
    # show: Mapped["Show"] = relationship(back_populates="patterns")

    original_season = Column(Integer)

    first_episode = Column(Integer, default=-1)
    last_episode = Column(Integer, default=-1)

    season_offset = Column(Integer, default=0)
    episode_offset = Column(Integer, default=0)

    def getId(self):
        return self.id

    def getOriginalSeason(self):
        return self.original_season

    def getFirstEpisode(self):
        return self.first_episode

    def getLastEpisode(self):
        return self.last_episode

    def getSeasonOffset(self):
        return self.season_offset

    def getEpisodeOffset(self):
        return self.episode_offset

    def getObj(self):
        shiftedSeasonObj = {}
        shiftedSeasonObj['original_season'] = self.getOriginalSeason()
        shiftedSeasonObj['first_episode'] = self.getFirstEpisode()
        shiftedSeasonObj['last_episode'] = self.getLastEpisode()
        shiftedSeasonObj['season_offset'] = self.getSeasonOffset()
        shiftedSeasonObj['episode_offset'] = self.getEpisodeOffset()
        return shiftedSeasonObj
@ -1,62 +0,0 @@
# from typing import List
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, declarative_base, sessionmaker

from ffx.show_descriptor import ShowDescriptor

Base = declarative_base()


class Show(Base):
    """
    relationship(argument, opt1, opt2, ...)
    argument is a string naming the target class, or the Mapped class of the target entity
    backref creates a bi-directional corresponding relationship (back_populates preferred)
    back_populates points to the corresponding relationship (the actual class attribute identifier)

    See: https://docs.sqlalchemy.org/en/(14|20)/orm/basic_relationships.html
    """

    __tablename__ = 'shows'

    # v1.x
    id = Column(Integer, primary_key=True)

    name = Column(String)
    year = Column(Integer)

    # v2.0
    # id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # name: Mapped[str] = mapped_column(String, nullable=False)
    # year: Mapped[int] = mapped_column(Integer, nullable=False)

    # v1.x
    # patterns = relationship('Pattern', back_populates='show', cascade="all, delete", passive_deletes=True)
    patterns = relationship('Pattern', back_populates='show', cascade="all, delete")
    # patterns = relationship('Pattern', back_populates='show', cascade="all")

    # v2.0
    # patterns: Mapped[List["Pattern"]] = relationship(back_populates="show", cascade="all, delete")

    shifted_seasons = relationship('ShiftedSeason', back_populates='show', cascade="all, delete")

    index_season_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS)
    index_episode_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS)
    indicator_season_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS)
    indicator_episode_digits = Column(Integer, default=ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS)

    def getDescriptor(self, context):
        kwargs = {}
        kwargs[ShowDescriptor.CONTEXT_KEY] = context
        kwargs[ShowDescriptor.ID_KEY] = int(self.id)
        kwargs[ShowDescriptor.NAME_KEY] = str(self.name)
        kwargs[ShowDescriptor.YEAR_KEY] = int(self.year)
        kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY] = int(self.index_season_digits)
        kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY] = int(self.index_episode_digits)
        kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY] = int(self.indicator_season_digits)
        kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY] = int(self.indicator_episode_digits)
        return ShowDescriptor(**kwargs)
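Not part of the removed sources: the class docstring above describes the back_populates pairing in general terms, so here is a minimal, self-contained sketch of such a bi-directional pair. Parent/Child are throwaway names, not ffx models.

# Hedged SQLAlchemy sketch (1.4/2.0 classic style); Parent/Child are illustrative only.
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import declarative_base, relationship

ExampleBase = declarative_base()

class Parent(ExampleBase):
    __tablename__ = 'parents'
    id = Column(Integer, primary_key=True)
    # 'children' and 'parent' name each other via back_populates
    children = relationship('Child', back_populates='parent', cascade='all, delete')

class Child(ExampleBase):
    __tablename__ = 'children'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parents.id', ondelete='CASCADE'))
    parent = relationship('Parent', back_populates='children')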
@ -1,216 +0,0 @@
# from typing import List
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, declarative_base, sessionmaker

from .show import Base

from ffx.track_type import TrackType
from ffx.iso_language import IsoLanguage
from ffx.track_disposition import TrackDisposition
from ffx.track_descriptor import TrackDescriptor
from ffx.audio_layout import AudioLayout
from ffx.track_codec import TrackCodec


class Track(Base):
    """
    relationship(argument, opt1, opt2, ...)
    argument is a string naming the target class, or the Mapped class of the target entity
    backref creates a bi-directional corresponding relationship (back_populates preferred)
    back_populates points to the corresponding relationship (the actual class attribute identifier)

    See: https://docs.sqlalchemy.org/en/(14|20)/orm/basic_relationships.html
    """

    __tablename__ = 'tracks'

    # v1.x
    id = Column(Integer, primary_key=True, autoincrement=True)

    # P=pattern_id+sub_index+track_type
    track_type = Column(Integer)  # TrackType

    index = Column(Integer)
    source_index = Column(Integer)

    # v1.x
    pattern_id = Column(Integer, ForeignKey('patterns.id', ondelete="CASCADE"))
    pattern = relationship('Pattern', back_populates='tracks')

    track_tags = relationship('TrackTag', back_populates='track', cascade="all, delete", lazy="joined")

    disposition_flags = Column(Integer)

    codec_name = Column(String)
    audio_layout = Column(Integer)

    def __init__(self, **kwargs):
        trackType = kwargs.pop('track_type', None)
        if trackType is not None:
            self.track_type = int(trackType)

        dispositionSet = kwargs.pop(TrackDescriptor.DISPOSITION_SET_KEY, set())
        self.disposition_flags = int(TrackDisposition.toFlags(dispositionSet))

        super().__init__(**kwargs)

    @classmethod
    def fromFfprobeStreamObj(cls, streamObj, patternId):
        """Builds a Track from an ffprobe stream object, e.g.:
        {
            'index': 4,
            'codec_name': 'hdmv_pgs_subtitle',
            'codec_long_name': 'HDMV Presentation Graphic Stream subtitles',
            'codec_type': 'subtitle',
            'codec_tag_string': '[0][0][0][0]',
            'codec_tag': '0x0000',
            'r_frame_rate': '0/0',
            'avg_frame_rate': '0/0',
            'time_base': '1/1000',
            'start_pts': 0,
            'start_time': '0.000000',
            'duration_ts': 1421035,
            'duration': '1421.035000',
            'disposition': {
                'default': 1,
                'dub': 0,
                'original': 0,
                'comment': 0,
                'lyrics': 0,
                'karaoke': 0,
                'forced': 0,
                'hearing_impaired': 0,
                'visual_impaired': 0,
                'clean_effects': 0,
                'attached_pic': 0,
                'timed_thumbnails': 0,
                'non_diegetic': 0,
                'captions': 0,
                'descriptions': 0,
                'metadata': 0,
                'dependent': 0,
                'still_image': 0
            },
            'tags': {
                'language': 'ger',
                'title': 'German Full'
            }
        }
        """

        trackType = streamObj[TrackDescriptor.FFPROBE_CODEC_TYPE_KEY]

        if trackType in [t.label() for t in TrackType]:
            return cls(pattern_id=patternId,
                       track_type=trackType,
                       codec_name=streamObj[TrackDescriptor.FFPROBE_CODEC_NAME_KEY],
                       disposition_flags=sum([2**t.index() for (k, v) in streamObj[TrackDescriptor.FFPROBE_DISPOSITION_KEY].items()
                                              if v and (t := TrackDisposition.find(k)) is not None]),
                       audio_layout=AudioLayout.identify(streamObj))
        else:
            return None

    def getId(self):
        return int(self.id)

    def getPatternId(self):
        return int(self.pattern_id)

    def getType(self):
        return TrackType.fromIndex(self.track_type)

    def getCodec(self) -> TrackCodec:
        return TrackCodec.identify(self.codec_name)

    def getIndex(self):
        return int(self.index) if self.index is not None else -1

    def getSourceIndex(self):
        return int(self.source_index) if self.source_index is not None else -1

    def getLanguage(self):
        tags = {t.key: t.value for t in self.track_tags}
        return IsoLanguage.findThreeLetter(tags['language']) if 'language' in tags else IsoLanguage.UNDEFINED

    def getTitle(self):
        tags = {t.key: t.value for t in self.track_tags}
        return tags['title'] if 'title' in tags else ''

    def getDispositionSet(self):
        return TrackDisposition.toSet(self.disposition_flags)

    def getAudioLayout(self):
        return AudioLayout.fromIndex(self.audio_layout)

    def getTags(self):
        return {str(t.key): str(t.value) for t in self.track_tags}

    def setDisposition(self, disposition: TrackDisposition):
        self.disposition_flags = self.disposition_flags | int(2**disposition.index())

    def resetDisposition(self, disposition: TrackDisposition):
        self.disposition_flags = self.disposition_flags & sum([2**d.index() for d in TrackDisposition if d != disposition])

    def getDisposition(self, disposition: TrackDisposition):
        return bool(self.disposition_flags & 2**disposition.index())

    def getDescriptor(self, context=None, subIndex: int = -1) -> TrackDescriptor:
        kwargs = {}

        if context is not None:
            kwargs[TrackDescriptor.CONTEXT_KEY] = context

        kwargs[TrackDescriptor.ID_KEY] = self.getId()
        kwargs[TrackDescriptor.PATTERN_ID_KEY] = self.getPatternId()

        kwargs[TrackDescriptor.INDEX_KEY] = self.getIndex()
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = self.getSourceIndex()

        if subIndex > -1:
            kwargs[TrackDescriptor.SUB_INDEX_KEY] = subIndex

        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = self.getType()
        kwargs[TrackDescriptor.CODEC_KEY] = self.getCodec()

        kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = self.getDispositionSet()
        kwargs[TrackDescriptor.TAGS_KEY] = self.getTags()

        kwargs[TrackDescriptor.AUDIO_LAYOUT_KEY] = self.getAudioLayout()

        return TrackDescriptor(**kwargs)
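Not part of the removed sources: fromFfprobeStreamObj() above packs ffprobe's disposition booleans into one integer. A hedged, standalone sketch of the same bit-packing, with a made-up flag table standing in for TrackDisposition:

# Illustrative only: FLAG_INDEX is a stand-in for TrackDisposition's index() values.
FLAG_INDEX = {'default': 0, 'forced': 1, 'hearing_impaired': 2}

disposition = {'default': 1, 'dub': 0, 'forced': 1, 'comment': 0}   # ffprobe-style booleans

flags = sum(2 ** FLAG_INDEX[key]
            for key, value in disposition.items()
            if value and key in FLAG_INDEX)

assert flags == 0b011   # default (bit 0) and forced (bit 1) are set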
@ -1,28 +0,0 @@
# from typing import List
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Enum
from sqlalchemy.orm import relationship, declarative_base, sessionmaker

from .show import Base


class TrackTag(Base):
    """
    relationship(argument, opt1, opt2, ...)
    argument is a string naming the target class, or the Mapped class of the target entity
    backref creates a bi-directional corresponding relationship (back_populates preferred)
    back_populates points to the corresponding relationship (the actual class attribute identifier)

    See: https://docs.sqlalchemy.org/en/(14|20)/orm/basic_relationships.html
    """

    __tablename__ = 'track_tags'

    # v1.x
    id = Column(Integer, primary_key=True)

    key = Column(String)
    value = Column(String)

    # v1.x
    track_id = Column(Integer, ForeignKey('tracks.id', ondelete="CASCADE"))
    track = relationship('Track', back_populates='track_tags')
@ -1,160 +0,0 @@
import click, re

from ffx.model.pattern import Pattern


class PatternController():

    def __init__(self, context):
        self.context = context
        self.Session = self.context['database']['session']  # convenience

    def addPattern(self, patternObj):
        """Adds a pattern to the database from an obj.

        Returns the database id, or 0 if the pattern already exists."""
        try:
            s = self.Session()
            q = s.query(Pattern).filter(Pattern.show_id == int(patternObj['show_id']),
                                        Pattern.pattern == str(patternObj['pattern']))

            if not q.count():
                pattern = Pattern(show_id=int(patternObj['show_id']),
                                  pattern=str(patternObj['pattern']))
                s.add(pattern)
                s.commit()
                return pattern.getId()
            else:
                return 0

        except Exception as ex:
            raise click.ClickException(f"PatternController.addPattern(): {repr(ex)}")
        finally:
            s.close()

    def updatePattern(self, patternId, patternObj):
        try:
            s = self.Session()
            q = s.query(Pattern).filter(Pattern.id == int(patternId))

            if q.count():
                pattern = q.first()

                pattern.show_id = int(patternObj['show_id'])
                pattern.pattern = str(patternObj['pattern'])
                pattern.quality = int(patternObj['quality'])

                s.commit()
                return True
            else:
                return False

        except Exception as ex:
            raise click.ClickException(f"PatternController.updatePattern(): {repr(ex)}")
        finally:
            s.close()

    def findPattern(self, patternObj):
        try:
            s = self.Session()
            q = s.query(Pattern).filter(Pattern.show_id == int(patternObj['show_id']), Pattern.pattern == str(patternObj['pattern']))

            if q.count():
                pattern = q.first()
                return int(pattern.id)
            else:
                return None

        except Exception as ex:
            raise click.ClickException(f"PatternController.findPattern(): {repr(ex)}")
        finally:
            s.close()

    def getPattern(self, patternId: int):
        if type(patternId) is not int:
            raise ValueError("PatternController.getPattern(): Argument patternId is required to be of type int")

        try:
            s = self.Session()
            q = s.query(Pattern).filter(Pattern.id == int(patternId))

            return q.first() if q.count() else None

        except Exception as ex:
            raise click.ClickException(f"PatternController.getPattern(): {repr(ex)}")
        finally:
            s.close()

    def deletePattern(self, patternId):
        try:
            s = self.Session()
            q = s.query(Pattern).filter(Pattern.id == int(patternId))

            if q.count():
                # Delete via the session (not q.delete()) so ORM cascades fire,
                # see: https://stackoverflow.com/a/19245058
                # q.delete()
                pattern = q.first()
                s.delete(pattern)

                s.commit()
                return True
            return False

        except Exception as ex:
            raise click.ClickException(f"PatternController.deletePattern(): {repr(ex)}")
        finally:
            s.close()

    def matchFilename(self, filename: str) -> dict:
        """Returns a dict {'match': <a regex match obj>, 'pattern': <ffx pattern obj>},
        or an empty dict if no pattern was found."""
        try:
            s = self.Session()
            q = s.query(Pattern)

            matchResult = {}

            for pattern in q.all():
                patternMatch = re.search(str(pattern.pattern), str(filename))
                if patternMatch is not None:
                    matchResult['match'] = patternMatch
                    matchResult['pattern'] = pattern

            return matchResult

        except Exception as ex:
            raise click.ClickException(f"PatternController.matchFilename(): {repr(ex)}")
        finally:
            s.close()

    # def getMediaDescriptor(self, context, patternId):
    #
    #     try:
    #         s = self.Session()
    #         q = s.query(Pattern).filter(Pattern.id == int(patternId))
    #
    #         if q.count():
    #             return q.first().getMediaDescriptor(context)
    #         else:
    #             return None
    #
    #     except Exception as ex:
    #         raise click.ClickException(f"PatternController.getMediaDescriptor(): {repr(ex)}")
    #     finally:
    #         s.close()
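Not part of the removed sources: a hedged, standalone sketch of the matchFilename() loop above, with made-up regex patterns in place of database rows. Like the controller, it tries every stored pattern, and a later match overwrites an earlier one.

import re

# Illustrative pattern table; keys stand in for pattern ids.
patterns = {1: r'Some\.Show\.S(\d{2})E(\d{2})', 2: r'S(\d{2})E(\d{2})'}

def match_filename(filename):
    result = {}
    for pattern_id, pattern in patterns.items():
        m = re.search(pattern, filename)
        if m is not None:
            result = {'match': m, 'pattern_id': pattern_id}
    return result

print(match_filename('Some.Show.S02E05.mkv')['pattern_id'])   # -> 2 (last match wins)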
@ -1,111 +0,0 @@
import click

from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button
from textual.containers import Grid

from .show_controller import ShowController
from .pattern_controller import PatternController

from ffx.model.pattern import Pattern


# Screen[dict[int, str, int]]
class PatternDeleteScreen(Screen):

    CSS = """

    Grid {
        grid-size: 2;
        grid-rows: 2 auto;
        grid-columns: 30 330;
        height: 100%;
        width: 100%;
        padding: 1;
    }

    Input {
        border: none;
    }
    Button {
        border: none;
    }
    #toplabel {
        height: 1;
    }

    .two {
        column-span: 2;
    }

    .box {
        height: 100%;
        border: solid green;
    }
    """

    def __init__(self, patternId=None, showId=None):
        super().__init__()

        self.context = self.app.getContext()
        self.Session = self.context['database']['session']  # convenience

        self.__pc = PatternController(context=self.context)
        self.__sc = ShowController(context=self.context)

        self.__patternId = patternId
        self.__pattern: Pattern = self.__pc.getPattern(patternId) if patternId is not None else None
        self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None

    def on_mount(self):
        if self.__showDescriptor:
            self.query_one("#showlabel", Static).update(f"{self.__showDescriptor.getId()} - {self.__showDescriptor.getName()} ({self.__showDescriptor.getYear()})")
        if self.__pattern is not None:
            self.query_one("#patternlabel", Static).update(str(self.__pattern.pattern))

    def compose(self):
        yield Header()

        with Grid():
            yield Static("Are you sure you want to delete the following filename pattern?", id="toplabel", classes="two")

            yield Static("", classes="two")

            yield Static("Pattern")
            yield Static("", id="patternlabel")

            yield Static("", classes="two")

            yield Static("from show")
            yield Static("", id="showlabel")

            yield Static("", classes="two")

            yield Button("Delete", id="delete_button")
            yield Button("Cancel", id="cancel_button")

        yield Footer()

    # Event handler for button press
    def on_button_pressed(self, event: Button.Pressed) -> None:
        if event.button.id == "delete_button":
            if self.__patternId is None:
                raise click.ClickException('PatternDeleteScreen.on_button_pressed(): pattern id is undefined')

            if self.__pc.deletePattern(self.__patternId):
                self.dismiss(self.__pattern)
            else:
                # TODO: show an error message
                self.app.pop_screen()

        if event.button.id == "cancel_button":
            self.app.pop_screen()
@ -1,592 +0,0 @@
import click, re
from typing import List

from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input, DataTable
from textual.containers import Grid

from ffx.model.pattern import Pattern
from ffx.model.track import Track

from .pattern_controller import PatternController
from .show_controller import ShowController
from .track_controller import TrackController
from .tag_controller import TagController

from .track_details_screen import TrackDetailsScreen
from .track_delete_screen import TrackDeleteScreen

from .tag_details_screen import TagDetailsScreen
from .tag_delete_screen import TagDeleteScreen

from ffx.track_type import TrackType

from ffx.track_disposition import TrackDisposition
from ffx.track_descriptor import TrackDescriptor

from textual.widgets._data_table import CellDoesNotExist

from ffx.file_properties import FileProperties
from ffx.iso_language import IsoLanguage
from ffx.audio_layout import AudioLayout

from ffx.helper import formatRichColor, removeRichColor


# Screen[dict[int, str, int]]
class PatternDetailsScreen(Screen):

    CSS = """

    Grid {
        grid-size: 7 13;
        grid-rows: 2 2 2 2 2 2 8 2 2 8 2 2 2 2;
        grid-columns: 25 25 25 25 25 25 25;
        height: 100%;
        width: 100%;
        padding: 1;
    }

    Input {
        border: none;
    }
    Button {
        border: none;
    }

    DataTable {
        min-height: 6;
    }

    DataTable .datatable--cursor {
        background: darkorange;
        color: black;
    }

    DataTable .datatable--header {
        background: steelblue;
        color: white;
    }

    #toplabel {
        height: 1;
    }

    .three {
        column-span: 3;
    }

    .four {
        column-span: 4;
    }
    .five {
        column-span: 5;
    }
    .six {
        column-span: 6;
    }
    .seven {
        column-span: 7;
    }

    .box {
        height: 100%;
        border: solid green;
    }

    .yellow {
        tint: yellow 40%;
    }
    """

    def __init__(self, patternId=None, showId=None):
        super().__init__()

        self.context = self.app.getContext()
        self.Session = self.context['database']['session']  # convenience

        self.__configurationData = self.context['config'].getData()

        metadataConfiguration = self.__configurationData['metadata'] if 'metadata' in self.__configurationData.keys() else {}

        self.__signatureTags = metadataConfiguration['signature'] if 'signature' in metadataConfiguration.keys() else {}
        self.__removeGlobalKeys = metadataConfiguration['remove'] if 'remove' in metadataConfiguration.keys() else []
        self.__ignoreGlobalKeys = metadataConfiguration['ignore'] if 'ignore' in metadataConfiguration.keys() else []
        self.__removeTrackKeys = (metadataConfiguration['streams']['remove']
                                  if 'streams' in metadataConfiguration.keys()
                                  and 'remove' in metadataConfiguration['streams'].keys() else [])
        self.__ignoreTrackKeys = (metadataConfiguration['streams']['ignore']
                                  if 'streams' in metadataConfiguration.keys()
                                  and 'ignore' in metadataConfiguration['streams'].keys() else [])

        self.__pc = PatternController(context=self.context)
        self.__sc = ShowController(context=self.context)
        self.__tc = TrackController(context=self.context)
        self.__tac = TagController(context=self.context)

        self.__pattern: Pattern = self.__pc.getPattern(patternId) if patternId is not None else None
        self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None

    # TODO: move this into a controller
    def loadTracks(self, show_id):
        try:
            tracks = {}
            tracks['audio'] = {}
            tracks['subtitle'] = {}

            s = self.Session()
            q = s.query(Pattern).filter(Pattern.show_id == int(show_id))

            return [{'id': int(p.id), 'pattern': p.pattern} for p in q.all()]

        except Exception as ex:
            raise click.ClickException(f"loadTracks(): {repr(ex)}")
        finally:
            s.close()

    def updateTracks(self):
        self.tracksTable.clear()

        if self.__pattern is not None:
            tracks = self.__tc.findTracks(self.__pattern.getId())

            typeCounter = {}

            tr: Track
            for tr in tracks:
                td: TrackDescriptor = tr.getDescriptor(self.context)

                trackType = td.getType()
                if trackType not in typeCounter:
                    typeCounter[trackType] = 0

                dispoSet = td.getDispositionSet()

                trackLanguage = td.getLanguage()
                audioLayout = td.getAudioLayout()

                row = (td.getIndex(),
                       trackType.label(),
                       typeCounter[trackType],
                       td.getCodec().label(),
                       audioLayout.label() if trackType == TrackType.AUDIO
                       and audioLayout != AudioLayout.LAYOUT_UNDEFINED else ' ',
                       trackLanguage.label() if trackLanguage != IsoLanguage.UNDEFINED else ' ',
                       td.getTitle(),
                       'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
                       'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
                       td.getSourceIndex())

                self.tracksTable.add_row(*map(str, row))

                typeCounter[trackType] += 1

    def swapTracks(self, trackIndex1: int, trackIndex2: int):
        ti1 = int(trackIndex1)
        ti2 = int(trackIndex2)

        siblingDescriptors: List[TrackDescriptor] = self.__tc.findSiblingDescriptors(self.__pattern.getId())

        numSiblings = len(siblingDescriptors)

        if ti1 < 0 or ti1 >= numSiblings:
            raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex1 ({ti1}) is out of range ({numSiblings})")

        if ti2 < 0 or ti2 >= numSiblings:
            raise ValueError(f"PatternDetailsScreen.swapTracks(): trackIndex2 ({ti2}) is out of range ({numSiblings})")

        sibling1 = siblingDescriptors[ti1]
        sibling2 = siblingDescriptors[ti2]

        # raise click.ClickException(f"siblings id1={sibling1.getId()} id2={sibling2.getId()}")

        subIndex2 = sibling2.getSubIndex()

        sibling2.setIndex(sibling1.getIndex())
        sibling2.setSubIndex(sibling1.getSubIndex())

        sibling1.setIndex(ti2)
        sibling1.setSubIndex(subIndex2)

        if not self.__tc.updateTrack(sibling1.getId(), sibling1):
            raise click.ClickException('Update sibling1 failed')
        if not self.__tc.updateTrack(sibling2.getId(), sibling2):
            raise click.ClickException('Update sibling2 failed')

        self.updateTracks()

    def updateTags(self):
        self.tagsTable.clear()

        if self.__pattern is not None:
            tags = self.__tac.findAllMediaTags(self.__pattern.getId())

            for tagKey, tagValue in tags.items():
                textColor = None
                if tagKey in self.__ignoreGlobalKeys:
                    textColor = 'blue'
                if tagKey in self.__removeGlobalKeys:
                    textColor = 'red'

                # if tagKey not in self.__ignoreTrackKeys:
                row = (formatRichColor(tagKey, textColor), formatRichColor(tagValue, textColor))
                self.tagsTable.add_row(*map(str, row))

    def on_mount(self):
        if self.__showDescriptor is not None:
            self.query_one("#showlabel", Static).update(f"{self.__showDescriptor.getId()} - {self.__showDescriptor.getName()} ({self.__showDescriptor.getYear()})")

        if self.__pattern is not None:
            self.query_one("#pattern_input", Input).value = str(self.__pattern.getPattern())

        if self.__pattern and self.__pattern.quality:
            self.query_one("#quality_input", Input).value = str(self.__pattern.quality)

        self.updateTags()
        self.updateTracks()

    def compose(self):
        self.tagsTable = DataTable(classes="seven")

        # Define the columns with headers
        self.column_key_tag_key = self.tagsTable.add_column("Key", width=50)
        self.column_key_tag_value = self.tagsTable.add_column("Value", width=100)

        self.tagsTable.cursor_type = 'row'

        self.tracksTable = DataTable(id="tracks_table", classes="seven")

        self.column_key_track_index = self.tracksTable.add_column("Index", width=5)
        self.column_key_track_type = self.tracksTable.add_column("Type", width=10)
        self.column_key_track_sub_index = self.tracksTable.add_column("SubIndex", width=8)
        self.column_key_track_codec = self.tracksTable.add_column("Codec", width=10)
        self.column_key_track_audio_layout = self.tracksTable.add_column("Layout", width=10)
        self.column_key_track_language = self.tracksTable.add_column("Language", width=15)
        self.column_key_track_title = self.tracksTable.add_column("Title", width=48)
        self.column_key_track_default = self.tracksTable.add_column("Default", width=8)
        self.column_key_track_forced = self.tracksTable.add_column("Forced", width=8)
        self.column_key_track_source_index = self.tracksTable.add_column("SrcIndex", width=8)

        self.tracksTable.cursor_type = 'row'

        yield Header()

        with Grid():

            # 1
            yield Static("Edit filename pattern" if self.__pattern is not None else "New filename pattern", id="toplabel")
            yield Input(type="text", id="pattern_input", classes="six")

            # 2
            yield Static("from show")
            yield Static("", id="showlabel", classes="five")
            yield Button("Substitute pattern", id="pattern_button")

            # 3
            yield Static(" ", classes="seven")

            # 4
            yield Static("Quality")
            yield Input(type="integer", id="quality_input")
            yield Static(' ', classes="five")

            # 5
            yield Static(" ", classes="seven")

            # 6
            yield Static("Media Tags")

            if self.__pattern is not None:
                yield Button("Add", id="button_add_tag")
                yield Button("Edit", id="button_edit_tag")
                yield Button("Delete", id="button_delete_tag")
            else:
                yield Static(" ")
                yield Static(" ")
                yield Static(" ")

            yield Static(" ")
            yield Static(" ")
            yield Static(" ")

            # 7
            yield self.tagsTable

            # 8
            yield Static(" ", classes="seven")

            # 9
            yield Static("Streams")

            if self.__pattern is not None:
                yield Button("Add", id="button_add_track")
                yield Button("Edit", id="button_edit_track")
                yield Button("Delete", id="button_delete_track")
            else:
                yield Static(" ")
                yield Static(" ")
                yield Static(" ")

            yield Static(" ")
            yield Button("Up", id="button_track_up")
            yield Button("Down", id="button_track_down")

            # 10
            yield self.tracksTable

            # 11
            yield Static(" ", classes="seven")

            # 12
            yield Static(" ", classes="seven")

            # 13
            yield Button("Save", id="save_button")
            yield Button("Cancel", id="cancel_button")
            yield Static(" ", classes="five")

            # 14
            yield Static(" ", classes="seven")

        yield Footer()

    def getPatternFromInput(self):
        return str(self.query_one("#pattern_input", Input).value)

    def getQualityFromInput(self):
        try:
            return int(self.query_one("#quality_input", Input).value)
        except ValueError:
            return 0

    def getSelectedTrackDescriptor(self):
        if not self.__pattern:
            return None

        try:
            # Fetch the currently selected row
            # selected_row_index = self.table.cursor_row
            row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)

            if row_key is not None:
                selected_track_data = self.tracksTable.get_row(row_key)

                trackIndex = int(selected_track_data[0])
                trackSubIndex = int(selected_track_data[2])

                return self.__tc.getTrack(self.__pattern.getId(), trackIndex).getDescriptor(self.context, subIndex=trackSubIndex)
            else:
                return None

        except CellDoesNotExist:
            return None

    def getSelectedTag(self):
        try:
            # Fetch the currently selected row
            # selected_row_index = self.table.cursor_row
            row_key, col_key = self.tagsTable.coordinate_to_cell_key(self.tagsTable.cursor_coordinate)

            if row_key is not None:
                selected_tag_data = self.tagsTable.get_row(row_key)

                tagKey = removeRichColor(selected_tag_data[0])
                tagValue = removeRichColor(selected_tag_data[1])

                return tagKey, tagValue
            else:
                return None

        except CellDoesNotExist:
            return None

    # Event handler for button press
    def on_button_pressed(self, event: Button.Pressed) -> None:
        # Check if the button pressed is the one we are interested in
        if event.button.id == "save_button":
            patternDescriptor = {}
            patternDescriptor['show_id'] = self.__showDescriptor.getId()
            patternDescriptor['pattern'] = self.getPatternFromInput()
            patternDescriptor['quality'] = self.getQualityFromInput()

            if self.__pattern is not None:
                if self.__pc.updatePattern(self.__pattern.getId(), patternDescriptor):
                    self.dismiss(patternDescriptor)
                else:
                    # TODO: show an error message
                    self.app.pop_screen()
            else:
                patternId = self.__pc.addPattern(patternDescriptor)
                if patternId:
                    self.dismiss(patternDescriptor)
                else:
                    # TODO: show an error message
                    self.app.pop_screen()

        if event.button.id == "cancel_button":
            self.app.pop_screen()

        # Streams can only be added or edited once the pattern itself has been saved
        if self.__pattern is not None:
            numTracks = len(self.tracksTable.rows)

            if event.button.id == "button_add_track":
                self.app.push_screen(TrackDetailsScreen(patternId=self.__pattern.getId(), index=numTracks), self.handle_add_track)

            selectedTrack = self.getSelectedTrackDescriptor()
            if selectedTrack is not None:
                if event.button.id == "button_edit_track":
                    self.app.push_screen(TrackDetailsScreen(trackDescriptor=selectedTrack), self.handle_edit_track)
                if event.button.id == "button_delete_track":
                    self.app.push_screen(TrackDeleteScreen(trackDescriptor=selectedTrack), self.handle_delete_track)

        if event.button.id == "button_add_tag":
            if self.__pattern is not None:
                self.app.push_screen(TagDetailsScreen(), self.handle_update_tag)

        if event.button.id == "button_edit_tag":
            selectedTag = self.getSelectedTag()
            if selectedTag is not None:
                tagKey, tagValue = selectedTag
                self.app.push_screen(TagDetailsScreen(key=tagKey, value=tagValue), self.handle_update_tag)

        if event.button.id == "button_delete_tag":
            selectedTag = self.getSelectedTag()
            if selectedTag is not None:
                tagKey, tagValue = selectedTag
                self.app.push_screen(TagDeleteScreen(key=tagKey, value=tagValue), self.handle_delete_tag)

        if event.button.id == "pattern_button":
            pattern = self.query_one("#pattern_input", Input).value

            patternMatch = re.search(FileProperties.SE_INDICATOR_PATTERN, pattern)

            if patternMatch:
                self.query_one("#pattern_input", Input).value = pattern.replace(patternMatch.group(1),
                                                                                FileProperties.SE_INDICATOR_PATTERN)

        if event.button.id == "button_track_up":
            selectedTrackDescriptor = self.getSelectedTrackDescriptor()

            if selectedTrackDescriptor is not None:
                selectedTrackIndex = selectedTrackDescriptor.getIndex()

                if selectedTrackIndex > 0 and selectedTrackIndex < self.tracksTable.row_count:
                    self.swapTracks(selectedTrackIndex, selectedTrackIndex - 1)

        if event.button.id == "button_track_down":
            selectedTrackDescriptor = self.getSelectedTrackDescriptor()

            if selectedTrackDescriptor is not None:
                selectedTrackIndex = selectedTrackDescriptor.getIndex()

                if selectedTrackIndex >= 0 and selectedTrackIndex < (self.tracksTable.row_count - 1):
                    self.swapTracks(selectedTrackIndex, selectedTrackIndex + 1)

    def handle_add_track(self, trackDescriptor: TrackDescriptor):
        dispoSet = trackDescriptor.getDispositionSet()
        trackType = trackDescriptor.getType()
        index = trackDescriptor.getIndex()
        subIndex = trackDescriptor.getSubIndex()
        codec = trackDescriptor.getCodec()
        language = trackDescriptor.getLanguage()
        title = trackDescriptor.getTitle()

        row = (index,
               trackType.label(),
               subIndex,
               codec.label(),
               trackDescriptor.getAudioLayout().label() if trackType == TrackType.AUDIO else ' ',
               language.label(),
               title,
               'Yes' if TrackDisposition.DEFAULT in dispoSet else 'No',
               'Yes' if TrackDisposition.FORCED in dispoSet else 'No',
               trackDescriptor.getSourceIndex())

        self.tracksTable.add_row(*map(str, row))

    def handle_edit_track(self, trackDescriptor: TrackDescriptor):
        try:
            row_key, col_key = self.tracksTable.coordinate_to_cell_key(self.tracksTable.cursor_coordinate)

            self.tracksTable.update_cell(row_key, self.column_key_track_audio_layout,
                                         trackDescriptor.getAudioLayout().label()
                                         if trackDescriptor.getType() == TrackType.AUDIO else ' ')

            self.tracksTable.update_cell(row_key, self.column_key_track_language, trackDescriptor.getLanguage().label())
            self.tracksTable.update_cell(row_key, self.column_key_track_title, trackDescriptor.getTitle())
            self.tracksTable.update_cell(row_key, self.column_key_track_default,
                                         'Yes' if TrackDisposition.DEFAULT in trackDescriptor.getDispositionSet() else 'No')
            self.tracksTable.update_cell(row_key, self.column_key_track_forced,
                                         'Yes' if TrackDisposition.FORCED in trackDescriptor.getDispositionSet() else 'No')

        except CellDoesNotExist:
            pass

    def handle_delete_track(self, trackDescriptor: TrackDescriptor):
        self.updateTracks()

    def handle_update_tag(self, tag):
        if self.__pattern is None:
            raise click.ClickException("PatternDetailsScreen.handle_update_tag(): pattern not set")

        if self.__tac.updateMediaTag(self.__pattern.getId(), tag[0], tag[1]) is not None:
            self.updateTags()

    def handle_delete_tag(self, tag):
        if self.__pattern is None:
            raise click.ClickException("PatternDetailsScreen.handle_delete_tag(): pattern not set")

        if self.__tac.deleteMediaTagByKey(self.__pattern.getId(), tag[0]):
            self.updateTags()
        else:
            raise click.ClickException('tag delete failed')
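Not part of the removed sources: swapTracks() above exchanges both the index and the sub-index of two sibling descriptors. A minimal sketch of the same exchange, with plain dicts standing in for TrackDescriptor objects:

# Hedged sketch; the dicts are illustrative stand-ins for TrackDescriptor.
a = {'index': 1, 'sub_index': 0}   # e.g. first audio track
b = {'index': 2, 'sub_index': 1}   # e.g. second audio track

a['index'], b['index'] = b['index'], a['index']
a['sub_index'], b['sub_index'] = b['sub_index'], a['sub_index']

assert a == {'index': 2, 'sub_index': 1} and b == {'index': 1, 'sub_index': 0}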
@ -1,33 +0,0 @@
import subprocess, logging
from typing import List


def executeProcess(commandSequence: List[str], directory: str = None, context: dict = None):
    """
    niceness: -20 to +19
    cpu_percent: 1 to 99
    """

    if context is None:
        logger = logging.getLogger('FFX')
        logger.addHandler(logging.NullHandler())
    else:
        logger = context['logger']

    niceSequence = []

    niceness = int((context or {}).get('resource_limits', {}).get('niceness', 99))
    cpu_percent = int((context or {}).get('resource_limits', {}).get('cpu_percent', 0))

    if niceness >= -20 and niceness <= 19:
        niceSequence += ['nice', '-n', str(niceness)]
    if cpu_percent >= 1:
        niceSequence += ['cpulimit', '-l', str(cpu_percent), '--']

    niceCommand = niceSequence + commandSequence

    logger.debug(f"executeProcess() command sequence: {' '.join(niceCommand)}")

    process = subprocess.Popen(niceCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8', cwd=directory)
    output, error = process.communicate()

    return output, error, process.returncode
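Not part of the removed sources: a hedged illustration of how the nice/cpulimit prefix composes around a command; the ffmpeg arguments are placeholders.

command = ['ffmpeg', '-i', 'in.mkv', 'out.mkv']   # placeholder command
niceness, cpu_percent = 10, 50

prefix = []
if -20 <= niceness <= 19:
    prefix += ['nice', '-n', str(niceness)]
if cpu_percent >= 1:
    prefix += ['cpulimit', '-l', str(cpu_percent), '--']

print(' '.join(prefix + command))
# nice -n 10 cpulimit -l 50 -- ffmpeg -i in.mkv out.mkv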
@ -1,12 +0,0 @@
from textual.app import ComposeResult
from textual.screen import Screen
from textual.widgets import Footer, Placeholder


class SettingsScreen(Screen):

    def __init__(self):
        super().__init__()
        context = self.app.getContext()

    def compose(self) -> ComposeResult:
        yield Placeholder("Settings Screen")
        yield Footer()
@ -1,233 +0,0 @@
import click

from ffx.model.shifted_season import ShiftedSeason


class EpisodeOrderException(Exception):
    pass

class RangeOverlapException(Exception):
    pass


class ShiftedSeasonController():

    def __init__(self, context):
        self.context = context
        self.Session = self.context['database']['session']  # convenience

    def checkShiftedSeason(self, showId: int, shiftedSeasonObj: dict, shiftedSeasonId: int = 0):
        """
        Checks whether the episode range in shiftedSeasonObj overlaps an existing
        shifted season of the same show and original season.

        shiftedSeasonId: when updating an existing entry, pass its id so it is
        excluded from the overlap check.
        """
        try:
            s = self.Session()

            originalSeason = shiftedSeasonObj['original_season']
            firstEpisode = int(shiftedSeasonObj['first_episode'])
            lastEpisode = int(shiftedSeasonObj['last_episode'])

            q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId))
            if shiftedSeasonId:
                q = q.filter(ShiftedSeason.id != int(shiftedSeasonId))

            siblingShiftedSeason: ShiftedSeason
            for siblingShiftedSeason in q.all():
                siblingOriginalSeason = siblingShiftedSeason.getOriginalSeason()
                siblingFirstEpisode = siblingShiftedSeason.getFirstEpisode()
                siblingLastEpisode = siblingShiftedSeason.getLastEpisode()

                if (originalSeason == siblingOriginalSeason
                        and lastEpisode >= siblingFirstEpisode
                        and siblingLastEpisode >= firstEpisode):
                    return False
            return True

        except Exception as ex:
            raise click.ClickException(f"ShiftedSeasonController.checkShiftedSeason(): {repr(ex)}")
        finally:
            s.close()

    def addShiftedSeason(self, showId: int, shiftedSeasonObj: dict):
        if type(showId) is not int:
            raise ValueError("ShiftedSeasonController.addShiftedSeason(): Argument showId is required to be of type int")

        if type(shiftedSeasonObj) is not dict:
            raise ValueError("ShiftedSeasonController.addShiftedSeason(): Argument shiftedSeasonObj is required to be of type dict")

        try:
            s = self.Session()

            firstEpisode = int(shiftedSeasonObj['first_episode'])
            lastEpisode = int(shiftedSeasonObj['last_episode'])

            if lastEpisode < firstEpisode:
                raise EpisodeOrderException()

            shiftedSeason = ShiftedSeason(show_id=int(showId),
                                          original_season=int(shiftedSeasonObj['original_season']),
                                          first_episode=firstEpisode,
                                          last_episode=lastEpisode,
                                          season_offset=int(shiftedSeasonObj['season_offset']),
                                          episode_offset=int(shiftedSeasonObj['episode_offset']))
            s.add(shiftedSeason)
            s.commit()
            return shiftedSeason.getId()

        except Exception as ex:
            raise click.ClickException(f"ShiftedSeasonController.addShiftedSeason(): {repr(ex)}")
        finally:
            s.close()

    def updateShiftedSeason(self, shiftedSeasonId: int, shiftedSeasonObj: dict):
        if type(shiftedSeasonId) is not int:
            raise ValueError("ShiftedSeasonController.updateShiftedSeason(): Argument shiftedSeasonId is required to be of type int")

        if type(shiftedSeasonObj) is not dict:
            raise ValueError("ShiftedSeasonController.updateShiftedSeason(): Argument shiftedSeasonObj is required to be of type dict")

        try:
            s = self.Session()

            q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))

            if q.count():
                shiftedSeason = q.first()

                shiftedSeason.original_season = int(shiftedSeasonObj['original_season'])
                shiftedSeason.first_episode = int(shiftedSeasonObj['first_episode'])
                shiftedSeason.last_episode = int(shiftedSeasonObj['last_episode'])
                shiftedSeason.season_offset = int(shiftedSeasonObj['season_offset'])
                shiftedSeason.episode_offset = int(shiftedSeasonObj['episode_offset'])

                s.commit()
                return True
            else:
                return False

        except Exception as ex:
            raise click.ClickException(f"ShiftedSeasonController.updateShiftedSeason(): {repr(ex)}")
        finally:
            s.close()

    def findShiftedSeason(self, showId: int, originalSeason: int, firstEpisode: int, lastEpisode: int):
        if type(showId) is not int:
            raise ValueError("ShiftedSeasonController.findShiftedSeason(): Argument showId is required to be of type int")

        if type(originalSeason) is not int:
            raise ValueError("ShiftedSeasonController.findShiftedSeason(): Argument originalSeason is required to be of type int")

        if type(firstEpisode) is not int:
            raise ValueError("ShiftedSeasonController.findShiftedSeason(): Argument firstEpisode is required to be of type int")

        if type(lastEpisode) is not int:
            raise ValueError("ShiftedSeasonController.findShiftedSeason(): Argument lastEpisode is required to be of type int")

        try:
            s = self.Session()
            q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId),
                                              ShiftedSeason.original_season == int(originalSeason),
                                              ShiftedSeason.first_episode == int(firstEpisode),
                                              ShiftedSeason.last_episode == int(lastEpisode))

            return q.first().getId() if q.count() else None

        except Exception as ex:
            raise click.ClickException(f"ShiftedSeasonController.findShiftedSeason(): {repr(ex)}")
        finally:
            s.close()

    def getShiftedSeasonSiblings(self, showId: int):
        if type(showId) is not int:
            raise ValueError("ShiftedSeasonController.getShiftedSeasonSiblings(): Argument showId is required to be of type int")

        try:
            s = self.Session()
            q = s.query(ShiftedSeason).filter(ShiftedSeason.show_id == int(showId))

            return q.all()

        except Exception as ex:
            raise click.ClickException(f"ShiftedSeasonController.getShiftedSeasonSiblings(): {repr(ex)}")
        finally:
            s.close()

    def getShiftedSeason(self, shiftedSeasonId: int):
        if type(shiftedSeasonId) is not int:
            raise ValueError("ShiftedSeasonController.getShiftedSeason(): Argument shiftedSeasonId is required to be of type int")

        try:
            s = self.Session()
            q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))

            return q.first() if q.count() else None

        except Exception as ex:
            raise click.ClickException(f"ShiftedSeasonController.getShiftedSeason(): {repr(ex)}")
        finally:
            s.close()

    def deleteShiftedSeason(self, shiftedSeasonId):
        if type(shiftedSeasonId) is not int:
            raise ValueError("ShiftedSeasonController.deleteShiftedSeason(): Argument shiftedSeasonId is required to be of type int")

        try:
            s = self.Session()
            q = s.query(ShiftedSeason).filter(ShiftedSeason.id == int(shiftedSeasonId))

            if q.count():
                # Delete via the session (not q.delete()) so ORM cascades fire,
                # see: https://stackoverflow.com/a/19245058
                # q.delete()
                shiftedSeason = q.first()
                s.delete(shiftedSeason)

                s.commit()
                return True
            return False

        except Exception as ex:
            raise click.ClickException(f"ShiftedSeasonController.deleteShiftedSeason(): {repr(ex)}")
        finally:
            s.close()

    def shiftSeason(self, showId, season, episode):
        shiftedSeasonEntry: ShiftedSeason
        for shiftedSeasonEntry in self.getShiftedSeasonSiblings(showId):

            if (season == shiftedSeasonEntry.getOriginalSeason()
                    and (shiftedSeasonEntry.getFirstEpisode() == -1 or episode >= shiftedSeasonEntry.getFirstEpisode())
                    and (shiftedSeasonEntry.getLastEpisode() == -1 or episode <= shiftedSeasonEntry.getLastEpisode())):

                shiftedSeason = season + shiftedSeasonEntry.getSeasonOffset()
                shiftedEpisode = episode + shiftedSeasonEntry.getEpisodeOffset()

                self.context['logger'].info(f"Shifting season: {season} episode: {episode} "
                                            + f"-> season: {shiftedSeason} episode: {shiftedEpisode}")

                return shiftedSeason, shiftedEpisode

        return season, episode
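Not part of the removed sources: checkShiftedSeason() above treats two entries as colliding when their inclusive episode ranges overlap within the same original season. A standalone sketch of that overlap test, with plain integers standing in for ShiftedSeason rows:

# Hedged sketch of the interval-overlap condition used by checkShiftedSeason().
def ranges_overlap(first_a, last_a, first_b, last_b):
    # Two inclusive ranges overlap when each one starts at or before the other one ends.
    return last_a >= first_b and last_b >= first_a

assert ranges_overlap(1, 10, 8, 20)        # episodes 8..10 are shared
assert not ranges_overlap(1, 10, 11, 20)   # adjacent but disjoint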
@ -1,125 +0,0 @@
import click

from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button
from textual.containers import Grid

from .shifted_season_controller import ShiftedSeasonController

from ffx.model.shifted_season import ShiftedSeason


# Screen[dict[int, str, int]]
class ShiftedSeasonDeleteScreen(Screen):

    CSS = """

    Grid {
        grid-size: 2;
        grid-rows: 2 auto;
        grid-columns: 30 330;
        height: 100%;
        width: 100%;
        padding: 1;
    }

    Input {
        border: none;
    }
    Button {
        border: none;
    }
    #toplabel {
        height: 1;
    }

    .two {
        column-span: 2;
    }

    .box {
        height: 100%;
        border: solid green;
    }
    """

    def __init__(self, showId=None, shiftedSeasonId=None):
        super().__init__()

        self.context = self.app.getContext()
        self.Session = self.context['database']['session']  # convenience

        self.__ssc = ShiftedSeasonController(context=self.context)

        self.__showId = showId
        self.__shiftedSeasonId = shiftedSeasonId

    def on_mount(self):
        shiftedSeason: ShiftedSeason = self.__ssc.getShiftedSeason(self.__shiftedSeasonId)

        self.query_one("#static_show_id", Static).update(str(self.__showId))
        self.query_one("#static_original_season", Static).update(str(shiftedSeason.getOriginalSeason()))
        self.query_one("#static_first_episode", Static).update(str(shiftedSeason.getFirstEpisode()))
        self.query_one("#static_last_episode", Static).update(str(shiftedSeason.getLastEpisode()))
        self.query_one("#static_season_offset", Static).update(str(shiftedSeason.getSeasonOffset()))
        self.query_one("#static_episode_offset", Static).update(str(shiftedSeason.getEpisodeOffset()))

    def compose(self):
        yield Header()

        with Grid():
            yield Static("Are you sure you want to delete the following shifted season?", id="toplabel", classes="two")

            yield Static(" ", classes="two")

            yield Static("from show")
            yield Static(" ", id="static_show_id")

            yield Static(" ", classes="two")

            yield Static("Original season")
            yield Static(" ", id="static_original_season")

            yield Static("First episode")
            yield Static(" ", id="static_first_episode")

            yield Static("Last episode")
            yield Static(" ", id="static_last_episode")

            yield Static("Season offset")
            yield Static(" ", id="static_season_offset")

            yield Static("Episode offset")
            yield Static(" ", id="static_episode_offset")

            yield Static(" ", classes="two")

            yield Button("Delete", id="delete_button")
            yield Button("Cancel", id="cancel_button")

        yield Footer()

    # Event handler for button press
    def on_button_pressed(self, event: Button.Pressed) -> None:
        if event.button.id == "delete_button":
            if self.__shiftedSeasonId is None:
                raise click.ClickException('ShiftedSeasonDeleteScreen.on_button_pressed(): shifted season id is undefined')

            if self.__ssc.deleteShiftedSeason(self.__shiftedSeasonId):
                self.dismiss(self.__shiftedSeasonId)
            else:
                # TODO: show an error message
                self.app.pop_screen()

        if event.button.id == "cancel_button":
            self.app.pop_screen()
@ -1,221 +0,0 @@
|
||||
from typing import List
|
||||
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Header, Footer, Static, Button, Input
|
||||
from textual.containers import Grid
|
||||
|
||||
from .shifted_season_controller import ShiftedSeasonController
|
||||
|
||||
from ffx.model.shifted_season import ShiftedSeason
|
||||
|
||||
|
||||
# Screen[dict[int, str, int]]
|
||||
class ShiftedSeasonDetailsScreen(Screen):
|
||||
|
||||
CSS = """
|
||||
|
||||
Grid {
|
||||
grid-size: 3 10;
|
||||
grid-rows: 2 2 2 2 2 2 2 2 2 2;
|
||||
grid-columns: 40 40 40;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
padding: 1;
|
||||
}
|
||||
|
||||
Input {
|
||||
border: none;
|
||||
}
|
||||
Button {
|
||||
border: none;
|
||||
}
|
||||
|
||||
DataTable {
|
||||
min-height: 6;
|
||||
}
|
||||
|
||||
DataTable .datatable--cursor {
|
||||
background: darkorange;
|
||||
color: black;
|
||||
}
|
||||
|
||||
DataTable .datatable--header {
|
||||
background: steelblue;
|
||||
color: white;
|
||||
}
|
||||
|
||||
#toplabel {
|
||||
height: 1;
|
||||
|
||||
}
|
||||
|
||||
|
||||
.two {
|
||||
column-span: 3;
|
||||
}
|
||||
|
||||
.three {
|
||||
column-span: 3;
|
||||
}
|
||||
|
||||
.four {
|
||||
column-span: 4;
|
||||
}
|
||||
.five {
|
||||
column-span: 5;
|
||||
}
|
||||
.six {
|
||||
column-span: 6;
|
||||
}
|
||||
.seven {
|
||||
column-span: 7;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
border: solid green;
|
||||
}
|
||||
|
||||
.yellow {
|
||||
tint: yellow 40%;
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self, showId = None, shiftedSeasonId = None):
|
||||
super().__init__()
|
||||
|
||||
self.context = self.app.getContext()
|
||||
self.Session = self.context['database']['session'] # convenience
|
||||
|
||||
self.__ssc = ShiftedSeasonController(context = self.context)
|
||||
|
||||
self.__showId = showId
|
||||
self.__shiftedSeasonId = shiftedSeasonId
|
||||
|
||||
def on_mount(self):
|
||||
|
||||
if self.__shiftedSeasonId is not None:
|
||||
shiftedSeason: ShiftedSeason = self.__ssc.getShiftedSeason(self.__shiftedSeasonId)
|
||||
|
||||
originalSeason = shiftedSeason.getOriginalSeason()
|
||||
self.query_one("#input_original_season", Input).value = str(originalSeason)
|
||||
|
||||
firstEpisode = shiftedSeason.getFirstEpisode()
|
||||
self.query_one("#input_first_episode", Input).value = str(firstEpisode) if firstEpisode != -1 else ''
|
||||
|
||||
lastEpisode = shiftedSeason.getLastEpisode()
|
||||
self.query_one("#input_last_episode", Input).value = str(lastEpisode) if lastEpisode != -1 else ''
|
||||
|
||||
seasonOffset = shiftedSeason.getSeasonOffset()
|
||||
self.query_one("#input_season_offset", Input).value = str(seasonOffset) if seasonOffset else ''
|
||||
|
||||
episodeOffset = shiftedSeason.getEpisodeOffset()
|
||||
self.query_one("#input_episode_offset", Input).value = str(episodeOffset) if episodeOffset else ''
|
||||
|
||||
|
||||
def compose(self):
|
||||
|
||||
yield Header()
|
||||
|
||||
with Grid():
|
||||
|
||||
# 1
|
||||
yield Static("Edit shifted season" if self.__shiftedSeasonId is not None else "New shifted season", id="toplabel", classes="three")
|
||||
|
||||
# 2
|
||||
yield Static(" ", classes="three")
|
||||
|
||||
# 3
|
||||
yield Static("Original season")
|
||||
yield Input(id="input_original_season", classes="two")
|
||||
|
||||
# 4
|
||||
yield Static("First Episode")
|
||||
yield Input(id="input_first_episode", classes="two")
|
||||
|
||||
# 5
|
||||
yield Static("Last Episode")
|
||||
yield Input(id="input_last_episode", classes="two")
|
||||
|
||||
# 6
|
||||
yield Static("Season offset")
|
||||
yield Input(id="input_season_offset", classes="two")
|
||||
|
||||
# 7
|
||||
yield Static("Episode offset")
|
||||
yield Input(id="input_episode_offset", classes="two")
|
||||
|
||||
# 8
|
||||
yield Static(" ", classes="three")
|
||||
|
||||
# 9
|
||||
yield Button("Save", id="save_button")
|
||||
yield Button("Cancel", id="cancel_button")
|
||||
yield Static(" ")
|
||||
|
||||
# 10
|
||||
yield Static(" ", classes="three")
|
||||
|
||||
yield Footer()
|
||||
|
||||
|
||||
def getShiftedSeasonObjFromInput(self):
|
||||
|
||||
shiftedSeasonObj = {}
|
||||
|
||||
originalSeason = self.query_one("#input_original_season", Input).value
|
||||
if not originalSeason:
|
||||
return None
|
||||
shiftedSeasonObj['original_season'] = int(originalSeason)
|
||||
|
||||
try:
|
||||
shiftedSeasonObj['first_episode'] = int(self.query_one("#input_first_episode", Input).value)
|
||||
except ValueError:
|
||||
shiftedSeasonObj['first_episode'] = -1
|
||||
|
||||
try:
|
||||
shiftedSeasonObj['last_episode'] = int(self.query_one("#input_last_episode", Input).value)
|
||||
except ValueError:
|
||||
shiftedSeasonObj['last_episode'] = -1
|
||||
|
||||
try:
|
||||
shiftedSeasonObj['season_offset'] = int(self.query_one("#input_season_offset", Input).value)
|
||||
except ValueError:
|
||||
shiftedSeasonObj['season_offset'] = 0
|
||||
|
||||
try:
|
||||
shiftedSeasonObj['episode_offset'] = int(self.query_one("#input_episode_offset", Input).value)
|
||||
except ValueError:
|
||||
shiftedSeasonObj['episode_offset'] = 0
|
||||
|
||||
return shiftedSeasonObj
|
||||
|
||||
|
||||
# Event handler for button press
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
|
||||
# Check if the button pressed is the one we are interested in
|
||||
if event.button.id == "save_button":
|
||||
|
||||
shiftedSeasonObj = self.getShiftedSeasonObjFromInput()
|
||||
|
||||
if shiftedSeasonObj is not None:
|
||||
|
||||
if self.__shiftedSeasonId is not None:
|
||||
|
||||
if self.__ssc.checkShiftedSeason(self.__showId, shiftedSeasonObj,
|
||||
shiftedSeasonId = self.__shiftedSeasonId):
|
||||
if self.__ssc.updateShiftedSeason(self.__shiftedSeasonId, shiftedSeasonObj):
|
||||
self.dismiss((self.__shiftedSeasonId, shiftedSeasonObj))
|
||||
else:
|
||||
#TODO: show a message to the user
|
||||
self.app.pop_screen()
|
||||
|
||||
else:
|
||||
if self.__ssc.checkShiftedSeason(self.__showId, shiftedSeasonObj):
|
||||
self.__shiftedSeasonId = self.__ssc.addShiftedSeason(self.__showId, shiftedSeasonObj)
|
||||
self.dismiss((self.__shiftedSeasonId, shiftedSeasonObj))
|
||||
|
||||
|
||||
if event.button.id == "cancel_button":
|
||||
self.app.pop_screen()
|
||||
@ -1,133 +0,0 @@
import click

from ffx.model.show import Show
from ffx.show_descriptor import ShowDescriptor


class ShowController():

    def __init__(self, context):

        self.context = context
        self.Session = self.context['database']['session'] # convenience


    def getShowDescriptor(self, showId):

        try:
            s = self.Session()
            q = s.query(Show).filter(Show.id == showId)

            if q.count():
                show: Show = q.first()
                return show.getDescriptor(self.context)

        except Exception as ex:
            raise click.ClickException(f"ShowController.getShowDescriptor(): {repr(ex)}")
        finally:
            s.close()

    def getShow(self, showId):

        try:
            s = self.Session()
            q = s.query(Show).filter(Show.id == showId)

            return q.first() if q.count() else None

        except Exception as ex:
            raise click.ClickException(f"ShowController.getShow(): {repr(ex)}")
        finally:
            s.close()

    def getAllShows(self):

        try:
            s = self.Session()
            q = s.query(Show)

            if q.count():
                return q.all()
            else:
                return []

        except Exception as ex:
            raise click.ClickException(f"ShowController.getAllShows(): {repr(ex)}")
        finally:
            s.close()


    def updateShow(self, showDescriptor: ShowDescriptor):

        try:
            s = self.Session()
            q = s.query(Show).filter(Show.id == showDescriptor.getId())

            if not q.count():
                show = Show(id = int(showDescriptor.getId()),
                            name = str(showDescriptor.getName()),
                            year = int(showDescriptor.getYear()),
                            index_season_digits = showDescriptor.getIndexSeasonDigits(),
                            index_episode_digits = showDescriptor.getIndexEpisodeDigits(),
                            indicator_season_digits = showDescriptor.getIndicatorSeasonDigits(),
                            indicator_episode_digits = showDescriptor.getIndicatorEpisodeDigits())

                s.add(show)
                s.commit()
                return True
            else:

                currentShow = q.first()

                changed = False
                if currentShow.name != str(showDescriptor.getName()):
                    currentShow.name = str(showDescriptor.getName())
                    changed = True
                if currentShow.year != int(showDescriptor.getYear()):
                    currentShow.year = int(showDescriptor.getYear())
                    changed = True

                if currentShow.index_season_digits != int(showDescriptor.getIndexSeasonDigits()):
                    currentShow.index_season_digits = int(showDescriptor.getIndexSeasonDigits())
                    changed = True
                if currentShow.index_episode_digits != int(showDescriptor.getIndexEpisodeDigits()):
                    currentShow.index_episode_digits = int(showDescriptor.getIndexEpisodeDigits())
                    changed = True
                if currentShow.indicator_season_digits != int(showDescriptor.getIndicatorSeasonDigits()):
                    currentShow.indicator_season_digits = int(showDescriptor.getIndicatorSeasonDigits())
                    changed = True
                if currentShow.indicator_episode_digits != int(showDescriptor.getIndicatorEpisodeDigits()):
                    currentShow.indicator_episode_digits = int(showDescriptor.getIndicatorEpisodeDigits())
                    changed = True

                if changed:
                    s.commit()
                return changed

        except Exception as ex:
            raise click.ClickException(f"ShowController.updateShow(): {repr(ex)}")
        finally:
            s.close()


    def deleteShow(self, show_id):
        try:
            s = self.Session()
            q = s.query(Show).filter(Show.id == int(show_id))

            if q.count():

                #DAFUQ: https://stackoverflow.com/a/19245058
                # q.delete()
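                # NOTE: Query.delete() issues a bulk DELETE that bypasses ORM-level cascades,
                #       so the row is loaded and removed through the session instead (see the linked answer).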
                show = q.first()
                s.delete(show)

                s.commit()
                return True
            return False

        except Exception as ex:
            raise click.ClickException(f"ShowController.deleteShow(): {repr(ex)}")
        finally:
            s.close()

@ -1,95 +0,0 @@
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Header, Footer, Static, Button
|
||||
from textual.containers import Grid
|
||||
|
||||
from .show_controller import ShowController
|
||||
|
||||
# Screen[dict[int, str, int]]
|
||||
class ShowDeleteScreen(Screen):
|
||||
|
||||
CSS = """
|
||||
|
||||
Grid {
|
||||
grid-size: 2;
|
||||
grid-rows: 2 auto;
|
||||
grid-columns: 30 auto;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
padding: 1;
|
||||
}
|
||||
|
||||
Input {
|
||||
border: none;
|
||||
}
|
||||
Button {
|
||||
border: none;
|
||||
}
|
||||
#toplabel {
|
||||
height: 1;
|
||||
}
|
||||
|
||||
.two {
|
||||
column-span: 2;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
border: solid green;
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self, showId = None):
|
||||
super().__init__()
|
||||
|
||||
self.context = self.app.getContext()
|
||||
self.Session = self.context['database']['session'] # convenience
|
||||
|
||||
self.__sc = ShowController(context = self.context)
|
||||
|
||||
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else {}
|
||||
|
||||
|
||||
def on_mount(self):
|
||||
if not self.__showDescriptor is None:
|
||||
self.query_one("#showlabel", Static).update(f"{self.__showDescriptor.getId()} - {self.__showDescriptor.getName()} ({self.__showDescriptor.getYear()})")
|
||||
|
||||
|
||||
def compose(self):
|
||||
|
||||
yield Header()
|
||||
|
||||
with Grid():
|
||||
|
||||
yield Static("Are you sure to delete the following show?", id="toplabel", classes="two")
|
||||
|
||||
yield Static("", classes="two")
|
||||
|
||||
yield Static("", id="showlabel")
|
||||
yield Static("")
|
||||
|
||||
yield Static("", classes="two")
|
||||
|
||||
yield Static("", classes="two")
|
||||
|
||||
yield Button("Delete", id="delete_button")
|
||||
yield Button("Cancel", id="cancel_button")
|
||||
|
||||
|
||||
yield Footer()
|
||||
|
||||
|
||||
# Event handler for button press
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
|
||||
if event.button.id == "delete_button":
|
||||
|
||||
if not self.__showDescriptor is None:
|
||||
if self.__sc.deleteShow(self.__showDescriptor.getId()):
|
||||
self.dismiss(self.__showDescriptor)
|
||||
|
||||
else:
|
||||
#TODO: show a message to the user
|
||||
self.app.pop_screen()
|
||||
|
||||
if event.button.id == "cancel_button":
|
||||
self.app.pop_screen()
|
||||
@ -1,106 +0,0 @@
|
||||
import logging
|
||||
|
||||
|
||||
class ShowDescriptor():
|
||||
"""This class represents the structural content of a media file including streams and metadata"""
|
||||
|
||||
CONTEXT_KEY = 'context'
|
||||
|
||||
ID_KEY = 'id'
|
||||
NAME_KEY = 'name'
|
||||
YEAR_KEY = 'year'
|
||||
|
||||
INDEX_SEASON_DIGITS_KEY = 'index_season_digits'
|
||||
INDEX_EPISODE_DIGITS_KEY = 'index_episode_digits'
|
||||
INDICATOR_SEASON_DIGITS_KEY = 'indicator_season_digits'
|
||||
INDICATOR_EPISODE_DIGITS_KEY = 'indicator_episode_digits'
|
||||
|
||||
DEFAULT_INDEX_SEASON_DIGITS = 2
|
||||
DEFAULT_INDEX_EPISODE_DIGITS = 2
|
||||
DEFAULT_INDICATOR_SEASON_DIGITS = 2
|
||||
DEFAULT_INDICATOR_EPISODE_DIGITS = 2
|
||||
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
if ShowDescriptor.CONTEXT_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.CONTEXT_KEY]) is not dict:
|
||||
raise TypeError(
|
||||
f"ShowDescriptor.__init__(): Argument {ShowDescriptor.CONTEXT_KEY} is required to be of type dict"
|
||||
)
|
||||
self.__context = kwargs[ShowDescriptor.CONTEXT_KEY]
|
||||
self.__logger = self.__context['logger']
|
||||
else:
|
||||
self.__context = {}
|
||||
self.__logger = logging.getLogger('FFX')
|
||||
self.__logger.addHandler(logging.NullHandler())
|
||||
|
||||
if ShowDescriptor.ID_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.ID_KEY]) is not int:
|
||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.ID_KEY} is required to be of type int")
|
||||
self.__showId = kwargs[ShowDescriptor.ID_KEY]
|
||||
else:
|
||||
self.__showId = -1
|
||||
|
||||
if ShowDescriptor.NAME_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.NAME_KEY]) is not str:
|
||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.NAME_KEY} is required to be of type str")
|
||||
self.__showName = kwargs[ShowDescriptor.NAME_KEY]
|
||||
else:
|
||||
self.__showName = ''
|
||||
|
||||
if ShowDescriptor.YEAR_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.YEAR_KEY]) is not int:
|
||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.YEAR_KEY} is required to be of type int")
|
||||
self.__showYear = kwargs[ShowDescriptor.YEAR_KEY]
|
||||
else:
|
||||
self.__showYear = -1
|
||||
|
||||
|
||||
if ShowDescriptor.INDEX_SEASON_DIGITS_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]) is not int:
|
||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_SEASON_DIGITS_KEY} is required to be of type int")
|
||||
self.__indexSeasonDigits = kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY]
|
||||
else:
|
||||
self.__indexSeasonDigits = ShowDescriptor.DEFAULT_INDEX_SEASON_DIGITS
|
||||
|
||||
if ShowDescriptor.INDEX_EPISODE_DIGITS_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]) is not int:
|
||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDEX_EPISODE_DIGITS_KEY} is required to be of type int")
|
||||
self.__indexEpisodeDigits = kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY]
|
||||
else:
|
||||
self.__indexEpisodeDigits = ShowDescriptor.DEFAULT_INDEX_EPISODE_DIGITS
|
||||
|
||||
if ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]) is not int:
|
||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY} is required to be of type int")
|
||||
self.__indicatorSeasonDigits = kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY]
|
||||
else:
|
||||
self.__indicatorSeasonDigits = ShowDescriptor.DEFAULT_INDICATOR_SEASON_DIGITS
|
||||
|
||||
if ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY in kwargs.keys():
|
||||
if type(kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]) is not int:
|
||||
raise TypeError(f"ShowDescriptor.__init__(): Argument {ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY} is required to be of type int")
|
||||
self.__indicatorEpisodeDigits = kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY]
|
||||
else:
|
||||
self.__indicatorEpisodeDigits = ShowDescriptor.DEFAULT_INDICATOR_EPISODE_DIGITS
|
||||
|
||||
|
||||
def getId(self):
|
||||
return self.__showId
|
||||
def getName(self):
|
||||
return self.__showName
|
||||
def getYear(self):
|
||||
return self.__showYear
|
||||
|
||||
def getIndexSeasonDigits(self):
|
||||
return self.__indexSeasonDigits
|
||||
def getIndexEpisodeDigits(self):
|
||||
return self.__indexEpisodeDigits
|
||||
def getIndicatorSeasonDigits(self):
|
||||
return self.__indicatorSeasonDigits
|
||||
def getIndicatorEpisodeDigits(self):
|
||||
return self.__indicatorEpisodeDigits
|
||||
|
||||
def getFilenamePrefix(self):
|
||||
return f"{self.__showName} ({str(self.__showYear)})"
|
||||
@ -1,492 +0,0 @@
|
||||
import click
|
||||
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Header, Footer, Static, Button, DataTable, Input
|
||||
from textual.containers import Grid
|
||||
from textual.widgets._data_table import CellDoesNotExist
|
||||
|
||||
from ffx.model.pattern import Pattern
|
||||
|
||||
from .pattern_details_screen import PatternDetailsScreen
|
||||
from .pattern_delete_screen import PatternDeleteScreen
|
||||
|
||||
from .show_controller import ShowController
|
||||
from .pattern_controller import PatternController
|
||||
from .tmdb_controller import TmdbController
|
||||
from .shifted_season_controller import ShiftedSeasonController
|
||||
|
||||
from .show_descriptor import ShowDescriptor
|
||||
|
||||
from .shifted_season_details_screen import ShiftedSeasonDetailsScreen
|
||||
from .shifted_season_delete_screen import ShiftedSeasonDeleteScreen
|
||||
|
||||
from ffx.model.shifted_season import ShiftedSeason
|
||||
|
||||
from .helper import filterFilename
|
||||
|
||||
|
||||
# Screen[dict[int, str, int]]
|
||||
class ShowDetailsScreen(Screen):
|
||||
|
||||
CSS = """
|
||||
|
||||
Grid {
|
||||
grid-size: 5 16;
|
||||
grid-rows: 2 2 2 2 2 2 2 2 2 2 2 9 2 9 2 2;
|
||||
grid-columns: 30 30 30 30 30;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
padding: 1;
|
||||
}
|
||||
|
||||
Input {
|
||||
border: none;
|
||||
}
|
||||
Button {
|
||||
border: none;
|
||||
}
|
||||
|
||||
DataTable {
|
||||
column-span: 2;
|
||||
min-height: 8;
|
||||
}
|
||||
|
||||
DataTable .datatable--cursor {
|
||||
background: darkorange;
|
||||
color: black;
|
||||
}
|
||||
|
||||
DataTable .datatable--header {
|
||||
background: steelblue;
|
||||
color: white;
|
||||
}
|
||||
|
||||
#toplabel {
|
||||
height: 1;
|
||||
}
|
||||
|
||||
|
||||
.two {
|
||||
column-span: 2;
|
||||
}
|
||||
.three {
|
||||
column-span: 3;
|
||||
}
|
||||
.four {
|
||||
column-span: 4;
|
||||
}
|
||||
.five {
|
||||
column-span: 5;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
border: solid green;
|
||||
}
|
||||
"""
|
||||
|
||||
BINDINGS = [
|
||||
("a", "add_pattern", "Add Pattern"),
|
||||
("e", "edit_pattern", "Edit Pattern"),
|
||||
("r", "remove_pattern", "Remove Pattern"),
|
||||
]
|
||||
|
||||
def __init__(self, showId = None):
|
||||
super().__init__()
|
||||
|
||||
self.context = self.app.getContext()
|
||||
self.Session = self.context['database']['session'] # convenience
|
||||
|
||||
self.__sc = ShowController(context = self.context)
|
||||
self.__pc = PatternController(context = self.context)
|
||||
self.__tc = TmdbController()
|
||||
self.__ssc = ShiftedSeasonController(context = self.context)
|
||||
|
||||
self.__showDescriptor = self.__sc.getShowDescriptor(showId) if showId is not None else None
|
||||
|
||||
|
||||
def loadPatterns(self, show_id : int):
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
q = s.query(Pattern).filter(Pattern.show_id == int(show_id))
|
||||
|
||||
return [{'id': int(p.id), 'pattern': str(p.pattern)} for p in q.all()]
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"ShowDetailsScreen.loadPatterns(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
|
||||
|
||||
def updateShiftedSeasons(self):
|
||||
|
||||
self.shiftedSeasonsTable.clear()
|
||||
|
||||
if not self.__showDescriptor is None:
|
||||
|
||||
showId = int(self.__showDescriptor.getId())
|
||||
|
||||
shiftedSeason: ShiftedSeason
|
||||
for shiftedSeason in self.__ssc.getShiftedSeasonSiblings(showId=showId):
|
||||
|
||||
shiftedSeasonObj = shiftedSeason.getObj()
|
||||
|
||||
firstEpisode = shiftedSeasonObj['first_episode']
|
||||
firstEpisodeStr = str(firstEpisode) if firstEpisode != -1 else ''
|
||||
|
||||
lastEpisode = shiftedSeasonObj['last_episode']
|
||||
lastEpisodeStr = str(lastEpisode) if lastEpisode != -1 else ''
|
||||
|
||||
row = (shiftedSeasonObj['original_season'],
|
||||
firstEpisodeStr,
|
||||
lastEpisodeStr,
|
||||
shiftedSeasonObj['season_offset'],
|
||||
shiftedSeasonObj['episode_offset'])
|
||||
|
||||
self.shiftedSeasonsTable.add_row(*map(str, row))
|
||||
|
||||
|
||||
|
||||
def on_mount(self):
|
||||
|
||||
if self.__showDescriptor is not None:
|
||||
|
||||
showId = int(self.__showDescriptor.getId())
|
||||
|
||||
self.query_one("#id_static", Static).update(str(showId))
|
||||
self.query_one("#name_input", Input).value = str(self.__showDescriptor.getName())
|
||||
self.query_one("#year_input", Input).value = str(self.__showDescriptor.getYear())
|
||||
|
||||
self.query_one("#index_season_digits_input", Input).value = str(self.__showDescriptor.getIndexSeasonDigits())
|
||||
self.query_one("#index_episode_digits_input", Input).value = str(self.__showDescriptor.getIndexEpisodeDigits())
|
||||
self.query_one("#indicator_season_digits_input", Input).value = str(self.__showDescriptor.getIndicatorSeasonDigits())
|
||||
self.query_one("#indicator_episode_digits_input", Input).value = str(self.__showDescriptor.getIndicatorEpisodeDigits())
|
||||
|
||||
|
||||
#raise click.ClickException(f"show_id {showId}")
|
||||
patternList = self.loadPatterns(showId)
|
||||
# raise click.ClickException(f"patternList {patternList}")
|
||||
for pattern in patternList:
|
||||
row = (pattern['pattern'],)
|
||||
self.patternTable.add_row(*map(str, row))
|
||||
|
||||
self.updateShiftedSeasons()
|
||||
|
||||
else:
|
||||
|
||||
self.query_one("#index_season_digits_input", Input).value = "2"
|
||||
self.query_one("#index_episode_digits_input", Input).value = "2"
|
||||
self.query_one("#indicator_season_digits_input", Input).value = "2"
|
||||
self.query_one("#indicator_episode_digits_input", Input).value = "2"
|
||||
|
||||
|
||||
def getSelectedPatternDescriptor(self):
|
||||
|
||||
selectedPattern = {}
|
||||
|
||||
try:
|
||||
|
||||
# Fetch the currently selected row when 'Enter' is pressed
|
||||
#selected_row_index = self.table.cursor_row
|
||||
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
|
||||
|
||||
if row_key is not None:
|
||||
selected_row_data = self.patternTable.get_row(row_key)
|
||||
|
||||
selectedPattern['show_id'] = self.__showDescriptor.getId()
|
||||
selectedPattern['pattern'] = str(selected_row_data[0])
|
||||
|
||||
except CellDoesNotExist:
|
||||
pass
|
||||
|
||||
return selectedPattern
|
||||
|
||||
|
||||
def getSelectedShiftedSeasonObjFromInput(self):
|
||||
|
||||
shiftedSeasonObj = {}
|
||||
|
||||
try:
|
||||
|
||||
# Fetch the currently selected row when 'Enter' is pressed
|
||||
#selected_row_index = self.table.cursor_row
|
||||
row_key, col_key = self.shiftedSeasonsTable.coordinate_to_cell_key(self.shiftedSeasonsTable.cursor_coordinate)
|
||||
|
||||
if row_key is not None:
|
||||
selected_row_data = self.shiftedSeasonsTable.get_row(row_key)
|
||||
|
||||
shiftedSeasonObj['original_season'] = int(selected_row_data[0])
|
||||
shiftedSeasonObj['first_episode'] = int(selected_row_data[1]) if selected_row_data[1].isnumeric() else -1
|
||||
shiftedSeasonObj['last_episode'] = int(selected_row_data[2]) if selected_row_data[2].isnumeric() else -1
|
||||
shiftedSeasonObj['season_offset'] = int(selected_row_data[3]) if selected_row_data[3].isnumeric() else 0
|
||||
shiftedSeasonObj['episode_offset'] = int(selected_row_data[4]) if selected_row_data[4].isnumeric() else 0
|
||||
|
||||
|
||||
if self.__showDescriptor is not None:
|
||||
|
||||
showId = int(self.__showDescriptor.getId())
|
||||
|
||||
shiftedSeasonId = self.__ssc.findShiftedSeason(showId,
|
||||
originalSeason=shiftedSeasonObj['original_season'],
|
||||
firstEpisode=shiftedSeasonObj['first_episode'],
|
||||
lastEpisode=shiftedSeasonObj['last_episode'])
|
||||
if shiftedSeasonId is not None:
|
||||
shiftedSeasonObj['id'] = shiftedSeasonId
|
||||
|
||||
except CellDoesNotExist:
|
||||
pass
|
||||
|
||||
return shiftedSeasonObj
|
||||
|
||||
|
||||
def action_add_pattern(self):
|
||||
if not self.__showDescriptor is None:
|
||||
self.app.push_screen(PatternDetailsScreen(showId = self.__showDescriptor.getId()), self.handle_add_pattern)
|
||||
|
||||
|
||||
def handle_add_pattern(self, screenResult):
|
||||
|
||||
pattern = (screenResult['pattern'],)
|
||||
self.patternTable.add_row(*map(str, pattern))
|
||||
|
||||
|
||||
def action_edit_pattern(self):
|
||||
|
||||
selectedPatternDescriptor = self.getSelectedPatternDescriptor()
|
||||
|
||||
if selectedPatternDescriptor:
|
||||
|
||||
selectedPatternId = self.__pc.findPattern(selectedPatternDescriptor)
|
||||
|
||||
if selectedPatternId is None:
|
||||
raise click.ClickException(f"ShowDetailsScreen.action_edit_pattern(): Pattern to edit has no id")
|
||||
|
||||
self.app.push_screen(PatternDetailsScreen(patternId = selectedPatternId, showId = self.__showDescriptor.getId()), self.handle_edit_pattern) # <-
|
||||
|
||||
|
||||
def handle_edit_pattern(self, screenResult):
|
||||
|
||||
try:
|
||||
|
||||
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
|
||||
self.patternTable.update_cell(row_key, self.column_key_pattern, screenResult['pattern'])
|
||||
|
||||
except CellDoesNotExist:
|
||||
pass
|
||||
|
||||
|
||||
def action_remove_pattern(self):
|
||||
|
||||
selectedPatternDescriptor = self.getSelectedPatternDescriptor()
|
||||
|
||||
if selectedPatternDescriptor:
|
||||
|
||||
selectedPatternId = self.__pc.findPattern(selectedPatternDescriptor)
|
||||
|
||||
if selectedPatternId is None:
|
||||
raise click.ClickException(f"ShowDetailsScreen.action_remove_pattern(): Pattern to remove has no id")
|
||||
|
||||
self.app.push_screen(PatternDeleteScreen(patternId = selectedPatternId, showId = self.__showDescriptor.getId()), self.handle_remove_pattern)
|
||||
|
||||
|
||||
def handle_remove_pattern(self, pattern):
|
||||
|
||||
try:
|
||||
row_key, col_key = self.patternTable.coordinate_to_cell_key(self.patternTable.cursor_coordinate)
|
||||
self.patternTable.remove_row(row_key)
|
||||
|
||||
except CellDoesNotExist:
|
||||
pass
|
||||
|
||||
|
||||
def compose(self):
|
||||
|
||||
# Create the DataTable widget
|
||||
self.patternTable = DataTable(classes="five")
|
||||
|
||||
# Define the columns with headers
|
||||
self.column_key_pattern = self.patternTable.add_column("Pattern", width=150)
|
||||
|
||||
self.patternTable.cursor_type = 'row'
|
||||
|
||||
|
||||
self.shiftedSeasonsTable = DataTable(classes="five")
|
||||
|
||||
self.column_key_original_season = self.shiftedSeasonsTable.add_column("Original Season", width=30)
|
||||
self.column_key_first_episode = self.shiftedSeasonsTable.add_column("First Episode", width=30)
|
||||
self.column_key_last_episode = self.shiftedSeasonsTable.add_column("Last Episode", width=30)
|
||||
self.column_key_season_offset = self.shiftedSeasonsTable.add_column("Season Offset", width=30)
|
||||
self.column_key_episode_offset = self.shiftedSeasonsTable.add_column("Episode Offset", width=30)
|
||||
|
||||
self.shiftedSeasonsTable.cursor_type = 'row'
|
||||
|
||||
|
||||
yield Header()
|
||||
|
||||
with Grid():
|
||||
|
||||
# 1
|
||||
yield Static("Show" if not self.__showDescriptor is None else "New Show", id="toplabel")
|
||||
yield Button("Identify", id="identify_button")
|
||||
yield Static(" ", classes="three")
|
||||
|
||||
# 2
|
||||
yield Static("ID")
|
||||
if not self.__showDescriptor is None:
|
||||
yield Static("", id="id_static", classes="four")
|
||||
else:
|
||||
yield Input(type="integer", id="id_input", classes="four")
|
||||
|
||||
# 3
|
||||
yield Static("Name")
|
||||
yield Input(type="text", id="name_input", classes="four")
|
||||
|
||||
# 4
|
||||
yield Static("Year")
|
||||
yield Input(type="integer", id="year_input", classes="four")
|
||||
|
||||
#5
|
||||
yield Static(" ", classes="five")
|
||||
|
||||
#6
|
||||
yield Static("Index Season Digits")
|
||||
yield Input(type="integer", id="index_season_digits_input", classes="four")
|
||||
|
||||
#7
|
||||
yield Static("Index Episode Digits")
|
||||
yield Input(type="integer", id="index_episode_digits_input", classes="four")
|
||||
|
||||
#8
|
||||
yield Static("Indicator Season Digits")
|
||||
yield Input(type="integer", id="indicator_season_digits_input", classes="four")
|
||||
|
||||
#9
|
||||
yield Static("Indicator Edisode Digits")
|
||||
yield Input(type="integer", id="indicator_episode_digits_input", classes="four")
|
||||
|
||||
# 10
|
||||
yield Static(" ", classes="five")
|
||||
|
||||
# 11
|
||||
yield Static("Shifted seasons", classes="two")
|
||||
|
||||
if self.__showDescriptor is not None:
|
||||
yield Button("Add", id="button_add_shifted_season")
|
||||
yield Button("Edit", id="button_edit_shifted_season")
|
||||
yield Button("Delete", id="button_delete_shifted_season")
|
||||
else:
|
||||
yield Static(" ")
|
||||
yield Static(" ")
|
||||
yield Static(" ")
|
||||
|
||||
# 12
|
||||
yield self.shiftedSeasonsTable
|
||||
|
||||
# 13
|
||||
yield Static("File patterns", classes="five")
|
||||
# 14
|
||||
yield self.patternTable
|
||||
|
||||
# 15
|
||||
yield Static(" ", classes="five")
|
||||
|
||||
# 16
|
||||
yield Button("Save", id="save_button")
|
||||
yield Button("Cancel", id="cancel_button")
|
||||
|
||||
|
||||
yield Footer()
|
||||
|
||||
|
||||
def getShowDescriptorFromInput(self) -> ShowDescriptor:
|
||||
|
||||
kwargs = {}
|
||||
|
||||
try:
|
||||
if self.__showDescriptor:
|
||||
kwargs[ShowDescriptor.ID_KEY] = int(self.__showDescriptor.getId())
|
||||
else:
|
||||
kwargs[ShowDescriptor.ID_KEY] = int(self.query_one("#id_input", Input).value)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
try:
|
||||
kwargs[ShowDescriptor.NAME_KEY] = str(self.query_one("#name_input", Input).value)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
kwargs[ShowDescriptor.YEAR_KEY] = int(self.query_one("#year_input", Input).value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
kwargs[ShowDescriptor.INDEX_SEASON_DIGITS_KEY] = int(self.query_one("#index_season_digits_input", Input).value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
kwargs[ShowDescriptor.INDEX_EPISODE_DIGITS_KEY] = int(self.query_one("#index_episode_digits_input", Input).value)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
kwargs[ShowDescriptor.INDICATOR_SEASON_DIGITS_KEY] = int(self.query_one("#indicator_season_digits_input", Input).value)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
kwargs[ShowDescriptor.INDICATOR_EPISODE_DIGITS_KEY] = int(self.query_one("#indicator_episode_digits_input", Input).value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return ShowDescriptor(**kwargs)
|
||||
|
||||
|
||||
# Event handler for button press
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
|
||||
if event.button.id == "save_button":
|
||||
|
||||
showDescriptor = self.getShowDescriptorFromInput()
|
||||
|
||||
if not showDescriptor is None:
|
||||
if self.__sc.updateShow(showDescriptor):
|
||||
self.dismiss(showDescriptor)
|
||||
else:
|
||||
#TODO: show a message to the user
|
||||
self.app.pop_screen()
|
||||
|
||||
if event.button.id == "cancel_button":
|
||||
self.app.pop_screen()
|
||||
|
||||
|
||||
if event.button.id == "identify_button":
|
||||
|
||||
showDescriptor = self.getShowDescriptorFromInput()
|
||||
if not showDescriptor is None:
|
||||
showName, showYear = self.__tc.getShowNameAndYear(showDescriptor.getId())
|
||||
|
||||
self.query_one("#name_input", Input).value = filterFilename(showName)
|
||||
self.query_one("#year_input", Input).value = str(showYear)
|
||||
|
||||
|
||||
if event.button.id == "button_add_shifted_season":
|
||||
if not self.__showDescriptor is None:
|
||||
self.app.push_screen(ShiftedSeasonDetailsScreen(showId = self.__showDescriptor.getId()), self.handle_update_shifted_season)
|
||||
|
||||
if event.button.id == "button_edit_shifted_season":
|
||||
selectedShiftedSeasonObj = self.getSelectedShiftedSeasonObjFromInput()
|
||||
if 'id' in selectedShiftedSeasonObj.keys():
|
||||
self.app.push_screen(ShiftedSeasonDetailsScreen(showId = self.__showDescriptor.getId(), shiftedSeasonId=selectedShiftedSeasonObj['id']), self.handle_update_shifted_season)
|
||||
|
||||
if event.button.id == "button_delete_shifted_season":
|
||||
selectedShiftedSeasonObj = self.getSelectedShiftedSeasonObjFromInput()
|
||||
if 'id' in selectedShiftedSeasonObj.keys():
|
||||
self.app.push_screen(ShiftedSeasonDeleteScreen(showId = self.__showDescriptor.getId(), shiftedSeasonId=selectedShiftedSeasonObj['id']), self.handle_delete_shifted_season)
|
||||
|
||||
|
||||
def handle_update_shifted_season(self, screenResult):
|
||||
self.updateShiftedSeasons()
|
||||
|
||||
def handle_delete_shifted_season(self, screenResult):
|
||||
self.updateShiftedSeasons()
|
||||
@ -1,168 +0,0 @@
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Header, Footer, Static, DataTable
|
||||
from textual.containers import Grid
|
||||
|
||||
from .show_controller import ShowController
|
||||
|
||||
from .show_details_screen import ShowDetailsScreen
|
||||
from .show_delete_screen import ShowDeleteScreen
|
||||
|
||||
from ffx.show_descriptor import ShowDescriptor
|
||||
|
||||
from textual.widgets._data_table import CellDoesNotExist
|
||||
|
||||
|
||||
class ShowsScreen(Screen):
|
||||
|
||||
CSS = """
|
||||
|
||||
Grid {
|
||||
grid-size: 1;
|
||||
grid-rows: 2 auto;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
padding: 1;
|
||||
}
|
||||
|
||||
DataTable .datatable--cursor {
|
||||
background: darkorange;
|
||||
color: black;
|
||||
}
|
||||
|
||||
DataTable .datatable--header {
|
||||
background: steelblue;
|
||||
color: white;
|
||||
}
|
||||
|
||||
#top {
|
||||
height: 1;
|
||||
}
|
||||
|
||||
|
||||
#two {
|
||||
column-span: 2;
|
||||
row-span: 2;
|
||||
tint: magenta 40%;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
border: solid green;
|
||||
}
|
||||
"""
|
||||
|
||||
BINDINGS = [
|
||||
("e", "edit_show", "Edit Show"),
|
||||
("n", "new_show", "New Show"),
|
||||
("d", "delete_show", "Delete Show"),
|
||||
]
|
||||
|
||||
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
self.context = self.app.getContext()
|
||||
self.Session = self.context['database']['session'] # convenience
|
||||
|
||||
self.__sc = ShowController(context = self.context)
|
||||
|
||||
|
||||
def getSelectedShowId(self):
|
||||
|
||||
try:
|
||||
# Fetch the currently selected row when 'Enter' is pressed
|
||||
#selected_row_index = self.table.cursor_row
|
||||
row_key, col_key = self.table.coordinate_to_cell_key(self.table.cursor_coordinate)
|
||||
|
||||
if row_key is not None:
|
||||
selected_row_data = self.table.get_row(row_key)
|
||||
|
||||
return selected_row_data[0]
|
||||
|
||||
except CellDoesNotExist:
|
||||
return None
|
||||
|
||||
|
||||
|
||||
|
||||
def action_new_show(self):
|
||||
self.app.push_screen(ShowDetailsScreen(), self.handle_new_screen)
|
||||
|
||||
def handle_new_screen(self, screenResult):
|
||||
|
||||
show = (screenResult['id'], screenResult['name'], screenResult['year'])
|
||||
self.table.add_row(*map(str, show))
|
||||
|
||||
|
||||
def action_edit_show(self):
|
||||
|
||||
selectedShowId = self.getSelectedShowId()
|
||||
|
||||
if selectedShowId is not None:
|
||||
self.app.push_screen(ShowDetailsScreen(showId = selectedShowId), self.handle_edit_screen)
|
||||
|
||||
|
||||
def handle_edit_screen(self, showDescriptor: ShowDescriptor):
|
||||
|
||||
try:
|
||||
|
||||
row_key, col_key = self.table.coordinate_to_cell_key(self.table.cursor_coordinate)
|
||||
|
||||
self.table.update_cell(row_key, self.column_key_name, showDescriptor.getName())
|
||||
self.table.update_cell(row_key, self.column_key_year, showDescriptor.getYear())
|
||||
|
||||
except CellDoesNotExist:
|
||||
pass
|
||||
|
||||
|
||||
|
||||
|
||||
def action_delete_show(self):
|
||||
|
||||
selectedShowId = self.getSelectedShowId()
|
||||
|
||||
if selectedShowId is not None:
|
||||
self.app.push_screen(ShowDeleteScreen(showId = selectedShowId), self.handle_delete_show)
|
||||
|
||||
|
||||
def handle_delete_show(self, showDescriptor: ShowDescriptor):
|
||||
|
||||
try:
|
||||
row_key, col_key = self.table.coordinate_to_cell_key(self.table.cursor_coordinate)
|
||||
self.table.remove_row(row_key)
|
||||
|
||||
except CellDoesNotExist:
|
||||
pass
|
||||
|
||||
|
||||
def on_mount(self) -> None:
|
||||
for show in self.__sc.getAllShows():
|
||||
row = (int(show.id), show.name, show.year) # Convert each element to a string before adding
|
||||
self.table.add_row(*map(str, row))
|
||||
|
||||
|
||||
def compose(self):
|
||||
|
||||
# Create the DataTable widget
|
||||
self.table = DataTable()
|
||||
|
||||
# Define the columns with headers
|
||||
self.column_key_id = self.table.add_column("ID", width=10)
|
||||
self.column_key_name = self.table.add_column("Name", width=50)
|
||||
self.column_key_year = self.table.add_column("Year", width=10)
|
||||
|
||||
self.table.cursor_type = 'row'
|
||||
|
||||
yield Header()
|
||||
|
||||
with Grid():
|
||||
|
||||
yield Static("Shows")
|
||||
|
||||
yield self.table
|
||||
|
||||
f = Footer()
|
||||
f.description = "yolo"
|
||||
|
||||
yield f
|
||||
@ -1,220 +0,0 @@
|
||||
import click
|
||||
|
||||
from ffx.model.track import Track
|
||||
|
||||
from ffx.model.media_tag import MediaTag
|
||||
from ffx.model.track_tag import TrackTag
|
||||
|
||||
|
||||
class TagController():
|
||||
|
||||
def __init__(self, context):
|
||||
|
||||
self.context = context
|
||||
self.Session = self.context['database']['session'] # convenience
|
||||
|
||||
|
||||
def updateMediaTag(self, patternId, tagKey, tagValue):
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
|
||||
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId),
|
||||
MediaTag.key == str(tagKey))
|
||||
tag = q.first()
|
||||
if tag:
|
||||
tag.value = str(tagValue)
|
||||
else:
|
||||
tag = MediaTag(pattern_id = int(patternId),
|
||||
key = str(tagKey),
|
||||
value = str(tagValue))
|
||||
s.add(tag)
|
||||
s.commit()
|
||||
|
||||
return int(tag.id)
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.updateTrackTag(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
def updateTrackTag(self, trackId, tagKey, tagValue):
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
|
||||
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId),
|
||||
TrackTag.key == str(tagKey))
|
||||
tag = q.first()
|
||||
if tag:
|
||||
tag.value = str(tagValue)
|
||||
else:
|
||||
tag = TrackTag(track_id = int(trackId),
|
||||
key = str(tagKey),
|
||||
value = str(tagValue))
|
||||
s.add(tag)
|
||||
s.commit()
|
||||
|
||||
return int(tag.id)
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.updateTrackTag(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
def deleteMediaTagByKey(self, patternId, tagKey):
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
|
||||
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId),
|
||||
MediaTag.key == str(tagKey))
|
||||
if q.count():
|
||||
tag = q.first()
|
||||
s.delete(tag)
|
||||
s.commit()
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.deleteMediaTagByKey(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
def deleteTrackTagByKey(self, trackId, tagKey):
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
|
||||
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId),
|
||||
TrackTag.key == str(tagKey))
|
||||
tag = q.first()
|
||||
if tag:
|
||||
s.delete(tag)
|
||||
s.commit()
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.deleteTrackTagByKey(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
def findAllMediaTags(self, patternId) -> dict:
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
|
||||
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId))
|
||||
|
||||
if q.count():
|
||||
return {t.key:t.value for t in q.all()}
|
||||
else:
|
||||
return {}
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.findAllMediaTags(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
|
||||
def findAllTrackTags(self, trackId) -> dict:
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
|
||||
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId))
|
||||
|
||||
if q.count():
|
||||
return {t.key:t.value for t in q.all()}
|
||||
else:
|
||||
return {}
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.findAllTracks(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
|
||||
def findMediaTag(self, patternId : int, tagKey : str) -> MediaTag:
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
q = s.query(MediaTag).filter(MediaTag.pattern_id == int(patternId), MediaTag.key == str(tagKey))
|
||||
|
||||
if q.count():
|
||||
return q.first()
|
||||
else:
|
||||
return None
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.findMediaTag(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
def findTrackTag(self, trackId : int, tagKey : str) -> TrackTag:
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
q = s.query(TrackTag).filter(TrackTag.track_id == int(trackId), TrackTag.key == str(tagKey))
|
||||
|
||||
if q.count():
|
||||
return q.first()
|
||||
else:
|
||||
return None
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.findTrackTag(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
|
||||
|
||||
def deleteMediaTag(self, tagId) -> bool:
|
||||
try:
|
||||
s = self.Session()
|
||||
q = s.query(MediaTag).filter(MediaTag.id == int(tagId))
|
||||
|
||||
if q.count():
|
||||
|
||||
tag = q.first()
|
||||
|
||||
s.delete(tag)
|
||||
|
||||
s.commit()
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.deleteMediaTag(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
|
||||
|
||||
def deleteTrackTag(self, tagId : int) -> bool:
|
||||
|
||||
if type(tagId) is not int:
|
||||
raise TypeError('TagController.deleteTrackTag(): Argument tagId is required to be of type int')
|
||||
|
||||
try:
|
||||
s = self.Session()
|
||||
q = s.query(TrackTag).filter(TrackTag.id == int(tagId))
|
||||
|
||||
if q.count():
|
||||
|
||||
tag = q.first()
|
||||
|
||||
s.delete(tag)
|
||||
|
||||
s.commit()
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
except Exception as ex:
|
||||
raise click.ClickException(f"TagController.deleteTrackTag(): {repr(ex)}")
|
||||
finally:
|
||||
s.close()
|
||||
@ -1,98 +0,0 @@
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Header, Footer, Static, Button
|
||||
from textual.containers import Grid
|
||||
|
||||
|
||||
# Screen[dict[int, str, int]]
|
||||
class TagDeleteScreen(Screen):
|
||||
|
||||
CSS = """
|
||||
|
||||
Grid {
|
||||
grid-size: 4 9;
|
||||
grid-rows: 2 2 2 2 2 2 2 2 2;
|
||||
grid-columns: 30 30 30 30;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
padding: 1;
|
||||
}
|
||||
|
||||
Input {
|
||||
border: none;
|
||||
}
|
||||
Button {
|
||||
border: none;
|
||||
}
|
||||
#toplabel {
|
||||
height: 1;
|
||||
}
|
||||
|
||||
.two {
|
||||
column-span: 2;
|
||||
}
|
||||
.three {
|
||||
column-span: 3;
|
||||
}
|
||||
.four {
|
||||
column-span: 4;
|
||||
}
|
||||
.five {
|
||||
column-span: 5;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
border: solid green;
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self, key=None, value=None):
|
||||
super().__init__()
|
||||
self.__key = key
|
||||
self.__value = value
|
||||
|
||||
|
||||
def on_mount(self):
|
||||
|
||||
self.query_one("#keylabel", Static).update(str(self.__key))
|
||||
self.query_one("#valuelabel", Static).update(str(self.__value))
|
||||
|
||||
|
||||
def compose(self):
|
||||
|
||||
yield Header()
|
||||
|
||||
with Grid():
|
||||
|
||||
#1
|
||||
yield Static(f"Are you sure to delete this tag ?", id="toplabel", classes="five")
|
||||
|
||||
#2
|
||||
yield Static("Key")
|
||||
yield Static(" ", id="keylabel", classes="four")
|
||||
|
||||
#3
|
||||
yield Static("Value")
|
||||
yield Static(" ", id="valuelabel", classes="four")
|
||||
|
||||
#4
|
||||
yield Static(" ", classes="five")
|
||||
|
||||
#9
|
||||
yield Button("Delete", id="delete_button")
|
||||
yield Button("Cancel", id="cancel_button")
|
||||
|
||||
yield Footer()
|
||||
|
||||
|
||||
# Event handler for button press
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
|
||||
if event.button.id == "delete_button":
|
||||
|
||||
tag = (self.__key, self.__value)
|
||||
self.dismiss(tag)
|
||||
|
||||
if event.button.id == "cancel_button":
|
||||
self.app.pop_screen()
|
||||
|
||||
@ -1,132 +0,0 @@
|
||||
from textual.screen import Screen
|
||||
from textual.widgets import Header, Footer, Static, Button, Input
|
||||
from textual.containers import Grid
|
||||
|
||||
|
||||
# Screen[dict[int, str, int]]
|
||||
class TagDetailsScreen(Screen):
|
||||
|
||||
CSS = """
|
||||
|
||||
Grid {
|
||||
grid-size: 5 20;
|
||||
grid-rows: 2 2 2 2 2 3 2 2 2 2 2 6 2 2 6 2 2 2 2 6;
|
||||
grid-columns: 25 25 25 25 225;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
padding: 1;
|
||||
}
|
||||
|
||||
Input {
|
||||
border: none;
|
||||
}
|
||||
Button {
|
||||
border: none;
|
||||
}
|
||||
SelectionList {
|
||||
border: none;
|
||||
min-height: 6;
|
||||
}
|
||||
Select {
|
||||
border: none;
|
||||
}
|
||||
|
||||
DataTable {
|
||||
min-height: 6;
|
||||
}
|
||||
|
||||
DataTable .datatable--cursor {
|
||||
background: darkorange;
|
||||
color: black;
|
||||
}
|
||||
|
||||
DataTable .datatable--header {
|
||||
background: steelblue;
|
||||
color: white;
|
||||
}
|
||||
|
||||
#toplabel {
|
||||
height: 1;
|
||||
}
|
||||
|
||||
.two {
|
||||
column-span: 2;
|
||||
}
|
||||
.three {
|
||||
column-span: 3;
|
||||
}
|
||||
|
||||
.four {
|
||||
column-span: 4;
|
||||
}
|
||||
.five {
|
||||
column-span: 5;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
border: solid green;
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self, key=None, value=None):
|
||||
super().__init__()
|
||||
self.__key = key
|
||||
self.__value = value
|
||||
|
||||
|
||||
def on_mount(self):
|
||||
|
||||
if self.__key is not None:
|
||||
self.query_one("#key_input", Input).value = str(self.__key)
|
||||
|
||||
if self.__value is not None:
|
||||
self.query_one("#value_input", Input).value = str(self.__value)
|
||||
|
||||
|
||||
def compose(self):
|
||||
|
||||
yield Header()
|
||||
|
||||
with Grid():
|
||||
|
||||
# 8
|
||||
yield Static("Key")
|
||||
yield Input(id="key_input", classes="four")
|
||||
|
||||
yield Static("Value")
|
||||
yield Input(id="value_input", classes="four")
|
||||
|
||||
# 17
|
||||
yield Static(" ", classes="five")
|
||||
|
||||
# 18
|
||||
yield Button("Save", id="save_button")
|
||||
yield Button("Cancel", id="cancel_button")
|
||||
|
||||
# 19
|
||||
yield Static(" ", classes="five")
|
||||
|
||||
# 20
|
||||
yield Static(" ", classes="five", id="messagestatic")
|
||||
|
||||
yield Footer(id="footer")
|
||||
|
||||
|
||||
def getTagFromInput(self):
|
||||
|
||||
tagKey = self.query_one("#key_input", Input).value
|
||||
tagValue = self.query_one("#value_input", Input).value
|
||||
|
||||
return (tagKey, tagValue)
|
||||
|
||||
|
||||
# Event handler for button press
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
|
||||
# Check if the button pressed is the one we are interested in
|
||||
if event.button.id == "save_button":
|
||||
self.dismiss(self.getTagFromInput())
|
||||
|
||||
if event.button.id == "cancel_button":
|
||||
self.app.pop_screen()
|
||||
@ -1,64 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .basename_combinator import BasenameCombinator

from .indicator_combinator import IndicatorCombinator
from .label_combinator import LabelCombinator


class BasenameCombinator2(BasenameCombinator):
    """documentation_site"""

    VARIANT = 'B2'

    # def __init__(self, SubCombinators: dict = {}, context = None):
    def __init__(self, context = None):

        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getVariant(self):
        return BasenameCombinator2.VARIANT

    def getPayload(self):
        return ''

    def assertFunc(self, mediaDescriptor: MediaDescriptor):
        pass

    def shouldFail(self):
        return False

    def getYield(self):

        for L in LabelCombinator.getAllClassReferences():
            # for I in IndicatorCombinator.getAllClassReferences():
            # for S in SiteCombinator.getAllClassReferences():
            # for T in TitleCombinator.getAllClassReferences():
            #

            l = L(self._context)

            yieldObj = {}

            yieldObj['identifier'] = self.getIdentifier()

            yieldObj['variants'] = [self.getVariant(),
                                    l.getVariant()]

            yieldObj['payload'] = {'label': l.getPayload()}

            yieldObj['assertSelectors'] = ['B', 'L']

            yieldObj['assertFuncs'] = [self.assertFunc,
                                       l.assertFunc]

            yieldObj['shouldFail'] = (self.shouldFail()
                                      | l.shouldFail())

            yield yieldObj

@ -1,35 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

class BasenameCombinator():

    IDENTIFIER = 'basename'

    BASENAME = 'media'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return BasenameCombinator.IDENTIFIER

    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
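        # Derive the variant identifier from each filename: drop the "basename_combinator_" prefix (20 characters) and the ".py" suffix.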
        return [os.path.basename(p)[20:-3]
                for p
                in glob.glob(f"{ basePath }/basename_combinator_*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.basename_combinator_{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.basename_combinator_{ identifier }"]):
            #HINT: Excluding MediaCombinator as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'BasenameCombinator' and name.startswith('BasenameCombinator'):
                return obj

    @staticmethod
    def getAllClassReferences():
        return [BasenameCombinator.getClassReference(i) for i in BasenameCombinator.list()]

@ -1,107 +0,0 @@
|
||||
import os, sys, importlib, glob, inspect, itertools
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from .basename_combinator import BasenameCombinator
|
||||
|
||||
from .indicator_combinator import IndicatorCombinator
|
||||
from .label_combinator import LabelCombinator
|
||||
|
||||
|
||||
class BasenameCombinator0(BasenameCombinator):
|
||||
"""base[_indicator]"""
|
||||
|
||||
VARIANT = 'B0'
|
||||
|
||||
# def __init__(self, SubCombinators: dict = {}, context = None):
|
||||
def __init__(self, context = None):
|
||||
self._context = context
|
||||
self._logger = context['logger']
|
||||
self._reportLogger = context['report_logger']
|
||||
|
||||
def getVariant(self):
|
||||
return BasenameCombinator0.VARIANT
|
||||
|
||||
def getPayload(self, indicator = '', label = ''):
|
||||
|
||||
basename = BasenameCombinator.BASENAME
|
||||
expectedBasename = label if label else BasenameCombinator.BASENAME
|
||||
|
||||
if indicator:
|
||||
basename += f"_{indicator}"
|
||||
expectedBasename += f"_{indicator}"
|
||||
|
||||
return {'basename': basename,
|
||||
'label': label,
|
||||
'expectedBasename': expectedBasename}
|
||||
|
||||
|
||||
def assertFunc(self, indicator = '', label = ''):
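# Builds and returns an assertion closure bound to the expected indicator and label, so it can be evaluated later against the produced filenames.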
|
||||
|
||||
def f(testObj: dict = {}):
|
||||
|
||||
if not 'filenames' in testObj.keys():
|
||||
raise KeyError("testObj does not contain key 'filenames'")
|
||||
|
||||
fNames = testObj['filenames']
|
||||
|
||||
assert len(fNames) == 1, "More than one result file was created"
|
||||
|
||||
resultFilename = fNames[0]
|
||||
|
||||
fTokens = resultFilename.split('.')
|
||||
|
||||
resultBasename = '.'.join(fTokens[:-1])
|
||||
resultExtension = fTokens[-1]
|
||||
|
||||
if not indicator and not label:
|
||||
|
||||
assert resultBasename == BasenameCombinator.BASENAME, f"Result basename is not {BasenameCombinator.BASENAME}"
|
||||
if not indicator and label:
|
||||
assert resultBasename == label, f"Result basename is not {label}"
|
||||
if indicator and not label:
|
||||
assert resultBasename == f"{BasenameCombinator.BASENAME}_{indicator}", f"Result basename is not {BasenameCombinator.BASENAME}_{indicator}"
|
||||
if indicator and label:
|
||||
assert resultBasename == f"{label}_{indicator}", f"Result basename is not {label}_{indicator}"
|
||||
|
||||
return f
|
||||
|
||||
|
||||
def shouldFail(self):
|
||||
return False
|
||||
|
||||
def getYield(self):
|
||||
|
||||
ic = IndicatorCombinator(self._context)
|
||||
|
||||
for L in LabelCombinator.getAllClassReferences():
|
||||
for i in ic.getYield():
|
||||
|
||||
l = L(self._context)
|
||||
|
||||
indicator = i['indicator']
|
||||
indicatorVariant = i['variant']
|
||||
|
||||
yieldObj = {}
|
||||
|
||||
yieldObj['identifier'] = self.getIdentifier()
|
||||
|
||||
yieldObj['variants'] = [self.getVariant(),
|
||||
l.getVariant(),
|
||||
indicatorVariant]
|
||||
|
||||
yieldObj['payload'] = self.getPayload(indicator = indicator,
|
||||
label = l.getPayload())
|
||||
|
||||
yieldObj['assertSelectors'] = ['B', 'L', 'I']
|
||||
|
||||
yieldObj['assertFuncs'] = [self.assertFunc(indicator, l.getPayload()), l.assertFunc, ic.assertFunc]
|
||||
|
||||
yieldObj['shouldFail'] = (self.shouldFail()
|
||||
| l.shouldFail()
|
||||
| ic.shouldFail())
|
||||
|
||||
yield yieldObj
|
||||
@ -1,159 +0,0 @@
|
||||
import os, sys, importlib, glob, inspect, itertools
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from .basename_combinator import BasenameCombinator
|
||||
|
||||
from .indicator_combinator import IndicatorCombinator
|
||||
from .label_combinator import LabelCombinator
|
||||
from .title_combinator import TitleCombinator
|
||||
from .release_combinator import ReleaseCombinator
|
from .show_combinator import ShowCombinator


class BasenameCombinator2(BasenameCombinator):

    """show[_indicator]_group"""

    VARIANT = 'B2'

    # def __init__(self, SubCombinators: dict = {}, context = None):
    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getVariant(self):
        return BasenameCombinator2.VARIANT

    #
    # SHOW_LIST = [
    #     'Boruto: Naruto Next Generations (2017)',
    #     'The Rising of the Shield Hero (2019)',
    #     'Scrubs - Die Anfänger (2001)'
    # ]
    #
    # RELEASE_LIST = [
    #     ".GerEngSub.AAC.1080pINDICATOR.WebDL.x264-Tanuki",
    #     ".German.AC3.DL.1080pINDICATOR.BluRay.x264-AST4u",
    #     "-720pINDICATOR"
    # ]

    def getPayload(self,
                   indicator = '',
                   label = '',
                   show = '',
                   release = ''):

        if label:
            basename = label
            expectedBasename = label
            if indicator:
                basename += f"_{indicator}"
                expectedBasename += f"_{indicator}"
        else:
            basename = show+release
            expectedBasename = basename

        return {'basename': basename,
                'label': label,
                'expectedBasename': expectedBasename}

    def createAssertFunc(self,
                         indicator = '',
                         label = '',
                         show = '',
                         release = ''):

        def f(testObj: dict = {}):

            if not 'filenames' in testObj.keys():
                raise KeyError("testObj does not contain key 'filenames'")

            fNames = testObj['filenames']

            assert len(fNames) == 1, "More than one result file was created"

            resultFilename = fNames[0]

            fTokens = resultFilename.split('.')

            resultBasename = '.'.join(fTokens[:-1])
            resultExtension = fTokens[-1]

            if not indicator and not label:
                assert resultBasename == show+release, f"Result basename is not {show+release}"
            elif not indicator and label:
                assert resultBasename == label, f"Result basename is not {label}"
            elif indicator and not label:
                assert resultBasename == show+release, f"Result basename is not {show+release}"
            elif indicator and label:
                assert resultBasename == f"{label}_{indicator}", f"Result basename is not {label}_{indicator}"

        return f

    def shouldFail(self):
        return False

    def getYield(self):

        ic = IndicatorCombinator(self._context)
        sc = ShowCombinator(self._context)

        for L in LabelCombinator.getAllClassReferences():
            for iy in ic.getYield():

                indicator = iy['indicator']
                indicatorVariant = iy['variant']

                rc = ReleaseCombinator(self._context, indicator=indicator)

                for sy in sc.getYield():
                    for ry in rc.getYield():

                        l = L(self._context)

                        show = sy['show']
                        showVariant = sy['variant']

                        release = ry['release']
                        releaseVariant = ry['variant']

                        yieldObj = {}

                        yieldObj['identifier'] = self.getIdentifier()

                        yieldObj['variants'] = [self.getVariant(),
                                                l.getVariant(),
                                                indicatorVariant,
                                                showVariant,
                                                releaseVariant]

                        yieldObj['payload'] = self.getPayload(indicator = indicator,
                                                              label = l.getPayload(),
                                                              show = show,
                                                              release = release)

                        yieldObj['assertSelectors'] = ['B', 'L', 'I', 'S', 'R']

                        yieldObj['assertFuncs'] = [self.createAssertFunc(indicator,
                                                                         l.getPayload(),
                                                                         show = show,
                                                                         release = release),
                                                   l.assertFunc,
                                                   ic.assertFunc,
                                                   sc.assertFunc,
                                                   rc.assertFunc]

                        yieldObj['shouldFail'] = (self.shouldFail()
                                                  | l.shouldFail()
                                                  | ic.shouldFail()
                                                  | sc.shouldFail()
                                                  | rc.shouldFail())

                        yield yieldObj
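# Usage sketch: what getPayload() above returns for the two input shapes,
# assuming a context dict that provides 'logger' and 'report_logger'.
#
#   bc = BasenameCombinator2(context)
#   bc.getPayload(label='ffx', indicator='S01E02')
#   # -> {'basename': 'ffx_S01E02', 'label': 'ffx', 'expectedBasename': 'ffx_S01E02'}
#   bc.getPayload(show='Scrubs - Die Anfänger (2001)', release='-720pS01E02')
#   # -> basename == expectedBasename == 'Scrubs - Die Anfänger (2001)-720pS01E02'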
@ -1,13 +0,0 @@
class Combinator():

    def __init__(self, SubCombinations: dict):
        self._SubCombinators = SubCombinations

    def getPayload(self):
        pass

    def assertFunc(self, testObj):
        pass

    def getYield(yieldObj: dict):
        pass
@ -1,35 +0,0 @@
import os, sys, importlib, glob, inspect, itertools


class DispositionCombinator2():

    IDENTIFIER = 'disposition2'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return DispositionCombinator2.IDENTIFIER

    def getVariant(self):
        return self._variant

    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[25:-3]
                for p
                in glob.glob(f"{ basePath }/disposition_combinator_2_*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.disposition_combinator_2_{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.disposition_combinator_2_{ identifier }"]):
            #HINT: Excluding DispositionCombination as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'DispositionCombinator2' and name.startswith('DispositionCombinator2'):
                return obj

    @staticmethod
    def getAllClassReferences():
        return [DispositionCombinator2.getClassReference(i) for i in DispositionCombinator2.list()]
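# Usage sketch: how the filename-based discovery above is expected to be
# consumed, assuming variant modules named disposition_combinator_2_*.py
# inside the ffx.test package.
#
#   for VariantClass in DispositionCombinator2.getAllClassReferences():
#       combinator = VariantClass(context)              # e.g. DispositionCombinator20
#       subtrack0, subtrack1 = combinator.getPayload()  # disposition sets per subtitle track
#       check = combinator.createAssertFunc()
#       check({'tracks': trackDescriptors})             # raises AssertionError on mismatch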
@ -1,76 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_2 import DispositionCombinator2


class DispositionCombinator20(DispositionCombinator2):

    # COMMENT
    # DESCRIPTIONS

    VARIANT = 'D00'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator20.VARIANT

    def getPayload(self):

        subtrack0 = set()
        subtrack1 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            subtrack0.add(TrackDisposition.FORCED)  # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS)  # DESCRIPTIONS

        return (subtrack0,
                subtrack1)

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set 'forced' disposition"
                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                # assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DESCRIPTIONS), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved set 'descriptions' disposition"

        else:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"

        return f

    def shouldFail(self):
        return False
@ -1,79 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_2 import DispositionCombinator2


class DispositionCombinator21(DispositionCombinator2):

    VARIANT = 'D10'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator21.VARIANT

    def getPayload(self):

        if self.__createPresets:
            subtrack0 = set()
            subtrack1 = set([TrackDisposition.DEFAULT])
        else:
            subtrack0 = set([TrackDisposition.DEFAULT])
            subtrack1 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT)  # COMMENT
            subtrack1.add(TrackDisposition.FORCED)  # DESCRIPTIONS

        return (subtrack0,
                subtrack1)

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition"
                # assert trackDescriptors[0].getDispositionFlag(TrackDisposition.COMMENT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set 'comment' disposition"
                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.FORCED), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved set 'forced' disposition"

        else:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition"
                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"

        return f

    def shouldFail(self):
        return False
@ -1,79 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_2 import DispositionCombinator2


class DispositionCombinator22(DispositionCombinator2):

    VARIANT = 'D01'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator22.VARIANT

    def getPayload(self):

        if self.__createPresets:
            subtrack0 = set([TrackDisposition.DEFAULT])
            subtrack1 = set()
        else:
            subtrack0 = set()
            subtrack1 = set([TrackDisposition.DEFAULT])

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            subtrack0.add(TrackDisposition.FORCED)  # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS)  # DESCRIPTIONS

        return (subtrack0,
                subtrack1)

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set 'forced' disposition"
                # source subIndex 1
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition"
                # assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DESCRIPTIONS), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved 'forced' disposition"

        else:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                # source subIndex 1
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition"

        return f

    def shouldFail(self):
        return False
@ -1,43 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_2 import DispositionCombinator2


class DispositionCombinator23(DispositionCombinator2):

    VARIANT = 'D11'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator23.VARIANT

    def getPayload(self):

        subtrack0 = set([TrackDisposition.DEFAULT])
        subtrack1 = set([TrackDisposition.DEFAULT])

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT)  # COMMENT
            subtrack1.add(TrackDisposition.FORCED)  # DESCRIPTIONS

        return (subtrack0,
                subtrack1)

    #TODO: tmdb cases
    def createAssertFunc(self):
        def f(assertObj: dict = {}):
            pass
        return f

    def shouldFail(self):
        return True
@ -1,34 +0,0 @@
import os, sys, importlib, glob, inspect, itertools


class DispositionCombinator3():

    IDENTIFIER = 'disposition3'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return DispositionCombinator3.IDENTIFIER

    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[25:-3]
                for p
                in glob.glob(f"{ basePath }/disposition_combinator_3_*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.disposition_combinator_3_{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.disposition_combinator_3_{ identifier }"]):
            #HINT: Excluding DispositionCombination as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'DispositionCombinator3' and name.startswith('DispositionCombinator3'):
                return obj

    @staticmethod
    def getAllClassReferences():
        return [DispositionCombinator3.getClassReference(i) for i in DispositionCombinator3.list()]
@ -1,92 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_3 import DispositionCombinator3


class DispositionCombinator30(DispositionCombinator3):

    VARIANT = 'D000'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator30.VARIANT

    def getPayload(self):

        subtrack0 = set()
        subtrack1 = set()
        subtrack2 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            subtrack0.add(TrackDisposition.FORCED)  # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS)  # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED)  # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")

                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.FORCED), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set 'forced' disposition"

                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                # assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DESCRIPTIONS), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved default disposition"

                # source subIndex 2
                assert not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
                # assert trackDescriptors[2].getDispositionFlag(TrackDisposition.HEARING_IMPAIRED), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved default disposition"

        else:

            def f(assertObj: dict):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")

                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"

                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"

                # source subIndex 2
                assert not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
        return f

    def shouldFail(self):
        return False
@ -1,97 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_3 import DispositionCombinator3


class DispositionCombinator31(DispositionCombinator3):

    VARIANT = 'D100'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator31.VARIANT

    def getPayload(self):

        if self.__createPresets:
            subtrack0 = set()
            subtrack1 = set()
            subtrack2 = set([TrackDisposition.DEFAULT])
        else:
            subtrack0 = set([TrackDisposition.DEFAULT])
            subtrack1 = set()
            subtrack2 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT)  # COMMENT
            subtrack1.add(TrackDisposition.FORCED)  # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED)  # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")

                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition"
                # assert trackDescriptors[0].getDispositionFlag(TrackDisposition.COMMENT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved default disposition"

                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.FORCED), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved set 'forced' disposition"

                # source subIndex 2
                assert not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
                # assert trackDescriptors[2].getDispositionFlag(TrackDisposition.HEARING_IMPAIRED), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved default disposition"

        else:

            def f(assertObj: dict):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")

                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not set default disposition"

                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"

                # source subIndex 2
                assert not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
        return f

    def shouldFail(self):
        return False
@ -1,89 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_3 import DispositionCombinator3


class DispositionCombinator32(DispositionCombinator3):

    VARIANT = 'D010'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator32.VARIANT

    def getPayload(self):

        if self.__createPresets:
            subtrack0 = set([TrackDisposition.DEFAULT])
            subtrack1 = set()
            subtrack2 = set()
        else:
            subtrack0 = set()
            subtrack1 = set([TrackDisposition.DEFAULT])
            subtrack2 = set()

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT)  # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS)  # DESCRIPTIONS
            subtrack2.add(TrackDisposition.FORCED)  # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                # assert trackDescriptors[0].getDispositionFlag(TrackDisposition.COMMENT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set default disposition"
                # source subIndex 1
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition"
                # assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DESCRIPTIONS), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved descriptions disposition"
                # source subIndex 2
                assert not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
                assert trackDescriptors[2].getDispositionFlag(TrackDisposition.FORCED), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved set 'forced' disposition"

        else:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                # source subIndex 1
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not set default disposition"
                # source subIndex 2
                assert not trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has set default disposition"
        return f

    def shouldFail(self):
        return False
@ -1,89 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_3 import DispositionCombinator3


class DispositionCombinator33(DispositionCombinator3):

    VARIANT = 'D001'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator33.VARIANT

    def getPayload(self):

        if self.__createPresets:
            subtrack0 = set()
            subtrack1 = set([TrackDisposition.DEFAULT])
            subtrack2 = set()
        else:
            subtrack0 = set()
            subtrack1 = set()
            subtrack2 = set([TrackDisposition.DEFAULT])

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            # subtrack0.add(TrackDisposition.COMMENT)  # COMMENT
            subtrack1.add(TrackDisposition.FORCED)  # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED)  # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                # assert trackDescriptors[0].getDispositionFlag(TrackDisposition.COMMENT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has not preserved set default disposition"
                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                assert trackDescriptors[1].getDispositionFlag(TrackDisposition.FORCED), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has not preserved set 'forced' disposition"
                # source subIndex 2
                assert trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not set default disposition"
                # assert trackDescriptors[2].getDispositionFlag(TrackDisposition.HEARING_IMPAIRED), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not preserved default disposition"

        else:

            def f(assertObj: dict):
                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                trackDescriptors = assertObj['tracks']

                # source subIndex 0
                assert not trackDescriptors[0].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #0 index={trackDescriptors[0].getIndex()} [{trackDescriptors[0].getType().label()}:{trackDescriptors[0].getSubIndex()}] has set default disposition"
                # source subIndex 1
                assert not trackDescriptors[1].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #1 index={trackDescriptors[1].getIndex()} [{trackDescriptors[1].getType().label()}:{trackDescriptors[1].getSubIndex()}] has set default disposition"
                # source subIndex 2
                assert trackDescriptors[2].getDispositionFlag(TrackDisposition.DEFAULT), f"Stream #2 index={trackDescriptors[2].getIndex()} [{trackDescriptors[2].getType().label()}:{trackDescriptors[2].getSubIndex()}] has not set default disposition"
        return f

    def shouldFail(self):
        return False
@ -1,46 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .disposition_combinator_3 import DispositionCombinator3


class DispositionCombinator34(DispositionCombinator3):

    VARIANT = 'D101'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return DispositionCombinator34.VARIANT

    def getPayload(self):

        subtrack0 = set([TrackDisposition.DEFAULT])
        subtrack1 = set()
        subtrack2 = set([TrackDisposition.DEFAULT])

        #NOTE: Current ffmpeg version will not set most of the dispositions on arbitrary tracks
        #      so some checks for preserved dispositions are omitted for now
        if self.__createPresets:
            subtrack0.add(TrackDisposition.FORCED)  # COMMENT
            # subtrack1.add(TrackDisposition.DESCRIPTIONS)  # DESCRIPTIONS
            # subtrack2.add(TrackDisposition.HEARING_IMPAIRED)  # HEARING_IMPAIRED

        return (subtrack0,
                subtrack1,
                subtrack2)

    #TODO: tmdb cases
    def createAssertFunc(self):
        def f(assertObj: dict = {}):
            pass
        return f

    def shouldFail(self):
        return True
@ -1,279 +0,0 @@
import os, math, tempfile, click

from ffx.ffx_controller import FfxController

from ffx.process import executeProcess

from ffx.media_descriptor import MediaDescriptor
from ffx.track_type import TrackType

from ffx.helper import dictCache
from ffx.configuration_controller import ConfigurationController


SHORT_SUBTITLE_SEQUENCE = [{'start': 1, 'end': 2, 'text': 'yolo'},
                           {'start': 3, 'end': 4, 'text': 'zolo'},
                           {'start': 5, 'end': 6, 'text': 'golo'}]


def getTimeString(hours: float = 0.0,
                  minutes: float = 0.0,
                  seconds: float = 0.0,
                  millis: float = 0.0,
                  format: str = ''):

    duration = (hours * 3600.0
                + minutes * 60.0
                + seconds
                + millis / 1000.0)

    hours = math.floor(duration / 3600.0)
    remaining = duration - 3600.0 * hours

    minutes = math.floor(remaining / 60.0)
    remaining = remaining - 60.0 * minutes

    seconds = math.floor(remaining)
    remaining = remaining - seconds

    millis = math.floor(remaining * 1000)

    if format == 'ass':
        # ASS timestamps carry centiseconds (H:MM:SS.cc)
        centis = millis // 10
        return f"{hours:01d}:{minutes:02d}:{seconds:02d}.{centis:02d}"

    # srt & vtt
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{millis:03d}"


def createAssFile(entries: dict, directory = None):

    # [Script Info]
    # ; Script generated by FFmpeg/Lavc61.3.100
    # ScriptType: v4.00+
    # PlayResX: 384
    # PlayResY: 288
    # ScaledBorderAndShadow: yes
    # YCbCr Matrix: None
    #
    # [V4+ Styles]
    # Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
    # Style: Default,Arial,16,&Hffffff,&Hffffff,&H0,&H0,0,0,0,0,100,100,0,0,1,1,0,2,10,10,10,1
    #
    # [Events]
    # Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
    # Dialogue: 0,0:00:01.00,0:00:02.00,Default,,0,0,0,,yolo
    # Dialogue: 0,0:00:03.00,0:00:04.00,Default,,0,0,0,,zolo
    # Dialogue: 0,0:00:05.00,0:00:06.00,Default,,0,0,0,,golo
    tmpFileName = tempfile.mktemp(suffix=".ass", dir = directory)

    with open(tmpFileName, 'w') as tmpFile:

        tmpFile.write("[Script Info]\n")
        tmpFile.write("; Script generated by Ffx\n")
        tmpFile.write("ScriptType: v4.00+\n")
        tmpFile.write("PlayResX: 384\n")
        tmpFile.write("PlayResY: 288\n")
        tmpFile.write("ScaledBorderAndShadow: yes\n")
        tmpFile.write("YCbCr Matrix: None\n")
        tmpFile.write("\n")
        tmpFile.write("[V4+ Styles]\n")
        tmpFile.write("Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding\n")
        tmpFile.write("Style: Default,Arial,16,&Hffffff,&Hffffff,&H0,&H0,0,0,0,0,100,100,0,0,1,1,0,2,10,10,10,1\n")
        tmpFile.write("\n")
        tmpFile.write("[Events]\n")
        tmpFile.write("Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n")

        for entryIndex in range(len(entries)):
            tmpFile.write(f"Dialogue: 0,{getTimeString(seconds=entries[entryIndex]['start'], format='ass')},{getTimeString(seconds=entries[entryIndex]['end'], format='ass')},Default,,0,0,0,,{entries[entryIndex]['text']}\n")

    return tmpFileName


def createSrtFile(entries: dict, directory = None):
    # 1
    # 00:00:00,000 --> 00:00:02,500
    # Welcome to the Example Subtitle File!
    #
    # 2
    # 00:00:03,000 --> 00:00:06,000
    # This is a demonstration of SRT subtitles.
    #
    # 3
    # 00:00:07,000 --> 00:00:10,500
    # You can use SRT files to add subtitles to your videos.

    tmpFileName = tempfile.mktemp(suffix=".srt", dir = directory)

    with open(tmpFileName, 'w') as tmpFile:

        for entryIndex in range(len(entries)):

            # SRT cue numbering starts at 1 and uses a comma as decimal separator
            tmpFile.write(f"{entryIndex + 1}\n")
            tmpFile.write(f"{getTimeString(seconds=entries[entryIndex]['start']).replace('.', ',')} --> {getTimeString(seconds=entries[entryIndex]['end']).replace('.', ',')}\n")
            tmpFile.write(f"{entries[entryIndex]['text']}\n\n")

    return tmpFileName


def createVttFile(entries: dict, directory = None):
    # WEBVTT
    #
    # 01:20:33.050 --> 01:20:35.050
    # Yolo

    tmpFileName = tempfile.mktemp(suffix=".vtt", dir = directory)

    with open(tmpFileName, 'w') as tmpFile:

        tmpFile.write("WEBVTT\n")

        for entryIndex in range(len(entries)):

            tmpFile.write("\n")
            tmpFile.write(f"{getTimeString(seconds=entries[entryIndex]['start'])} --> {getTimeString(seconds=entries[entryIndex]['end'])}\n")
            tmpFile.write(f"{entries[entryIndex]['text']}\n")

    return tmpFileName


def createMediaTestFile(mediaDescriptor: MediaDescriptor,
                        directory: str = '',
                        baseName: str = 'media',
                        format: str = '',
                        extension: str = 'mkv',
                        sizeX: int = 1280,
                        sizeY: int = 720,
                        rate: int = 25,
                        length: int = 10,
                        logger = None):

    # subtitleFilePath = createVttFile(SHORT_SUBTITLE_SEQUENCE)

    # commandTokens = FfxController.COMMAND_TOKENS
    commandTokens = ['ffmpeg', '-y']

    generatorCache = []
    generatorTokens = []
    mappingTokens = []
    importTokens = []
    metadataTokens = []

    for mediaTagKey, mediaTagValue in mediaDescriptor.getTags().items():
        metadataTokens += ['-metadata:g', f"{mediaTagKey}={mediaTagValue}"]

    subIndexCounter = {}

    # for trackDescriptor in mediaDescriptor.getAllTrackDescriptors():
    for trackDescriptor in mediaDescriptor.getTrackDescriptors():

        trackType = trackDescriptor.getType()

        if trackType == TrackType.VIDEO:

            cacheIndex, generatorCache = dictCache({'type': TrackType.VIDEO}, generatorCache)
            # click.echo(f"createMediaTestFile() cache index={cacheIndex} size={len(generatorCache)}")

            if cacheIndex == -1:
                generatorTokens += ['-f',
                                    'lavfi',
                                    '-i',
                                    f"color=size={sizeX}x{sizeY}:rate={rate}:color=black"]

            sourceIndex = len(generatorCache) - 1 if cacheIndex == -1 else cacheIndex
            mappingTokens += ['-map', f"{sourceIndex}:v:0"]

            if not trackType in subIndexCounter.keys():
                subIndexCounter[trackType] = 0
            for mediaTagKey, mediaTagValue in trackDescriptor.getTags().items():
                metadataTokens += [f"-metadata:s:{trackType.indicator()}:{subIndexCounter[trackType]}",
                                   f"{mediaTagKey}={mediaTagValue}"]
            subIndexCounter[trackType] += 1

        if trackType == TrackType.AUDIO:

            audioLayout = 'stereo'

            cacheIndex, generatorCache = dictCache({'type': TrackType.AUDIO, 'layout': audioLayout}, generatorCache)
            # click.echo(f"createMediaTestFile() cache index={cacheIndex} size={len(generatorCache)}")

            # click.echo(f"generatorCache index={cacheIndex} len={len(generatorCache)}")
            if cacheIndex == -1:
                generatorTokens += ['-f',
                                    'lavfi',
                                    '-i',
                                    f"anullsrc=channel_layout={audioLayout}:sample_rate=44100"]

            sourceIndex = len(generatorCache) - 1 if cacheIndex == -1 else cacheIndex
            mappingTokens += ['-map', f"{sourceIndex}:a:0"]

            if not trackType in subIndexCounter.keys():
                subIndexCounter[trackType] = 0
            for mediaTagKey, mediaTagValue in trackDescriptor.getTags().items():
                metadataTokens += [f"-metadata:s:{trackType.indicator()}:{subIndexCounter[trackType]}",
                                   f"{mediaTagKey}={mediaTagValue}"]
            subIndexCounter[trackType] += 1

        if trackType == TrackType.SUBTITLE:

            cacheIndex, generatorCache = dictCache({'type': TrackType.SUBTITLE}, generatorCache)
            # click.echo(f"createMediaTestFile() cache index={cacheIndex} size={len(generatorCache)}")

            if cacheIndex == -1:
                importTokens = ['-i', createVttFile(SHORT_SUBTITLE_SEQUENCE, directory=directory if directory else None)]

            sourceIndex = len(generatorCache) - 1 if cacheIndex == -1 else cacheIndex
            mappingTokens += ['-map', f"{sourceIndex}:s:0"]

            if not trackType in subIndexCounter.keys():
                subIndexCounter[trackType] = 0
            for mediaTagKey, mediaTagValue in trackDescriptor.getTags().items():
                metadataTokens += [f"-metadata:s:{trackType.indicator()}:{subIndexCounter[trackType]}",
                                   f"{mediaTagKey}={mediaTagValue}"]
            subIndexCounter[trackType] += 1

    #TODO: Optimize too many runs
    ffxContext = {'config': ConfigurationController(), 'logger': logger}
    fc = FfxController(ffxContext, mediaDescriptor)

    commandTokens += (generatorTokens
                      + importTokens
                      + mappingTokens
                      + metadataTokens
                      + fc.generateDispositionTokens())

    commandTokens += ['-t', str(length)]

    if format:
        commandTokens += ['-f', format]

    fileName = f"{baseName}.{extension}"

    if directory:
        outputPath = os.path.join(directory, fileName)
    else:
        outputPath = fileName

    commandTokens += [outputPath]

    ctx = {'logger': logger}

    out, err, rc = executeProcess(commandTokens, context = ctx)

    if not logger is None:
        if out:
            logger.debug(f"createMediaTestFile(): Process output: {out}")
        if rc:
            logger.debug(f"createMediaTestFile(): Process returned ERROR {rc} ({err})")

    return outputPath


def createEmptyDirectory():
    return tempfile.mkdtemp()

def createEmptyFile(suffix=None):
    return tempfile.mkstemp(suffix=suffix)
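# Worked examples for getTimeString(): the helper folds the four components into
# one duration in seconds and re-splits it into hours/minutes/seconds.
#
#   getTimeString(seconds=3, millis=250)                 # -> '00:00:03.250' (srt/vtt style)
#   getTimeString(minutes=90)                            # -> '01:30:00.000'
#   getTimeString(seconds=3, millis=250, format='ass')   # -> '0:00:03.25'   (ASS centiseconds)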
@ -1,43 +0,0 @@
class IndicatorCombinator():

    IDENTIFIER = 'indicator'

    MAX_SEASON = 2
    MAX_EPISODE = 3

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return IndicatorCombinator.IDENTIFIER

    def getPayload(self, season: int = -1, episode: int = -1):
        if season == -1 and episode == -1:
            return {
                'variant': 'S00E00',
                'indicator': '',
                'season': season,
                'episode': episode
            }
        else:
            return {
                'variant': f"S{season:02d}E{episode:02d}",
                'indicator': f"S{season:02d}E{episode:02d}",
                'season': season,
                'episode': episode
            }

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False

    def getYield(self):

        yield self.getPayload()
        for season in range(IndicatorCombinator.MAX_SEASON):
            for episode in range(IndicatorCombinator.MAX_EPISODE):
                yield self.getPayload(season+1, episode+1)
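# Sketch of the sequence produced by getYield() above with MAX_SEASON = 2 and
# MAX_EPISODE = 3: the empty indicator first, then every season/episode pair.
#
#   [y['indicator'] for y in IndicatorCombinator(context).getYield()]
#   # -> ['', 'S01E01', 'S01E02', 'S01E03', 'S02E01', 'S02E02', 'S02E03']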
@ -1,36 +0,0 @@
import os, sys, importlib, glob, inspect, itertools


class LabelCombinator():

    IDENTIFIER = 'label'
    PREFIX = 'label_combinator_'

    LABEL = 'ffx'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return LabelCombinator.IDENTIFIER

    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[len(LabelCombinator.PREFIX):-3]
                for p
                in glob.glob(f"{ basePath }/{LabelCombinator.PREFIX}*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.{LabelCombinator.PREFIX}{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.{LabelCombinator.PREFIX}{ identifier }"]):
            #HINT: Excluding LabelCombinator as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'LabelCombinator' and name.startswith('LabelCombinator'):
                return obj

    @staticmethod
    def getAllClassReferences():
        return [LabelCombinator.getClassReference(i) for i in LabelCombinator.list()]
@ -1,30 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .label_combinator import LabelCombinator


class LabelCombinator0(LabelCombinator):

    VARIANT = 'L0'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getVariant(self):
        return LabelCombinator0.VARIANT

    def getPayload(self):
        return ''

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False
@ -1,30 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .label_combinator import LabelCombinator


class LabelCombinator1(LabelCombinator):

    VARIANT = 'L1'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getVariant(self):
        return LabelCombinator1.VARIANT

    def getPayload(self):
        return LabelCombinator.LABEL

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False
@ -1,33 +0,0 @@
import os, sys, importlib, glob, inspect, itertools


class MediaCombinator():

    IDENTIFIER = 'media'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return MediaCombinator.IDENTIFIER

    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[17:-3]
                for p
                in glob.glob(f"{ basePath }/media_combinator_*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.media_combinator_{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.media_combinator_{ identifier }"]):
            #HINT: Excluding MediaCombinator as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'MediaCombinator' and name.startswith('MediaCombinator'):
                return obj

    @staticmethod
    def getAllClassReferences():
        return [MediaCombinator.getClassReference(i) for i in MediaCombinator.list()]
@ -1,103 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .media_combinator import MediaCombinator
from .media_tag_combinator import MediaTagCombinator


class MediaCombinator0(MediaCombinator):

    VARIANT = 'VA'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return MediaCombinator0.VARIANT

    def getPayload(self):

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 0
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        trackDescriptor1 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 1
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        trackDescriptor2 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
        kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor1,
                                                             trackDescriptor2]
        mediaDescriptor = MediaDescriptor(**kwargs)
        # mediaDescriptor.reindexSubIndices()

        return mediaDescriptor

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False

    def getYield(self):

        for MTC in MediaTagCombinator.getAllClassReferences():

            mtc = MTC(self._context)

            yObj = {}

            yObj['identifier'] = self.getIdentifier()
            yObj['variants'] = [self.getVariant(),
                                mtc.getVariant()]
            yObj['payload'] = self.getPayload()

            yObj['assertSelectors'] = ['M', 'MT']
            yObj['assertFuncs'] = [self.assertFunc,
                                   mtc.createAssertFunc()]
            yObj['shouldFail'] = (self.shouldFail()
                                  | mtc.shouldFail())

            yieldObj = {'target': yObj}

            if self.__createPresets:

                mtc_p = MTC(self._context, createPresets = True)

                yObj_p = {}

                yObj_p['identifier'] = self.getIdentifier()
                yObj_p['variants'] = [self.getVariant(),
                                      mtc_p.getVariant()]

                yObj_p['payload'] = self.getPayload()

                yObj_p['assertSelectors'] = ['M', 'MT']
                yObj_p['assertFuncs'] = [self.assertFunc,
                                         mtc_p.createAssertFunc()]
                yObj_p['shouldFail'] = (self.shouldFail()
                                        | mtc_p.shouldFail())

                yieldObj['preset'] = yObj_p

            yield yieldObj
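# Sketch of the object shape yielded by MediaCombinator0.getYield(); the 'preset'
# entry only appears when the combinator was constructed with createPresets=True.
#
#   {'target': {'identifier': 'media',
#               'variants': ['VA', <media tag variant>],
#               'payload': <MediaDescriptor>,
#               'assertSelectors': ['M', 'MT'],
#               'assertFuncs': [<callable>, <callable>],
#               'shouldFail': False},
#    'preset': { ... same keys, built with createPresets = True ... }}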
@ -1,114 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .media_combinator import MediaCombinator
from .media_tag_combinator import MediaTagCombinator


class MediaCombinator1(MediaCombinator):

    VARIANT = 'VAS'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return MediaCombinator1.VARIANT

    def getPayload(self):

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 0
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        trackDescriptor0 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 1
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        trackDescriptor1 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 2
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        trackDescriptor2 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
        kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
                                                             trackDescriptor1,
                                                             trackDescriptor2]

        mediaDescriptor = MediaDescriptor(**kwargs)
        # mediaDescriptor.reindexSubIndices()

        return mediaDescriptor

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False

    def getYield(self):

        for MTC in MediaTagCombinator.getAllClassReferences():

            mtc = MTC(self._context)

            yObj = {}

            yObj['identifier'] = self.getIdentifier()
            yObj['variants'] = [self.getVariant(),
                                mtc.getVariant()]

            yObj['payload'] = self.getPayload()

            yObj['assertSelectors'] = ['M', 'MT']
            yObj['assertFuncs'] = [self.assertFunc,
                                   mtc.createAssertFunc()]

            yObj['shouldFail'] = (self.shouldFail()
                                  | mtc.shouldFail())

            yieldObj = {'target': yObj}

            if self.__createPresets:

                mtc_p = MTC(self._context, createPresets = True)

                yObj_p = {}

                yObj_p['identifier'] = self.getIdentifier()
                yObj_p['variants'] = [self.getVariant(),
                                      mtc_p.getVariant()]

                yObj_p['payload'] = self.getPayload()

                yObj_p['assertSelectors'] = ['M', 'MT']
                yObj_p['assertFuncs'] = [self.assertFunc,
                                         mtc_p.createAssertFunc()]

                yObj_p['shouldFail'] = (self.shouldFail()
                                        | mtc_p.shouldFail())
                yieldObj['preset'] = yObj_p

            yield yieldObj
@ -1,156 +0,0 @@
import os, sys, importlib, glob, inspect, itertools, click

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .media_combinator import MediaCombinator

from .disposition_combinator_2 import DispositionCombinator2
from .track_tag_combinator_2 import TrackTagCombinator2
from .permutation_combinator_2 import PermutationCombinator2
from .media_tag_combinator import MediaTagCombinator


class MediaCombinator2(MediaCombinator):

    VARIANT = 'VASS'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return MediaCombinator2.VARIANT

    def getPayload(self,
                   subtitleDispositionTuple = (set(), set()),
                   subtitleTagTuple = ({}, {})):

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 0
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        trackDescriptor0 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 1
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        trackDescriptor1 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 2
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
        kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
        kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
        trackDescriptor2 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
        kwargs[TrackDescriptor.INDEX_KEY] = 3
        kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
        kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
        kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
        kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
        kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
        trackDescriptor3 = TrackDescriptor(**kwargs)

        kwargs = {}
        kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
        kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
                                                             trackDescriptor1,
                                                             trackDescriptor2,
                                                             trackDescriptor3]

        mediaDescriptor = MediaDescriptor(**kwargs)
        # mediaDescriptor.reindexSubIndices()

        return mediaDescriptor

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False

    def getYield(self):

        for MTC in MediaTagCombinator.getAllClassReferences():
            for DC2 in DispositionCombinator2.getAllClassReferences():
                for TC2 in TrackTagCombinator2.getAllClassReferences():

                    dc2 = DC2(self._context)
                    tc2 = TC2(self._context)

                    mtc = MTC(self._context)

                    yObj = {}

                    yObj['identifier'] = self.getIdentifier()
                    yObj['variants'] = [self.getVariant(),
                                        f"S:{dc2.getVariant()}",
                                        f"S:{tc2.getVariant()}",
                                        mtc.getVariant()]

                    yObj['payload'] = self.getPayload(dc2.getPayload(),
                                                      tc2.getPayload())

                    yObj['assertSelectors'] = ['M', 'SD', 'ST', 'MT']
                    yObj['assertFuncs'] = [self.assertFunc,
                                           dc2.createAssertFunc(),
                                           tc2.createAssertFunc(),
                                           mtc.createAssertFunc()]

                    yObj['shouldFail'] = (self.shouldFail()
|
||||
| dc2.shouldFail()
|
||||
| tc2.shouldFail()
|
||||
| mtc.shouldFail())
|
||||
|
||||
yieldObj = {'target': yObj}
|
||||
|
||||
if self.__createPresets:
|
||||
|
||||
dc2_p = DC2(self._context, createPresets = True)
|
||||
tc2_p = TC2(self._context, createPresets = True)
|
||||
|
||||
mtc_p = MTC(self._context, createPresets = True)
|
||||
|
||||
yObj_p = {}
|
||||
|
||||
yObj_p['identifier'] = self.getIdentifier()
|
||||
yObj_p['variants'] = [self.getVariant(),
|
||||
f"S:{dc2_p.getVariant()}",
|
||||
f"S:{tc2_p.getVariant()}",
|
||||
mtc_p.getVariant()]
|
||||
|
||||
yObj_p['payload'] = self.getPayload(dc2_p.getPayload(),
|
||||
tc2_p.getPayload())
|
||||
|
||||
yObj_p['assertSelectors'] = ['M', 'SD', 'ST', 'MT']
|
||||
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||
dc2_p.createAssertFunc(),
|
||||
tc2_p.createAssertFunc(),
|
||||
mtc_p.createAssertFunc()]
|
||||
|
||||
yObj_p['shouldFail'] = (self.shouldFail()
|
||||
| dc2_p.shouldFail()
|
||||
| tc2_p.shouldFail()
|
||||
| mtc_p.shouldFail())
|
||||
|
||||
yieldObj['preset'] = yObj_p
|
||||
|
||||
yield yieldObj
|
||||
@ -1,165 +0,0 @@
|
||||
import os, sys, importlib, glob, inspect, itertools
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from .media_combinator import MediaCombinator
|
||||
|
||||
from .disposition_combinator_3 import DispositionCombinator3
|
||||
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||
from .permutation_combinator_3 import PermutationCombinator3
|
||||
from .media_tag_combinator import MediaTagCombinator
|
||||
|
||||
|
||||
class MediaCombinator3(MediaCombinator):
|
||||
|
||||
VARIANT = 'VASSS'
|
||||
|
||||
def __init__(self, context = None,
|
||||
createPresets: bool = False):
|
||||
super().__init__(context)
|
||||
|
||||
self.__createPresets = createPresets
|
||||
|
||||
|
||||
def getVariant(self):
|
||||
return MediaCombinator3.VARIANT
|
||||
|
||||
|
||||
def getPayload(self,
|
||||
subtitleDispositionTuple = (set(), set(), set()),
|
||||
subtitleTagTuple = ({}, {}, {})):
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
|
||||
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
|
||||
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 4
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 4
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[2]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[2]
|
||||
trackDescriptor4 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||
trackDescriptor1,
|
||||
trackDescriptor2,
|
||||
trackDescriptor3,
|
||||
trackDescriptor4]
|
||||
|
||||
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||
# mediaDescriptor.reindexSubIndices()
|
||||
|
||||
return mediaDescriptor
|
||||
|
||||
|
||||
def assertFunc(self, testObj = {}):
|
||||
pass
|
||||
|
||||
def shouldFail(self):
|
||||
return False
|
||||
|
||||
def getYield(self):
|
||||
|
||||
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||
for DC3 in DispositionCombinator3.getAllClassReferences():
|
||||
for TC3 in TrackTagCombinator3.getAllClassReferences():
|
||||
|
||||
dc3 = DC3(self._context)
|
||||
tc3 = TC3(self._context)
|
||||
|
||||
mtc = MTC(self._context)
|
||||
|
||||
yObj = {}
|
||||
|
||||
yObj['identifier'] = self.getIdentifier()
|
||||
yObj['variants'] = [self.getVariant(),
|
||||
f"S:{dc3.getVariant()}",
|
||||
f"S:{tc3.getVariant()}",
|
||||
mtc.getVariant()]
|
||||
|
||||
yObj['payload'] = self.getPayload(dc3.getPayload(),
|
||||
tc3.getPayload())
|
||||
|
||||
yObj['assertSelectors'] = ['M', 'SD', 'ST', 'MT']
|
||||
yObj['assertFuncs'] = [self.assertFunc,
|
||||
dc3.createAssertFunc(),
|
||||
tc3.createAssertFunc(),
|
||||
mtc.createAssertFunc()]
|
||||
|
||||
yObj['shouldFail'] = (self.shouldFail()
|
||||
| dc3.shouldFail()
|
||||
| tc3.shouldFail()
|
||||
| mtc.shouldFail())
|
||||
yieldObj = {'target': yObj}
|
||||
|
||||
if self.__createPresets:
|
||||
|
||||
dc3_p = DC3(self._context, createPresets = True)
|
||||
tc3_p = TC3(self._context, createPresets = True)
|
||||
|
||||
mtc_p = MTC(self._context, createPresets = True)
|
||||
|
||||
yObj_p = {}
|
||||
|
||||
yObj_p['identifier'] = self.getIdentifier()
|
||||
yObj_p['variants'] = [self.getVariant(),
|
||||
f"S:{dc3_p.getVariant()}",
|
||||
f"S:{tc3_p.getVariant()}",
|
||||
mtc_p.getVariant()]
|
||||
|
||||
yObj_p['payload'] = self.getPayload(dc3_p.getPayload(),
|
||||
tc3_p.getPayload())
|
||||
|
||||
yObj_p['assertSelectors'] = ['M', 'SD', 'ST', 'MT']
|
||||
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||
dc3_p.createAssertFunc(),
|
||||
tc3_p.createAssertFunc(),
|
||||
mtc_p.createAssertFunc()]
|
||||
|
||||
yObj_p['shouldFail'] = (self.shouldFail()
|
||||
| dc3_p.shouldFail()
|
||||
| tc3_p.shouldFail()
|
||||
| mtc_p.shouldFail())
|
||||
yieldObj['preset'] = yObj_p
|
||||
|
||||
yield yieldObj
|
||||
@ -1,145 +0,0 @@
|
||||
import os, sys, importlib, glob, inspect, itertools
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from .media_combinator import MediaCombinator
|
||||
|
||||
from .disposition_combinator_2 import DispositionCombinator2
|
||||
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||
from .permutation_combinator_2 import PermutationCombinator2
|
||||
from .media_tag_combinator import MediaTagCombinator
|
||||
|
||||
|
||||
class MediaCombinator4(MediaCombinator):
|
||||
|
||||
VARIANT = 'VAA'
|
||||
|
||||
def __init__(self, context = None,
|
||||
createPresets: bool = False):
|
||||
super().__init__(context)
|
||||
|
||||
self.__createPresets = createPresets
|
||||
|
||||
|
||||
def getVariant(self):
|
||||
return MediaCombinator4.VARIANT
|
||||
|
||||
|
||||
def getPayload(self,
|
||||
audioDispositionTuple = (set(), set()),
|
||||
audioTagTuple = ({}, {})):
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||
trackDescriptor1,
|
||||
trackDescriptor2]
|
||||
|
||||
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||
# mediaDescriptor.reindexSubIndices()
|
||||
|
||||
return mediaDescriptor
|
||||
|
||||
|
||||
def assertFunc(self, testObj = {}):
|
||||
pass
|
||||
|
||||
def shouldFail(self):
|
||||
return False
|
||||
|
||||
def getYield(self):
|
||||
|
||||
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||
for DC2 in DispositionCombinator2.getAllClassReferences():
|
||||
for TC2 in TrackTagCombinator2.getAllClassReferences():
|
||||
|
||||
dc2 = DC2(self._context)
|
||||
tc2 = TC2(self._context)
|
||||
|
||||
mtc = MTC(self._context)
|
||||
|
||||
yObj = {}
|
||||
|
||||
yObj['identifier'] = self.getIdentifier()
|
||||
yObj['variants'] = [self.getVariant(),
|
||||
f"A:{dc2.getVariant()}",
|
||||
f"A:{tc2.getVariant()}",
|
||||
mtc.getVariant()]
|
||||
|
||||
yObj['payload'] = self.getPayload(dc2.getPayload(),
|
||||
tc2.getPayload())
|
||||
|
||||
yObj['assertSelectors'] = ['M', 'AD', 'AT', 'MT']
|
||||
yObj['assertFuncs'] = [self.assertFunc,
|
||||
dc2.createAssertFunc(),
|
||||
tc2.createAssertFunc(),
|
||||
mtc.createAssertFunc()]
|
||||
|
||||
yObj['shouldFail'] = (self.shouldFail()
|
||||
| dc2.shouldFail()
|
||||
| tc2.shouldFail()
|
||||
| mtc.shouldFail())
|
||||
yieldObj = {'target': yObj}
|
||||
|
||||
if self.__createPresets:
|
||||
|
||||
dc2_p = DC2(self._context, createPresets = True)
|
||||
tc2_p = TC2(self._context, createPresets = True)
|
||||
|
||||
mtc_p = MTC(self._context, createPresets = True)
|
||||
|
||||
yObj_p = {}
|
||||
|
||||
yObj_p['identifier'] = self.getIdentifier()
|
||||
yObj_p['variants'] = [self.getVariant(),
|
||||
f"A:{dc2_p.getVariant()}",
|
||||
f"A:{tc2_p.getVariant()}",
|
||||
mtc_p.getVariant()]
|
||||
|
||||
yObj_p['payload'] = self.getPayload(dc2_p.getPayload(),
|
||||
tc2_p.getPayload())
|
||||
|
||||
yObj_p['assertSelectors'] = ['M', 'AD', 'AT', 'MT']
|
||||
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||
dc2_p.createAssertFunc(),
|
||||
tc2_p.createAssertFunc(),
|
||||
mtc_p.createAssertFunc()]
|
||||
|
||||
yObj_p['shouldFail'] = (self.shouldFail()
|
||||
| dc2_p.shouldFail()
|
||||
| tc2_p.shouldFail()
|
||||
| mtc_p.shouldFail())
|
||||
yieldObj['preset'] = yObj_p
|
||||
|
||||
yield yieldObj
|
||||
@ -1,155 +0,0 @@
|
||||
import os, sys, importlib, glob, inspect, itertools
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from .media_combinator import MediaCombinator
|
||||
|
||||
from .disposition_combinator_2 import DispositionCombinator2
|
||||
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||
from .permutation_combinator_2 import PermutationCombinator2
|
||||
from .media_tag_combinator import MediaTagCombinator
|
||||
|
||||
|
||||
class MediaCombinator5(MediaCombinator):
|
||||
|
||||
VARIANT = 'VAAS'
|
||||
|
||||
def __init__(self, context = None,
|
||||
createPresets: bool = False):
|
||||
super().__init__(context)
|
||||
|
||||
self.__createPresets = createPresets
|
||||
|
||||
|
||||
def getVariant(self):
|
||||
return MediaCombinator5.VARIANT
|
||||
|
||||
|
||||
def getPayload(self,
|
||||
audioDispositionTuple = (set(), set()),
|
||||
audioTagTuple = ({}, {})):
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||
|
||||
|
||||
kwargs = {}
|
||||
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||
trackDescriptor1,
|
||||
trackDescriptor2,
|
||||
trackDescriptor3]
|
||||
|
||||
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||
# mediaDescriptor.reindexSubIndices()
|
||||
|
||||
return mediaDescriptor
|
||||
|
||||
|
||||
def assertFunc(self, testObj = {}):
|
||||
pass
|
||||
|
||||
def shouldFail(self):
|
||||
return False
|
||||
|
||||
def getYield(self):
|
||||
|
||||
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||
for DC2 in DispositionCombinator2.getAllClassReferences():
|
||||
for TC2 in TrackTagCombinator2.getAllClassReferences():
|
||||
|
||||
dc2 = DC2(self._context)
|
||||
tc2 = TC2(self._context)
|
||||
|
||||
mtc = MTC(self._context)
|
||||
|
||||
yObj = {}
|
||||
|
||||
yObj['identifier'] = self.getIdentifier()
|
||||
yObj['variants'] = [self.getVariant(),
|
||||
f"A:{dc2.getVariant()}",
|
||||
f"A:{tc2.getVariant()}",
|
||||
mtc.getVariant()]
|
||||
|
||||
yObj['payload'] = self.getPayload(dc2.getPayload(),
|
||||
tc2.getPayload())
|
||||
|
||||
yObj['assertSelectors'] = ['M', 'AD', 'AT', 'MT']
|
||||
yObj['assertFuncs'] = [self.assertFunc,
|
||||
dc2.createAssertFunc(),
|
||||
tc2.createAssertFunc(),
|
||||
mtc.createAssertFunc()]
|
||||
|
||||
yObj['shouldFail'] = (self.shouldFail()
|
||||
| dc2.shouldFail()
|
||||
| tc2.shouldFail()
|
||||
| mtc.shouldFail())
|
||||
yieldObj = {'target': yObj}
|
||||
|
||||
if self.__createPresets:
|
||||
|
||||
dc2_p = DC2(self._context, createPresets = True)
|
||||
tc2_p = TC2(self._context, createPresets = True)
|
||||
|
||||
mtc_p = MTC(self._context, createPresets = True)
|
||||
|
||||
yObj_p = {}
|
||||
|
||||
yObj_p['identifier'] = self.getIdentifier()
|
||||
yObj_p['variants'] = [self.getVariant(),
|
||||
f"A:{dc2_p.getVariant()}",
|
||||
f"A:{tc2_p.getVariant()}",
|
||||
mtc_p.getVariant()]
|
||||
|
||||
yObj_p['payload'] = self.getPayload(dc2_p.getPayload(),
|
||||
tc2_p.getPayload())
|
||||
|
||||
yObj_p['assertSelectors'] = ['M', 'AD', 'AT', 'MT']
|
||||
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||
dc2_p.createAssertFunc(),
|
||||
tc2_p.createAssertFunc(),
|
||||
mtc_p.createAssertFunc()]
|
||||
|
||||
yObj_p['shouldFail'] = (self.shouldFail()
|
||||
| dc2_p.shouldFail()
|
||||
| tc2_p.shouldFail()
|
||||
| mtc_p.shouldFail())
|
||||
yieldObj['preset'] = yObj_p
|
||||
|
||||
yield yieldObj
|
||||
@ -1,191 +0,0 @@
|
||||
import os, sys, importlib, glob, inspect, itertools
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from .media_combinator import MediaCombinator
|
||||
|
||||
from .disposition_combinator_2 import DispositionCombinator2
|
||||
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||
from .permutation_combinator_2 import PermutationCombinator2
|
||||
from .media_tag_combinator import MediaTagCombinator
|
||||
|
||||
|
||||
class MediaCombinator6(MediaCombinator):
|
||||
|
||||
VARIANT = 'VAASS'
|
||||
|
||||
def __init__(self, context = None,
|
||||
createPresets: bool = False):
|
||||
super().__init__(context)
|
||||
|
||||
self.__createPresets = createPresets
|
||||
|
||||
|
||||
def getVariant(self):
|
||||
return MediaCombinator6.VARIANT
|
||||
|
||||
|
||||
def getPayload(self,
|
||||
audioDispositionTuple = (set(), set()),
|
||||
audioTagTuple = ({}, {}),
|
||||
subtitleDispositionTuple = (set(), set()),
|
||||
subtitleTagTuple = ({}, {})):
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
|
||||
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 4
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 4
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
|
||||
trackDescriptor4 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||
trackDescriptor1,
|
||||
trackDescriptor2,
|
||||
trackDescriptor3,
|
||||
trackDescriptor4]
|
||||
|
||||
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||
# mediaDescriptor.reindexSubIndices()
|
||||
|
||||
return mediaDescriptor
|
||||
|
||||
|
||||
def assertFunc(self, testObj = {}):
|
||||
pass
|
||||
|
||||
def shouldFail(self):
|
||||
return False
|
||||
|
||||
def getYield(self):
|
||||
|
||||
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||
for DC2_A in DispositionCombinator2.getAllClassReferences():
|
||||
for TC2_A in TrackTagCombinator2.getAllClassReferences():
|
||||
for DC2_S in DispositionCombinator2.getAllClassReferences():
|
||||
for TC2_S in TrackTagCombinator2.getAllClassReferences():
|
||||
|
||||
dc2a = DC2_A(self._context)
|
||||
tc2a = TC2_A(self._context)
|
||||
dc2s = DC2_S(self._context)
|
||||
tc2s = TC2_S(self._context)
|
||||
|
||||
mtc = MTC(self._context)
|
||||
|
||||
yObj = {}
|
||||
|
||||
yObj['identifier'] = self.getIdentifier()
|
||||
yObj['variants'] = [self.getVariant(),
|
||||
f"A:{dc2a.getVariant()}",
|
||||
f"A:{tc2a.getVariant()}",
|
||||
f"S:{dc2s.getVariant()}",
|
||||
f"S:{tc2s.getVariant()}",
|
||||
mtc.getVariant()]
|
||||
|
||||
yObj['payload'] = self.getPayload(dc2a.getPayload(),
|
||||
tc2a.getPayload(),
|
||||
dc2s.getPayload(),
|
||||
tc2s.getPayload())
|
||||
|
||||
yObj['assertSelectors'] = ['M', 'AD', 'AT', 'SD', 'ST', 'MT']
|
||||
yObj['assertFuncs'] = [self.assertFunc,
|
||||
dc2a.createAssertFunc(),
|
||||
tc2a.createAssertFunc(),
|
||||
dc2s.createAssertFunc(),
|
||||
tc2s.createAssertFunc(),
|
||||
mtc.createAssertFunc()]
|
||||
|
||||
yObj['shouldFail'] = (self.shouldFail()
|
||||
| dc2a.shouldFail()
|
||||
| tc2a.shouldFail()
|
||||
| dc2s.shouldFail()
|
||||
| tc2s.shouldFail()
|
||||
| mtc.shouldFail())
|
||||
yieldObj = {'target': yObj}
|
||||
|
||||
if self.__createPresets:
|
||||
|
||||
dc2a_p = DC2_A(self._context, createPresets = True)
|
||||
tc2a_p = TC2_A(self._context, createPresets = True)
|
||||
dc2s_p = DC2_S(self._context, createPresets = True)
|
||||
tc2s_p = TC2_S(self._context, createPresets = True)
|
||||
|
||||
mtc_p = MTC(self._context, createPresets = True)
|
||||
|
||||
yObj_p = {}
|
||||
|
||||
yObj_p['identifier'] = self.getIdentifier()
|
||||
yObj_p['variants'] = [self.getVariant(),
|
||||
f"A:{dc2a_p.getVariant()}",
|
||||
f"A:{tc2a_p.getVariant()}",
|
||||
f"S:{dc2s_p.getVariant()}",
|
||||
f"S:{tc2s_p.getVariant()}",
|
||||
mtc_p.getVariant()]
|
||||
|
||||
yObj_p['payload'] = self.getPayload(dc2a_p.getPayload(),
|
||||
tc2a_p.getPayload(),
|
||||
dc2s_p.getPayload(),
|
||||
tc2s_p.getPayload())
|
||||
|
||||
yObj_p['assertSelectors'] = ['M', 'AD', 'AT', 'SD', 'ST', 'MT']
|
||||
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||
dc2a_p.createAssertFunc(),
|
||||
tc2a_p.createAssertFunc(),
|
||||
dc2s_p.createAssertFunc(),
|
||||
tc2s_p.createAssertFunc(),
|
||||
mtc_p.createAssertFunc()]
|
||||
|
||||
yObj_p['shouldFail'] = (self.shouldFail()
|
||||
| dc2a_p.shouldFail()
|
||||
| tc2a_p.shouldFail()
|
||||
| dc2s_p.shouldFail()
|
||||
| tc2s_p.shouldFail()
|
||||
| mtc_p.shouldFail())
|
||||
yieldObj['preset'] = yObj_p
|
||||
|
||||
yield yieldObj
|
||||
@ -1,230 +0,0 @@
|
||||
import os, sys, importlib, glob, inspect, itertools
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
|
||||
from .media_combinator import MediaCombinator
|
||||
|
||||
from .disposition_combinator_2 import DispositionCombinator2
|
||||
from .disposition_combinator_3 import DispositionCombinator3
|
||||
from .track_tag_combinator_2 import TrackTagCombinator2
|
||||
from .track_tag_combinator_3 import TrackTagCombinator3
|
||||
from .permutation_combinator_2 import PermutationCombinator2
|
||||
from .permutation_combinator_3 import PermutationCombinator3
|
||||
from .media_tag_combinator import MediaTagCombinator
|
||||
|
||||
class MediaCombinator7(MediaCombinator):
|
||||
|
||||
VARIANT = 'VAASSS'
|
||||
|
||||
def __init__(self, context = None,
|
||||
createPresets: bool = False):
|
||||
super().__init__(context)
|
||||
|
||||
self.__createPresets = createPresets
|
||||
|
||||
|
||||
def getVariant(self):
|
||||
return MediaCombinator7.VARIANT
|
||||
|
||||
|
||||
def getPayload(self,
|
||||
audioPermutation,
|
||||
subtitlePermutation,
|
||||
audioDispositionTuple = (set(), set()),
|
||||
audioTagTuple = ({}, {}),
|
||||
subtitleDispositionTuple = (set(), set(), set()),
|
||||
subtitleTagTuple = ({}, {}, {})):
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.VIDEO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
trackDescriptor0 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[0]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[0]
|
||||
trackDescriptor1 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.AUDIO
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = audioDispositionTuple[1]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = audioTagTuple[1]
|
||||
trackDescriptor2 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 3
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 0
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[0]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[0]
|
||||
trackDescriptor3 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 4
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 4
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 1
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[1]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[1]
|
||||
trackDescriptor4 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[TrackDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[TrackDescriptor.INDEX_KEY] = 5
|
||||
kwargs[TrackDescriptor.SOURCE_INDEX_KEY] = 5
|
||||
kwargs[TrackDescriptor.TRACK_TYPE_KEY] = TrackType.SUBTITLE
|
||||
kwargs[TrackDescriptor.SUB_INDEX_KEY] = 2
|
||||
kwargs[TrackDescriptor.DISPOSITION_SET_KEY] = subtitleDispositionTuple[2]
|
||||
kwargs[TrackDescriptor.TAGS_KEY] = subtitleTagTuple[2]
|
||||
trackDescriptor5 = TrackDescriptor(**kwargs)
|
||||
|
||||
kwargs = {}
|
||||
kwargs[MediaDescriptor.CONTEXT_KEY] = self._context
|
||||
kwargs[MediaDescriptor.TRACK_DESCRIPTOR_LIST_KEY] = [trackDescriptor0,
|
||||
trackDescriptor1,
|
||||
trackDescriptor2,
|
||||
trackDescriptor3,
|
||||
trackDescriptor4,
|
||||
trackDescriptor5]
|
||||
|
||||
mediaDescriptor = MediaDescriptor(**kwargs)
|
||||
# mediaDescriptor.reindexSubIndices()
|
||||
|
||||
return mediaDescriptor
|
||||
|
||||
|
||||
def assertFunc(self, testObj = {}):
|
||||
pass
|
||||
|
||||
def shouldFail(self):
|
||||
return False
|
||||
|
||||
def getYield(self):
|
||||
|
||||
|
||||
pc2 = PermutationCombinator2(self._context)
|
||||
pc3 = PermutationCombinator3(self._context)
|
||||
|
||||
for MTC in MediaTagCombinator.getAllClassReferences():
|
||||
for DC2_A in DispositionCombinator2.getAllClassReferences():
|
||||
for TC2_A in TrackTagCombinator2.getAllClassReferences():
|
||||
for DC3_S in DispositionCombinator3.getAllClassReferences():
|
||||
for TC3_S in TrackTagCombinator3.getAllClassReferences():
|
||||
for p2y in pc2.getYield():
|
||||
for p3y in pc3.getYield():
|
||||
|
||||
dc2a = DC2_A(self._context)
|
||||
tc2a = TC2_A(self._context)
|
||||
dc3s = DC3_S(self._context)
|
||||
tc3s = TC3_S(self._context)
|
||||
|
||||
mtc = MTC(self._context)
|
||||
|
||||
yObj = {}
|
||||
|
||||
yObj['identifier'] = self.getIdentifier()
|
||||
yObj['variants'] = [self.getVariant(),
|
||||
f"A:{p2y['variant']}",
|
||||
f"S:{p3y['variant']}",
|
||||
f"A:{dc2a.getVariant()}",
|
||||
f"A:{tc2a.getVariant()}",
|
||||
f"S:{dc3s.getVariant()}",
|
||||
f"S:{tc3s.getVariant()}",
|
||||
mtc.getVariant()]
|
||||
|
||||
yObj['payload'] = self.getPayload(p2y['permutation'],
|
||||
p3y['permutation'],
|
||||
dc2a.getPayload(),
|
||||
tc2a.getPayload(),
|
||||
dc3s.getPayload(),
|
||||
tc3s.getPayload())
|
||||
|
||||
yObj['assertSelectors'] = ['M', 'AP', 'SP', 'AD', 'AT', 'SD', 'ST', 'MT']
|
||||
|
||||
yObj['assertFuncs'] = [self.assertFunc,
|
||||
p2y['assertFunc'],
|
||||
p3y['assertFunc'],
|
||||
dc2a.createAssertFunc(),
|
||||
tc2a.createAssertFunc(),
|
||||
dc3s.createAssertFunc(),
|
||||
tc3s.createAssertFunc(),
|
||||
mtc.createAssertFunc()]
|
||||
|
||||
yObj['shouldFail'] = (self.shouldFail()
|
||||
| p2y['shouldFail']
|
||||
| p3y['shouldFail']
|
||||
| dc2a.shouldFail()
|
||||
| tc2a.shouldFail()
|
||||
| dc3s.shouldFail()
|
||||
| tc3s.shouldFail()
|
||||
| mtc.shouldFail())
|
||||
yieldObj = {'target': yObj}
|
||||
|
||||
if self.__createPresets:
|
||||
|
||||
dc2a_p = DC2_A(self._context, createPresets = True)
|
||||
tc2a_p = TC2_A(self._context, createPresets = True)
|
||||
dc3s_p = DC3_S(self._context, createPresets = True)
|
||||
tc3s_p = TC3_S(self._context, createPresets = True)
|
||||
|
||||
mtc_p = MTC(self._context, createPresets = True)
|
||||
|
||||
yObj_p = {}
|
||||
|
||||
yObj_p['identifier'] = self.getIdentifier()
|
||||
yObj_p['variants'] = [self.getVariant(),
|
||||
f"A:{p2y['variant']}",
|
||||
f"S:{p3y['variant']}",
|
||||
f"A:{dc2a_p.getVariant()}",
|
||||
f"A:{tc2a_p.getVariant()}",
|
||||
f"S:{dc3s_p.getVariant()}",
|
||||
f"S:{tc3s_p.getVariant()}",
|
||||
mtc_p.getVariant()]
|
||||
|
||||
yObj_p['payload'] = self.getPayload(p2y['permutation'],
|
||||
p3y['permutation'],
|
||||
dc2a_p.getPayload(),
|
||||
tc2a_p.getPayload(),
|
||||
dc3s_p.getPayload(),
|
||||
tc3s_p.getPayload())
|
||||
|
||||
yObj_p['assertSelectors'] = ['M', 'AP', 'SP', 'AD', 'AT', 'SD', 'ST', 'MT']
|
||||
|
||||
yObj_p['assertFuncs'] = [self.assertFunc,
|
||||
p2y['assertFunc'],
|
||||
p3y['assertFunc'],
|
||||
dc2a_p.createAssertFunc(),
|
||||
tc2a_p.createAssertFunc(),
|
||||
dc3s_p.createAssertFunc(),
|
||||
tc3s_p.createAssertFunc(),
|
||||
mtc_p.createAssertFunc()]
|
||||
|
||||
yObj_p['shouldFail'] = (self.shouldFail()
|
||||
| p2y['shouldFail']
|
||||
| p3y['shouldFail']
|
||||
| dc2a_p.shouldFail()
|
||||
| tc2a_p.shouldFail()
|
||||
| dc3s_p.shouldFail()
|
||||
| tc3s_p.shouldFail()
|
||||
| mtc_p.shouldFail())
|
||||
yieldObj['preset'] = yObj_p
|
||||
|
||||
yield yieldObj
|
||||
@ -1,33 +0,0 @@
import os, sys, importlib, glob, inspect, itertools


class MediaTagCombinator():

    IDENTIFIER = 'mediaTag'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return MediaTagCombinator.IDENTIFIER

    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[21:-3]
                for p
                in glob.glob(f"{ basePath }/media_tag_combinator_*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.media_tag_combinator_{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.media_tag_combinator_{ identifier }"]):
            #HINT: Excluding MediaTagCombinator itself, as it seems to be pulled in by the import (?)
            if inspect.isclass(obj) and name != 'MediaTagCombinator' and name.startswith('MediaTagCombinator'):
                return obj

    @staticmethod
    def getAllClassReferences():
        return [MediaTagCombinator.getClassReference(i) for i in MediaTagCombinator.list()]
@ -1,55 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .media_tag_combinator import MediaTagCombinator


class MediaTagCombinator0(MediaTagCombinator):

    VARIANT = 'MT0'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return MediaTagCombinator0.VARIANT

    def getPayload(self):
        mediaTags = {}
        if self.__createPresets:
            mediaTags['THIS_IS'] = 'FFX'
        return mediaTags

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict = {}):

                if 'tags' not in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tags'")
                mediaTags = assertObj['tags']

                assert ('THIS_IS' in mediaTags.keys() and mediaTags['THIS_IS'] == 'FFX'
                        ), "Media tag 'THIS_IS' was not preserved"

        else:

            def f(assertObj: dict = {}):
                pass

        return f

    def shouldFail(self):
        return False
@ -1,59 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .media_tag_combinator import MediaTagCombinator


class MediaTagCombinator1(MediaTagCombinator):

    VARIANT = 'MT1'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return MediaTagCombinator1.VARIANT

    def getPayload(self):
        mediaTags = {'From': 'Encoders'}
        if self.__createPresets:
            mediaTags['THIS_IS'] = 'FFX'
        return mediaTags

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict = {}):

                if 'tags' not in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tags'")
                mediaTags = assertObj['tags']

                assert ('From' in mediaTags.keys()
                        ), "'From' not in media tag keys"
                assert (mediaTags['From'] == 'Encoders'
                        ), "Media tag value not 'Encoders' for key 'From'"

                assert ('THIS_IS' in mediaTags.keys() and mediaTags['THIS_IS'] == 'FFX'
                        ), "Media tag 'THIS_IS' was not preserved"

        else:

            def f(assertObj: dict = {}):
                pass

        return f

    def shouldFail(self):
        return False
@ -1,64 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

from ffx.track_type import TrackType

from ffx.track_descriptor import TrackDescriptor
from ffx.media_descriptor import MediaDescriptor

from .media_tag_combinator import MediaTagCombinator


class MediaTagCombinator2(MediaTagCombinator):

    VARIANT = 'MT2'

    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return MediaTagCombinator2.VARIANT

    def getPayload(self):
        mediaTags = {'To': 'Fanz',
                     'Yolo': 'Holo'}
        if self.__createPresets:
            mediaTags['THIS_IS'] = 'FFX'
        return mediaTags

    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict = {}):

                if 'tags' not in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tags'")
                mediaTags = assertObj['tags']

                assert ('To' in mediaTags.keys()
                        ), "'To' not in media tag keys"
                assert (mediaTags['To'] == 'Fanz'
                        ), "Media tag value not 'Fanz' for key 'To'"
                assert ('Yolo' in mediaTags.keys()
                        ), "'Yolo' not in media tag keys"
                assert (mediaTags['Yolo'] == 'Holo'
                        ), "Media tag value not 'Holo' for key 'Yolo'"

                assert ('THIS_IS' in mediaTags.keys() and mediaTags['THIS_IS'] == 'FFX'
                        ), "Media tag 'THIS_IS' was not preserved"

        else:

            def f(assertObj: dict = {}):
                pass

        return f

    def shouldFail(self):
        return False
@ -1,38 +0,0 @@
class PermutationCombinator2():

    IDENTIFIER = 'permutation2'

    PERMUTATION_LIST = [
        [0,1],
        [1,0]
    ]


    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return PermutationCombinator2.IDENTIFIER


    def getPayload(self, permutationIndex):
        return {
            'variant': f"P{permutationIndex}",
            'permutation': PermutationCombinator2.PERMUTATION_LIST[permutationIndex],
            'assertFunc': self.createAssertFunc(),
            'shouldFail': self.shouldFail()
        }

    def createAssertFunc(self):
        def f(testObj = {}):
            pass
        return f

    def shouldFail(self):
        return False

    def getYield(self):
        for permutationIndex in range(len(PermutationCombinator2.PERMUTATION_LIST)):
            yield self.getPayload(permutationIndex)
@ -1,39 +0,0 @@
class PermutationCombinator3():

    IDENTIFIER = 'permutation3'

    PERMUTATION_LIST = [
        [0,1,2],
        [0,2,1],
        [1,2,0]
    ]


    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return PermutationCombinator3.IDENTIFIER


    def getPayload(self, permutationIndex):
        return {
            'variant': f"P{permutationIndex}",
            'permutation': PermutationCombinator3.PERMUTATION_LIST[permutationIndex],
            'assertFunc': self.createAssertFunc(),
            'shouldFail': self.shouldFail()
        }

    def createAssertFunc(self):
        def f(testObj = {}):
            pass
        return f

    def shouldFail(self):
        return False

    def getYield(self):
        for permutationIndex in range(len(PermutationCombinator3.PERMUTATION_LIST)):
            yield self.getPayload(permutationIndex)
@ -1,37 +0,0 @@
class ReleaseCombinator():

    IDENTIFIER = 'release'

    RELEASE_LIST = [
        ".GerEngSub.AAC.1080pINDICATOR.WebDL.x264-Tanuki",
        ".German.AC3.DL.1080pINDICATOR.BluRay.x264-AST4u",
        "-720pINDICATOR"
    ]

    def __init__(self, context = None, indicator = ''):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

        self.__indicator = indicator

    def getIdentifier(self):
        return ReleaseCombinator.IDENTIFIER

    def getPayload(self, releaseIndex):
        releaseStr: str = ReleaseCombinator.RELEASE_LIST[releaseIndex]
        return {
            'variant': f"R{releaseIndex}",
            'release': releaseStr.replace('INDICATOR', f".{self.__indicator}")
                       if self.__indicator else releaseStr.replace('INDICATOR', '')
        }

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False

    def getYield(self):
        for releaseIndex in range(len(ReleaseCombinator.RELEASE_LIST)):
            yield self.getPayload(releaseIndex)
@ -1,153 +0,0 @@
import os, glob, sys, importlib, inspect

from ffx.show_controller import ShowController
from ffx.pattern_controller import PatternController
from ffx.media_controller import MediaController

from ffx.test.helper import createEmptyDirectory
from ffx.database import databaseContext


class Scenario():
    """Scenarios

    Scenario1: MediaTags, stream combinations, dispositions and stream tags via combinators

    Scenario2: <pattern> with 3 files x Scenario1

    Scenario3: <tmdb+pattern> with 3 files (if a TMDB API key is available)

    Naming:

    1: test.mkv         no tmdb, no pattern
    2: test_s01e02.mkv

    Operations:

    tmdb lookup:    set show name as prefix, append episode name
    pattern lookup: set/update tags/dispositions; filter/reorder tracks

    MediaTag combinations (2)

    0: none
    1: Yolo=Holo

    Stream combinations (8)

    VA      D=1  T=1   =1
    VAS     D=1  T=1   =1
    VASS    D=4  T=4   =16
    VASSS   D=5  T=5   =25
    VAA     D=4  T=4   =16
    VAAS    D=4  T=4   =16
    VAASS   D=16 T=16  =256
    VAASSS  D=20 T=20  =400
                       =731

    Disposition combinations (per TrackType)

    0 = none
    1 = DEFAULT

    2 streams (4):

    D1: 00
    D2: 01
    D3: 10
    D4: 11

    3 streams (5):

    D5: 000
    D6: 001
    D7: 010
    D8: 100
    D9: 101

    Stream tag combinations (per TrackType)

    0 = none
    1 = lang+title

    2 streams:

    00
    01
    10
    11

    3 streams:

    000
    001
    010
    100
    101
    """

    def __init__(self, context = None):
        self._context = context
        self._testDirectory = createEmptyDirectory()
        self._ffxExecutablePath = os.path.join(
            os.path.dirname(
                os.path.dirname(
                    os.path.dirname(__file__))),
            'ffx.py')

        self._logger = context['logger']
        self._reportLogger = context['report_logger']

        self._testDbFilePath = os.path.join(self._testDirectory, 'test.db')
        self.createEmptyTestDatabase()

        # Convenience
        self._niceness = self._context['resource_limits']['niceness'] if 'resource_limits' in self._context.keys() and 'niceness' in self._context['resource_limits'].keys() else 99
        self._cpuPercent = self._context['resource_limits']['cpu_percent'] if 'resource_limits' in self._context.keys() and 'cpu_percent' in self._context['resource_limits'].keys() else 99


    def createEmptyTestDatabase(self):

        if self._context['database'] is not None:
            self._context['database']['engine'].dispose()

        if os.path.isfile(self._testDbFilePath):
            os.unlink(self._testDbFilePath)
        self._context['database'] = None

        self._logger.debug(f"Creating test db with path {self._testDbFilePath}")
        self._context['database'] = databaseContext(databasePath=self._testDbFilePath)

        self._sc = ShowController(context = self._context)
        self._pc = PatternController(context = self._context)
        self._mc = MediaController(context = self._context)


    def clearTestDirectory(self):
        testFiles = glob.glob(f"{self._testDirectory}/*")
        for f in testFiles:
            os.remove(f)

    def getFilePathsInTestDirectory(self):
        return [f for f in glob.glob(f"{self._testDirectory}/*")]

    def getFilenamesInTestDirectory(self):
        return [os.path.basename(f) for f in self.getFilePathsInTestDirectory()]


    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[9:-3]
                for p
                in glob.glob(f"{ basePath }/scenario_*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.scenario_{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.scenario_{ identifier }"]):
            #HINT: Excluding Scenario as it seems to be included by import (?)
            if inspect.isclass(obj) and name != 'Scenario' and name.startswith('Scenario'):
                return obj
@ -1,173 +0,0 @@
|
||||
import os, sys, click, glob
|
||||
|
||||
from .scenario import Scenario
|
||||
|
||||
from ffx.test.helper import createMediaTestFile
|
||||
from ffx.process import executeProcess
|
||||
|
||||
from ffx.file_properties import FileProperties
|
||||
|
||||
from ffx.media_descriptor import MediaDescriptor
|
||||
from ffx.track_descriptor import TrackDescriptor
|
||||
|
||||
from ffx.track_type import TrackType
|
||||
from ffx.track_disposition import TrackDisposition
|
||||
|
||||
from ffx.test.media_combinator_0 import MediaCombinator0
|
||||
|
||||
from ffx.test.basename_combinator import BasenameCombinator
|
||||
|
||||
|
||||
class Scenario1(Scenario):
|
||||
"""Creating file VAa, h264/aac/aac
|
||||
Converting to VaA, vp9/opus/opus
|
||||
No tmdb, default parameters"""
|
||||
|
||||
TEST_FILE_EXTENSION = 'mkv'
|
||||
EXPECTED_FILE_EXTENSION = 'webm'
|
||||
|
||||
|
||||
def __init__(self, context):
|
||||
|
||||
context['use_tmdb'] = False
|
||||
context['use_pattern'] = False
|
||||
|
||||
super().__init__(context)
|
||||
|
||||
def getScenario(self):
|
||||
return self.__class__.__name__[8:]
|
||||
|
||||
|
||||
def job(self, yieldObj: dict):
|
||||
|
||||
testContext = self._context.copy()
|
||||
|
||||
identifier = yieldObj['identifier']
|
||||
variantList = yieldObj['variants']
|
||||
|
||||
variantIdentifier = '-'.join(variantList)
|
||||
variantLabel = f"{self.__class__.__name__} Variant {variantIdentifier}"
|
||||
|
||||
mc0 = MediaCombinator0(context = testContext)
|
||||
sourceMediaDescriptor: MediaDescriptor = mc0.getPayload()
|
||||
|
||||
assertSelectorList: list = yieldObj['assertSelectors']
|
||||
assertFuncList = yieldObj['assertFuncs']
|
||||
shouldFail = yieldObj['shouldFail']
|
||||
|
||||
variantPayload = yieldObj['payload']
|
||||
variantBasename = variantPayload['basename']
|
||||
variantFilenameLabel = variantPayload['label']
|
||||
expectedBasename = variantPayload['expectedBasename']
|
||||
|
||||
variantFilename = f"{variantBasename}.{Scenario1.TEST_FILE_EXTENSION}"
|
||||
expectedFilename = f"{expectedBasename}.{Scenario1.EXPECTED_FILE_EXTENSION}"
|
||||
|
||||
|
||||
if self._context['test_variant'] and not variantIdentifier.startswith(self._context['test_variant']):
|
||||
return
|
||||
|
||||
if (self._context['test_limit'] and (self._context['test_passed_counter'] + self._context['test_failed_counter'])
|
||||
>= self._context['test_limit']):
|
||||
return
|
||||
|
||||
self._logger.debug(f"Running Job: {variantLabel}")
|
||||
|
||||
|
||||
# Phase 1: Setup source files
|
||||
|
||||
if not variantBasename:
|
||||
raise ValueError(f"{variantLabel}: Testfile basename is falsy")
|
||||
|
||||
|
||||
self.clearTestDirectory()
|
||||
|
||||
self._logger.debug(f"Creating test file: {variantFilename}")
|
||||
mediaFilePath = createMediaTestFile(mediaDescriptor=sourceMediaDescriptor,
|
||||
baseName=variantBasename,
|
||||
directory=self._testDirectory,
|
||||
logger=self._logger,
|
||||
length = 2)
|
||||
|
||||
|
||||
# Phase 2: Run ffx
|
||||
|
||||
commandSequence = [sys.executable,
|
||||
self._ffxExecutablePath]
|
||||
|
||||
if self._context['verbosity']:
|
||||
commandSequence += ['--verbose',
|
||||
str(self._context['verbosity'])]
|
||||
|
||||
commandSequence += ['convert',
|
||||
mediaFilePath,
|
||||
'--no-prompt',
|
||||
'--no-signature']
|
||||
|
||||
if variantFilenameLabel:
|
||||
commandSequence += ['--label', variantFilenameLabel]
|
||||
|
||||
|
||||
commandSequence += ['--no-pattern']
|
||||
commandSequence += ['--no-tmdb']
|
||||
|
||||
out, err, rc = executeProcess(commandSequence, directory = self._testDirectory, context = self._context)
|
||||
|
||||
if out and self._context['verbosity'] >= 9:
|
||||
self._logger.debug(f"{variantLabel}: Process output: {out}")
|
||||
if rc:
|
||||
self._logger.debug(f"{variantLabel}: Process returned ERROR {rc} ({err})")
|
||||
|
||||
|
||||
# Phase 3: Evaluate results
|
||||
|
||||
resultFilenames = [rf for rf in self.getFilenamesInTestDirectory() if rf != 'ffmpeg2pass-0.log' and rf != variantFilename]
|
||||
self._logger.debug(f"{variantLabel}: Result filenames: {resultFilenames}")
|
||||
|
||||
|
||||
try:
|
||||
|
||||
jobFailed = bool(rc)
|
||||
|
||||
self._logger.debug(f"{variantLabel}: Should fail: {shouldFail} / actually failed: {jobFailed}")
|
||||
|
||||
assert (jobFailed == shouldFail
|
||||
), f"Process {'failed' if jobFailed else 'did not fail'}"
|
||||
|
||||
if not jobFailed:
|
||||
|
||||
expectedResultFilePath = os.path.join(self._testDirectory, f"{expectedFilename}")
|
||||
|
||||
assert (os.path.isfile(expectedResultFilePath)
|
||||
), f"Result file {expectedFilename} in path '{self._testDirectory}' wasn't created"
|
||||
|
||||
for assertIndex in range(len(assertSelectorList)):
|
||||
|
||||
assertSelector = assertSelectorList[assertIndex]
|
||||
assertFunc = assertFuncList[assertIndex]
|
||||
assertVariant = variantList[assertIndex]
|
||||
|
||||
if assertSelector == 'B':
|
||||
#TODO: per file find
|
||||
testObj = {'filenames': resultFilenames}
|
||||
assertFunc(testObj=testObj)
|
||||
if assertSelector == 'L':
|
||||
assertFunc()
|
||||
if assertSelector == 'I':
|
||||
assertFunc()
|
||||
|
||||
self._context['test_passed_counter'] += 1
|
||||
self._reportLogger.info(f"{variantLabel}: Test passed")
|
||||
|
||||
except AssertionError as ae:
|
||||
|
||||
self._context['test_failed_counter'] += 1
|
||||
self._reportLogger.error(f"{variantLabel}: Test FAILED ({ae})")
|
||||
|
||||
|
||||
def run(self):
|
||||
for BC in BasenameCombinator.getAllClassReferences():
|
||||
self._logger.debug(f"BC={BC.__name__}")
|
||||
bc = BC(context = self._context)
|
||||
for y in bc.getYield():
|
||||
self.job(y)
|
||||
@ -1,166 +0,0 @@
import os, sys, click

from .scenario import Scenario

from ffx.test.helper import createMediaTestFile
from ffx.process import executeProcess

from ffx.file_properties import FileProperties

from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor

from ffx.track_type import TrackType
from ffx.track_disposition import TrackDisposition

from ffx.test.media_combinator import MediaCombinator


class Scenario2(Scenario):
    """Creating file VAa, h264/aac/aac
    Converting to VaA, vp9/opus/opus
    No tmdb, default parameters"""

    TEST_FILE_EXTENSION = 'mkv'
    EXPECTED_FILE_EXTENSION = 'webm'


    def __init__(self, context):

        context['use_tmdb'] = False
        context['use_pattern'] = False

        super().__init__(context)

    def getScenario(self):
        return self.__class__.__name__[8:]


    def job(self, yieldObj: dict):

        testContext = self._context.copy()

        targetYieldObj = yieldObj['target']
        # presetYieldObj = yieldObj['preset'] # not used here

        identifier = targetYieldObj['identifier']
        variantList = targetYieldObj['variants']

        variantIdentifier = '-'.join(variantList)
        variantLabel = f"{self.__class__.__name__} Variant {variantIdentifier}"

        sourceMediaDescriptor: MediaDescriptor = targetYieldObj['payload']

        assertSelectorList: list = targetYieldObj['assertSelectors']
        assertFuncList = targetYieldObj['assertFuncs']
        shouldFail = targetYieldObj['shouldFail']


        if self._context['test_variant'] and not variantIdentifier.startswith(self._context['test_variant']):
            return

        if (self._context['test_limit'] and (self._context['test_passed_counter'] + self._context['test_failed_counter'])
                >= self._context['test_limit']):
            return

        self._logger.debug(f"Running Job: {variantLabel}")


        # Phase 1: Setup source files

        self.clearTestDirectory()
        mediaFilePath = createMediaTestFile(mediaDescriptor=sourceMediaDescriptor,
                                            directory=self._testDirectory,
                                            logger=self._logger,
                                            length = 2)


        # Phase 2: Run ffx

        commandSequence = [sys.executable,
                           self._ffxExecutablePath]

        if self._context['verbosity']:
            commandSequence += ['--verbose',
                                str(self._context['verbosity'])]

        commandSequence += ['convert',
                            mediaFilePath,
                            '--no-prompt',
                            '--no-signature']

        out, err, rc = executeProcess(commandSequence, directory = self._testDirectory, context = self._context)

        if out and self._context['verbosity'] >= 9:
            self._logger.debug(f"{variantLabel}: Process output: {out}")
        if rc:
            self._logger.debug(f"{variantLabel}: Process returned ERROR {rc} ({err})")


        # Phase 3: Evaluate results

        resultFilenames = [rf for rf in self.getFilenamesInTestDirectory() if rf.endswith(f".{Scenario2.EXPECTED_FILE_EXTENSION}")]

        self._logger.debug(f"{variantLabel}: Result filenames: {resultFilenames}")

        try:

            jobFailed = bool(rc)
            self._logger.debug(f"{variantLabel}: Should fail: {shouldFail} / actually failed: {jobFailed}")

            assert (jobFailed == shouldFail
                    ), f"Process {'failed' if jobFailed else 'did not fail'}"


            if not jobFailed:

                resultFile = os.path.join(self._testDirectory, 'media.webm')

                assert (os.path.isfile(resultFile)
                        ), f"Result file 'media.webm' in path '{self._testDirectory}' wasn't created"

                resultFileProperties = FileProperties(testContext, resultFile)
                resultMediaDescriptor = resultFileProperties.getMediaDescriptor()

                # resultMediaTracks = resultMediaDescriptor.getAllTrackDescriptors()
                resultMediaTracks = resultMediaDescriptor.getTrackDescriptors()

                for assertIndex in range(len(assertSelectorList)):

                    assertSelector = assertSelectorList[assertIndex]
                    assertFunc = assertFuncList[assertIndex]
                    assertVariant = variantList[assertIndex]

                    if assertSelector == 'M':
                        assertFunc()
                        for variantIndex in range(len(assertVariant)):
                            assert (assertVariant[variantIndex].lower() == resultMediaTracks[variantIndex].getType().indicator()
                                    ), f"Stream #{variantIndex} is not of type {resultMediaTracks[variantIndex].getType().label()}"

                    elif assertSelector == 'AD' or assertSelector == 'AT':
                        assertFunc({'tracks': resultMediaDescriptor.getAudioTracks()})

                    elif assertSelector == 'SD' or assertSelector == 'ST':
                        assertFunc({'tracks': resultMediaDescriptor.getSubtitleTracks()})

                    elif type(assertSelector) is str:
                        if assertSelector == 'J':
                            assertFunc()


            self._context['test_passed_counter'] += 1
            self._reportLogger.info(f"{variantLabel}: Test passed")

        except AssertionError as ae:

            self._context['test_failed_counter'] += 1
            self._reportLogger.error(f"{variantLabel}: Test FAILED ({ae})")


    def run(self):
        MC_list = MediaCombinator.getAllClassReferences()
        for MC in MC_list:
            self._logger.debug(f"MC={MC.__name__}")
            mc = MC(context = self._context)
            for y in mc.getYield():
                self.job(y)
@ -1,275 +0,0 @@
import os, sys, click

from .scenario import Scenario

from ffx.test.helper import createMediaTestFile
from ffx.process import executeProcess
from ffx.database import databaseContext

from ffx.test.helper import createEmptyDirectory
from ffx.helper import getEpisodeFileBasename

from ffx.file_properties import FileProperties

from ffx.media_descriptor import MediaDescriptor
from ffx.track_descriptor import TrackDescriptor

from ffx.track_type import TrackType
from ffx.track_disposition import TrackDisposition

from ffx.test.media_combinator import MediaCombinator
from ffx.test.indicator_combinator import IndicatorCombinator

from ffx.show_descriptor import ShowDescriptor


from ffx.tmdb_controller import TmdbController
from ffx.tmdb_controller import TMDB_API_KEY_NOT_PRESENT_EXCEPTION

class Scenario4(Scenario):

    TEST_SHOW_IDENTIFIER = 83095
    TEST_SHOW_NAME = 'The Rising of the Shield Hero'
    TEST_SHOW_YEAR = 2019

    TEST_FILE_LABEL = 'rotsh'
    TEST_FILE_EXTENSION = 'mkv'

    TEST_PATTERN = f"{TEST_FILE_LABEL}_{FileProperties.SE_INDICATOR_PATTERN}.{TEST_FILE_EXTENSION}"

    EXPECTED_FILE_EXTENSION = 'webm'


    def __init__(self, context):
        super().__init__(context)

        self.__tmdbApiKey = os.environ.get('TMDB_API_KEY', None)
        if self.__tmdbApiKey is None:
            raise TMDB_API_KEY_NOT_PRESENT_EXCEPTION

        self.__ic = IndicatorCombinator(context = context)
        self.__tc = TmdbController()


        kwargs = {}
        kwargs[ShowDescriptor.ID_KEY] = Scenario4.TEST_SHOW_IDENTIFIER
        kwargs[ShowDescriptor.NAME_KEY] = Scenario4.TEST_SHOW_NAME
        kwargs[ShowDescriptor.YEAR_KEY] = Scenario4.TEST_SHOW_YEAR

        self.__testShowDescriptor = ShowDescriptor(**kwargs)

    def getScenario(self):
        return self.__class__.__name__[8:]


    def prepareTestDatabase(self, sourceMediaDescriptor: MediaDescriptor):

        if self._context['database'] is not None:
            self._context['database']['engine'].dispose()

        if os.path.isfile(self._testDbFilePath):
            os.unlink(self._testDbFilePath)
            self._context['database'] = None

        self._logger.debug(f"Creating test db with path {self._testDbFilePath}")
        self._context['database'] = databaseContext(databasePath=self._testDbFilePath)


        self._logger.debug(f"Adding test show '{self.__testShowDescriptor.getFilenamePrefix()}' to test db")
        if not self._sc.updateShow(self.__testShowDescriptor):
            raise click.ClickException('Could not create test show in db')

        testPatternDescriptor = {
            'show_id': Scenario4.TEST_SHOW_IDENTIFIER,
            'pattern': Scenario4.TEST_PATTERN
        }
        patternId = self._pc.addPattern(testPatternDescriptor)

        if patternId:
            self._mc.setPatternMediaDescriptor(sourceMediaDescriptor, patternId)


    def job(self, yieldObj: dict):

        testContext = self._context.copy()

        if 'preset' not in yieldObj.keys():
            raise KeyError('yieldObj did not contain presets')

        targetYieldObj = yieldObj['target']
        presetYieldObj = yieldObj['preset']

        identifier = targetYieldObj['identifier']
        variantList = targetYieldObj['variants']

        variantIdentifier = '-'.join(variantList)
        variantLabel = f"{self.__class__.__name__} Variant {variantIdentifier}"

        sourceMediaDescriptor: MediaDescriptor = targetYieldObj['payload']
        presetMediaDescriptor: MediaDescriptor = presetYieldObj['payload']

        assertSelectorList: list = presetYieldObj['assertSelectors']
        assertFuncList = presetYieldObj['assertFuncs']
        shouldFail = presetYieldObj['shouldFail']


        if self._context['test_variant'] and not variantIdentifier.startswith(self._context['test_variant']):
            return

        if (self._context['test_limit'] and (self._context['test_passed_counter'] + self._context['test_failed_counter'])
                >= self._context['test_limit']):
            return

        self._logger.debug(f"Running Job: {variantLabel}")


        for l in presetMediaDescriptor.getConfiguration(label = 'presetMediaDescriptor'):
            self._logger.debug(l)

        for l in sourceMediaDescriptor.getConfiguration(label = 'sourceMediaDescriptor'):
            self._logger.debug(l)


        # Phase 1: Setup source files

        self.clearTestDirectory()

        testFileList = []
        for indicatorObj in [y for y in self.__ic.getYield() if y['indicator']]:

            indicator = indicatorObj['indicator']

            testFileObj = {}
            testFileObj['season'] = indicatorObj['season']
            testFileObj['episode'] = indicatorObj['episode']

            testFileObj['basename'] = f"{Scenario4.TEST_FILE_LABEL}_{indicator}"

            testFileObj['path'] = createMediaTestFile(mediaDescriptor = presetMediaDescriptor,
                                                      directory = self._testDirectory,
                                                      baseName = testFileObj['basename'],
                                                      logger=self._logger,
                                                      length = 2)
            testFileObj['filename'] = f"{testFileObj['basename']}.{Scenario4.TEST_FILE_EXTENSION}"

            testFileList.append(testFileObj)


        # Phase 2: Prepare database

        self.createEmptyTestDatabase()
        self.prepareTestDatabase(sourceMediaDescriptor)


        # Phase 3: Run ffx

        commandSequence = [sys.executable,
                           self._ffxExecutablePath]

        if self._context['verbosity']:
            commandSequence += ['--verbose',
                                str(self._context['verbosity'])]

        commandSequence += ['--database-file',
                            self._testDbFilePath,
                            'convert']
        commandSequence += [tfo['filename'] for tfo in testFileList]

        commandSequence += ['--no-prompt', '--no-signature']

        out, err, rc = executeProcess(commandSequence, directory = self._testDirectory, context = self._context)

        if out and self._context['verbosity'] >= 9:
            self._logger.debug(f"{variantLabel}: Process output: {out}")
        if rc:
            self._logger.debug(f"{variantLabel}: Process returned ERROR {rc} ({err})")


        # Phase 4: Evaluate results

        resultFilenames = [rf for rf in self.getFilenamesInTestDirectory() if rf.endswith(f".{Scenario4.EXPECTED_FILE_EXTENSION}")]

        self._logger.debug(f"{variantLabel}: Result filenames: {resultFilenames}")


        try:

            jobFailed = bool(rc)
            self._logger.debug(f"{variantLabel}: Should fail: {shouldFail} / actually failed: {jobFailed}")

            assert (jobFailed == shouldFail
                    ), f"Process {'failed' if jobFailed else 'did not fail'}"

            if not jobFailed:

                for tfo in testFileList:

                    tmdbEpisodeResult = self.__tc.queryEpisode(Scenario4.TEST_SHOW_IDENTIFIER,
                                                               tfo['season'], tfo['episode'])

                    expectedFileBasename = getEpisodeFileBasename(self.__testShowDescriptor.getFilenamePrefix(),
                                                                  tmdbEpisodeResult['name'],
                                                                  tfo['season'], tfo['episode'],
                                                                  context=testContext)

                    expectedFilename = f"{expectedFileBasename}.{Scenario4.EXPECTED_FILE_EXTENSION}"
                    expectedFilePath = os.path.join(self._testDirectory, expectedFilename)

                    assert (os.path.isfile(expectedFilePath)
                            ), f"Result file '{expectedFilename}' in path '{self._testDirectory}' wasn't created"


                    rfp = FileProperties(testContext, expectedFilePath)
                    self._logger.debug(f"{variantLabel}: Result file properties: {rfp.getFilename()} season={rfp.getSeason()} episode={rfp.getEpisode()}")

                    rmd = rfp.getMediaDescriptor()
                    # rmt = rmd.getAllTrackDescriptors()
                    rmt = rmd.getTrackDescriptors()

                    for l in rmd.getConfiguration(label = 'resultMediaDescriptor'):
                        self._logger.debug(l)

                    # num tracks differ
                    rmd.applySourceIndices(sourceMediaDescriptor)


                    for assertIndex in range(len(assertSelectorList)):

                        assertSelector = assertSelectorList[assertIndex]
                        assertFunc = assertFuncList[assertIndex]
                        assertVariant = variantList[assertIndex]

                        if assertSelector == 'M':
                            assertFunc()
                            for variantIndex in range(len(assertVariant)):
                                assert (assertVariant[variantIndex].lower() == rmt[variantIndex].getType().indicator()
                                        ), f"Stream #{variantIndex} is not of type {rmt[variantIndex].getType().label()}"

                        if assertSelector == 'AD' or assertSelector == 'AT':
                            assertFunc({'tracks': rmd.getAudioTracks()})

                        elif assertSelector == 'SD' or assertSelector == 'ST':
                            assertFunc({'tracks': rmd.getSubtitleTracks()})

                        elif type(assertSelector) is str:
                            if assertSelector == 'J':
                                assertFunc()


            self._context['test_passed_counter'] += 1
            self._reportLogger.info(f"\n{variantLabel}: Test passed\n")

        except AssertionError as ae:

            self._context['test_failed_counter'] += 1
            self._reportLogger.error(f"\n{variantLabel}: Test FAILED ({ae})\n")


    def run(self):

        MC_list = [MediaCombinator.getClassReference(6)]
        for MC in MC_list:
            self._logger.debug(f"MC={MC.__name__}")
            mc = MC(context = self._context, createPresets = True)
            for y in mc.getYield():
                self.job(y)
@ -1,33 +0,0 @@
class ShowCombinator():

    IDENTIFIER = 'show'

    SHOW_LIST = [
        'Boruto; Naruto Next Generations (2017)',
        'The Rising of the Shield Hero (2019)',
        'Scrubs - Die Anfänger (2001)'
    ]

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return ShowCombinator.IDENTIFIER

    def getPayload(self, showIndex):
        return {
            'variant': f"S{showIndex}",
            'show': ShowCombinator.SHOW_LIST[showIndex]
        }

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False

    def getYield(self):
        for showIndex in range(len(ShowCombinator.SHOW_LIST)):
            yield self.getPayload(showIndex)
@ -1,33 +0,0 @@
class TitleCombinator():

    IDENTIFIER = 'title'

    TITLE_LIST = [
        'The Sound of Space',
        '2001; Odyssee im Weltraum (1968)',
        'Ansible 101'
    ]

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return TitleCombinator.IDENTIFIER

    def getPayload(self, titleIndex):
        return {
            'variant': f"S{titleIndex}",
            'show': TitleCombinator.TITLE_LIST[titleIndex]
        }

    def assertFunc(self, testObj = {}):
        pass

    def shouldFail(self):
        return False

    def getYield(self):
        for titleIndex in range(len(TitleCombinator.TITLE_LIST)):
            yield self.getPayload(titleIndex)
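Both combinators above share the same informal interface: getIdentifier(), getPayload(index), getYield(), assertFunc() and shouldFail(), with the constructor reading only the 'logger' and 'report_logger' keys from the context. A hedged usage sketch (the logging setup here is a stand-in, not the project's own wiring):

    # illustrative consumer of the generator-based combinators
    import logging

    context = {'logger': logging.getLogger('ffx.test'),
               'report_logger': logging.getLogger('ffx.report')}

    for payload in ShowCombinator(context=context).getYield():
        print(payload['variant'], payload['show'])  # e.g. S0 Boruto; Naruto Next Generations (2017)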
@ -1,33 +0,0 @@
import os, sys, importlib, glob, inspect, itertools

class TrackTagCombinator2():

    IDENTIFIER = 'trackTag2'

    def __init__(self, context = None):
        self._context = context
        self._logger = context['logger']
        self._reportLogger = context['report_logger']

    def getIdentifier(self):
        return TrackTagCombinator2.IDENTIFIER

    @staticmethod
    def list():
        basePath = os.path.dirname(__file__)
        return [os.path.basename(p)[23:-3]
                for p
                in glob.glob(f"{ basePath }/track_tag_combinator_2_*.py", recursive = True)
                if p != __file__]

    @staticmethod
    def getClassReference(identifier):
        importlib.import_module(f"ffx.test.track_tag_combinator_2_{ identifier }")
        for name, obj in inspect.getmembers(sys.modules[f"ffx.test.track_tag_combinator_2_{ identifier }"]):
            #HINT: Exclude the base class itself; it shows up in getmembers() because every variant module imports TrackTagCombinator2
            if inspect.isclass(obj) and name != 'TrackTagCombinator2' and name.startswith('TrackTagCombinator2'):
                return obj

    @staticmethod
    def getAllClassReferences():
        return [TrackTagCombinator2.getClassReference(i) for i in TrackTagCombinator2.list()]
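list() and getClassReference() above implement filename-based discovery: glob the sibling track_tag_combinator_2_*.py files, slice the variant identifier out of each filename, import the module, and pick the concrete subclass with inspect. A condensed sketch of the same pattern under assumed names (a 'plugins' package and a 'Plugin' class prefix, both hypothetical):

    # generic plugin-discovery sketch; 'plugins' and 'Plugin' are illustrative names
    import glob, importlib, inspect, os

    def discover(package='plugins', prefix='Plugin'):
        base = os.path.join(os.path.dirname(__file__), package)
        classes = []
        for path in glob.glob(os.path.join(base, '*.py')):
            module = importlib.import_module(f"{package}.{os.path.basename(path)[:-3]}")
            for name, obj in inspect.getmembers(module, inspect.isclass):
                # the base class is re-imported into every module, so it must be filtered out by name
                if name.startswith(prefix) and name != prefix:
                    classes.append(obj)
        return classes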
@ -1,89 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .track_tag_combinator_2 import TrackTagCombinator2


class TrackTagCombinator20(TrackTagCombinator2):

    VARIANT = 'T00'


    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return TrackTagCombinator20.VARIANT


    def getPayload(self):

        subTrack0 = {}
        subTrack1 = {}

        if self.__createPresets:
            subTrack0['THIS_IS'] = 'track0'
            subTrack1['THIS_IS'] = 'track1'

        return (subTrack0,
                subTrack1)


    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict = {}):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())

                # source subIndex 0
                assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"

                assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"

                # source subIndex 1
                assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"

                assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"

        else:

            def f(assertObj: dict = {}):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())

                # source subIndex 0
                assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"

                # source subIndex 1
                assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"

        return f


    def shouldFail(self):
        return False
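createAssertFunc() above is a closure factory: it returns a different check function depending on whether this combinator instance is generating preset source files or verifying conversion results, with createPresets captured at construction time. A stripped-down sketch of that shape (only the 'tracks'/'THIS_IS' names come from the code above; the rest is illustrative):

    # minimal closure-factory sketch mirroring createAssertFunc
    def makeAssertFunc(createPresets: bool):
        if createPresets:
            def f(assertObj: dict):
                assert 'tracks' in assertObj, "assertObj does not contain key 'tracks'"
                # preset run: additionally expect the custom 'THIS_IS' tag to survive conversion
        else:
            def f(assertObj: dict):
                assert 'tracks' in assertObj, "assertObj does not contain key 'tracks'"
                # plain run: only check that language/title tags ended up as expected
        return f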
@ -1,96 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .track_tag_combinator_2 import TrackTagCombinator2


class TrackTagCombinator21(TrackTagCombinator2):

    VARIANT = 'T10'


    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return TrackTagCombinator21.VARIANT


    def getPayload(self):

        subTrack0 = ({'language': 'rus', 'title': 'Russisch'}
                     if self.__createPresets else {'language': 'fra', 'title': 'Französisch'})
        subTrack1 = {}

        if self.__createPresets:
            subTrack0['THIS_IS'] = 'track0'
            subTrack1['THIS_IS'] = 'track1'

        return (subTrack0,
                subTrack1)


    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict = {}):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())

                # source subIndex 0
                assert ('language' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
                assert (resortedTrackDescriptors[0].getTags()['language'] == 'fra'
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'fra'"
                assert ('title' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
                assert (resortedTrackDescriptors[0].getTags()['title'] == 'Französisch'
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Französisch'"

                assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"

                # source subIndex 1
                assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"

                assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"

        else:

            def f(assertObj: dict = {}):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())

                # source subIndex 0
                assert ('language' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag"
                assert (resortedTrackDescriptors[0].getTags()['language'] == 'fra'
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set language tag 'fra'"
                assert ('title' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title tag"
                assert (resortedTrackDescriptors[0].getTags()['title'] == 'Französisch'
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not set title 'Französisch'"

                # source subIndex 1
                assert (not 'language' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has set title tag"

        return f


    def shouldFail(self):
        return False
@ -1,97 +0,0 @@
import os, sys, importlib, glob, inspect

from ffx.track_disposition import TrackDisposition
from .track_tag_combinator_2 import TrackTagCombinator2


class TrackTagCombinator21(TrackTagCombinator2):

    VARIANT = 'T01'


    def __init__(self, context = None,
                 createPresets: bool = False):
        super().__init__(context)

        self.__createPresets = createPresets

    def getVariant(self):
        return TrackTagCombinator21.VARIANT


    def getPayload(self):

        subTrack0 = {}
        subTrack1 = ({'language': 'chn', 'title': 'China'}
                     if self.__createPresets else {'language': 'bas', 'title': 'Baskisch'})

        if self.__createPresets:
            subTrack0['THIS_IS'] = 'track0'
            subTrack1['THIS_IS'] = 'track1'

        return (subTrack0,
                subTrack1)


    def createAssertFunc(self):

        if self.__createPresets:

            def f(assertObj: dict = {}):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())

                # source subIndex 0
                assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"

                assert ('THIS_IS' in resortedTrackDescriptors[0].getTags().keys() and resortedTrackDescriptors[0].getTags()['THIS_IS'] == 'track0'
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has not preserved tag THIS_IS"

                # source subIndex 1
                assert ('language' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag"
                assert (resortedTrackDescriptors[1].getTags()['language'] == 'bas'
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag 'bas'"
                assert ('title' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title tag"
                assert (resortedTrackDescriptors[1].getTags()['title'] == 'Baskisch'
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title 'Baskisch'"

                assert ('THIS_IS' in resortedTrackDescriptors[1].getTags().keys() and resortedTrackDescriptors[1].getTags()['THIS_IS'] == 'track1'
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not preserved tag THIS_IS"

        else:

            def f(assertObj: dict = {}):

                if not 'tracks' in assertObj.keys():
                    raise KeyError("assertObj does not contain key 'tracks'")
                resortedTrackDescriptors = sorted(assertObj['tracks'], key=lambda d: d.getSourceIndex())

                # source subIndex 0
                assert (not 'language' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set language tag"
                assert (not 'title' in resortedTrackDescriptors[0].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[0].getSourceIndex()} index={resortedTrackDescriptors[0].getIndex()} [{resortedTrackDescriptors[0].getType().label()}:{resortedTrackDescriptors[0].getSubIndex()}] has set title tag"

                # source subIndex 1
                assert ('language' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag"
                assert (resortedTrackDescriptors[1].getTags()['language'] == 'bas'
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set language tag 'bas'"
                assert ('title' in resortedTrackDescriptors[1].getTags().keys()
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title tag"
                assert (resortedTrackDescriptors[1].getTags()['title'] == 'Baskisch'
                        ), f"Stream src_index={resortedTrackDescriptors[1].getSourceIndex()} index={resortedTrackDescriptors[1].getIndex()} [{resortedTrackDescriptors[1].getType().label()}:{resortedTrackDescriptors[1].getSubIndex()}] has not set title 'Baskisch'"

        return f


    def shouldFail(self):
        return False