diff --git a/bin/ffx.py b/bin/ffx.py
index 3401df4..b1849f1 100755
--- a/bin/ffx.py
+++ b/bin/ffx.py
@@ -2,6 +2,11 @@
 import os, sys, subprocess, json, click, time
 
+from textual.app import App, ComposeResult
+from textual.screen import Screen
+from textual.widgets import Header, Footer, Placeholder
+
+
 VERSION='0.1.0'
 
 DEFAULT_VIDEO_ENCODER = 'vp9'
 
@@ -46,6 +51,65 @@
 STREAM_LAYOUT_STEREO = 'stereo'
 STREAM_LAYOUT_6CH = '6ch'
+
+class DashboardScreen(Screen):
+
+    def __init__(self):
+        super().__init__()
+
+        context = self.app.getContext()
+        context['dashboard'] = 'dashboard'
+
+    def compose(self) -> ComposeResult:
+        yield Header(show_clock=True)
+        yield Placeholder("Dashboard Screen")
+        yield Footer()
+
+
+class SettingsScreen(Screen):
+    def __init__(self):
+        super().__init__()
+        context = self.app.getContext()
+    def compose(self) -> ComposeResult:
+        yield Placeholder("Settings Screen")
+        yield Footer()
+
+
+class HelpScreen(Screen):
+    def __init__(self):
+        super().__init__()
+        context = self.app.getContext()
+    def compose(self) -> ComposeResult:
+        yield Placeholder("Help Screen")
+        yield Footer()
+
+
+class ModesApp(App):
+
+    BINDINGS = [
+        ("d", "switch_mode('dashboard')", "Dashboard"),
+        ("s", "switch_mode('settings')", "Settings"),
+        ("h", "switch_mode('help')", "Help"),
+    ]
+
+    MODES = {
+        "dashboard": DashboardScreen,
+        "settings": SettingsScreen,
+        "help": HelpScreen,
+    }
+
+    def __init__(self, context = {}):
+        super().__init__()
+        self.context = context
+
+    def on_mount(self) -> None:
+        self.switch_mode("dashboard")
+
+    def getContext(self):
+        return self.context
+
+
+
 def executeProcess(commandSequence):
 
     process = subprocess.Popen(commandSequence, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 
@@ -291,7 +355,7 @@ def streams(filename):
 @click.option("-d", "--denoise", is_flag=True, default=False)
 
-def convert(ctx, paths, label, video_encoder, quality, preset, stereo_bitrate, ac3_bitrate, dts_bitrate, crop, clear_metadata, default_subtitle, default_audio, denoise):
+def convert(ctx, paths, label, video_encoder, quality, preset, stereo_bitrate, ac3_bitrate, dts_bitrate, crop, clear_metadata, default_subtitle, forced_audio, default_audio, denoise):
     """Batch conversion of audiovideo files in format suitable for web playback, e.g. jellyfin
 
     Files found under PATHS will be converted according to parameters.
 
@@ -299,144 +363,149 @@ def convert(ctx, paths, label, video_encoder, quality, preset, stereo_bitrate, a
     Suffices will we appended to filename in case of multiple created files or if the filename has not changed."""
 
-    startTime = time.perf_counter()
+    #startTime = time.perf_counter()
 
-    sourcePath = paths[0]
-    targetFilename = paths[1]
+    #sourcePath = paths[0]
+    #targetFilename = paths[1]
 
-    if not os.path.isfile(sourcePath):
-        raise click.ClickException(f"There is no file with path {sourcePath}")
+    #if not os.path.isfile(sourcePath):
+    # raise click.ClickException(f"There is no file with path {sourcePath}")
 
-    click.echo(f"src: {sourcePath} tgt: {targetFilename}")
+    #click.echo(f"src: {sourcePath} tgt: {targetFilename}")
 
-    click.echo(f"ve={video_encoder}")
+    #click.echo(f"ve={video_encoder}")
 
-    qualityTokens = quality.split(',')
+    #qualityTokens = quality.split(',')
 
-    q_list = [q for q in qualityTokens if q.isnumeric()]
+    #q_list = [q for q in qualityTokens if q.isnumeric()]
 
-    click.echo(q_list)
+    #click.echo(q_list)
 
-    ctx.obj['bitrates'] = {}
-    ctx.obj['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
-    ctx.obj['bitrates']['ac3'] = str(ac3_bitrate) if str(ac3_bitrate).endswith('k') else f"{ac3_bitrate}k"
-    ctx.obj['bitrates']['dts'] = str(dts_bitrate) if str(dts_bitrate).endswith('k') else f"{dts_bitrate}k"
+    #ctx.obj['bitrates'] = {}
+    #ctx.obj['bitrates']['stereo'] = str(stereo_bitrate) if str(stereo_bitrate).endswith('k') else f"{stereo_bitrate}k"
+    #ctx.obj['bitrates']['ac3'] = str(ac3_bitrate) if str(ac3_bitrate).endswith('k') else f"{ac3_bitrate}k"
+    #ctx.obj['bitrates']['dts'] = str(dts_bitrate) if str(dts_bitrate).endswith('k') else f"{dts_bitrate}k"
 
-    click.echo(f"a={ctx.obj['bitrates']['stereo']}")
-    click.echo(f"ac3={ctx.obj['bitrates']['ac3']}")
-    click.echo(f"dts={ctx.obj['bitrates']['dts']}")
+    #click.echo(f"a={ctx.obj['bitrates']['stereo']}")
+    #click.echo(f"ac3={ctx.obj['bitrates']['ac3']}")
+    #click.echo(f"dts={ctx.obj['bitrates']['dts']}")
 
-    performCrop = (crop != 'none')
+    #performCrop = (crop != 'none')
 
-    if performCrop:
+    #if performCrop:
 
-        cropTokens = crop.split(',')
+    #cropTokens = crop.split(',')
 
-        if cropTokens and len(cropTokens) == 2:
+    #if cropTokens and len(cropTokens) == 2:
 
-            cropStart, cropLength = crop.split(',')
-        else:
-            cropStart = DEFAULT_CROP_START
-            cropLength = DEFAULT_CROP_LENGTH
+    #cropStart, cropLength = crop.split(',')
+    #else:
+    #cropStart = DEFAULT_CROP_START
+    #cropLength = DEFAULT_CROP_LENGTH
 
-        click.echo(f"crop start={cropStart} length={cropLength}")
+    #click.echo(f"crop start={cropStart} length={cropLength}")
 
-    click.echo(f"\nRunning {len(q_list)} jobs")
+    #click.echo(f"\nRunning {len(q_list)} jobs")
 
-    streamDescriptor = getStreamDescriptor(sourcePath)
+    #streamDescriptor = getStreamDescriptor(sourcePath)
 
-    commandTokens = COMMAND_TOKENS + [sourcePath]
+    #commandTokens = COMMAND_TOKENS + [sourcePath]
 
-    for q in q_list:
+    #for q in q_list:
 
-        click.echo(f"\nRunning job q={q}")
+    #click.echo(f"\nRunning job q={q}")
 
-        mappingVideoTokens = ['-map', 'v:0']
-        mappingTokens = mappingVideoTokens.copy()
-        audioTokens = []
+    #mappingVideoTokens = ['-map', 'v:0']
+    #mappingTokens = mappingVideoTokens.copy()
+    #audioTokens = []
 
-        audioIndex = 0
-        for audioStreamDescriptor in streamDescriptor:
+    #audioIndex = 0
+    #for audioStreamDescriptor in streamDescriptor:
 
-            if audioStreamDescriptor['type'] == STREAM_TYPE_AUDIO:
+    #if audioStreamDescriptor['type'] == STREAM_TYPE_AUDIO:
 
-                mappingTokens += ['-map', f"a:{audioIndex}"]
-                audioTokens += generateAudioTokens(ctx.obj, audioIndex, audioStreamDescriptor['layout'])
-                audioIndex += 1
+    #mappingTokens += ['-map', f"a:{audioIndex}"]
+    #audioTokens += generateAudioTokens(ctx.obj, audioIndex, audioStreamDescriptor['layout'])
+    #audioIndex += 1
 
-        for s in range(len([d for d in streamDescriptor if d['type'] == STREAM_TYPE_SUBTITLE])):
-            mappingTokens += ['-map', f"s:{s}"]
+    #for s in range(len([d for d in streamDescriptor if d['type'] == STREAM_TYPE_SUBTITLE])):
+    #mappingTokens += ['-map', f"s:{s}"]
 
-        if video_encoder == 'av1':
+    #if video_encoder == 'av1':
 
-            commandSequence = commandTokens + mappingTokens + audioTokens + generateAV1Tokens(q, preset) + audioTokens
+    #commandSequence = commandTokens + mappingTokens + audioTokens + generateAV1Tokens(q, preset) + audioTokens
 
-            if clear_metadata:
-                commandSequence += generateClearTokens(streamDescriptor)
+    #if clear_metadata:
+    #commandSequence += generateClearTokens(streamDescriptor)
 
-            if performCrop:
-                commandSequence += generateCropTokens(cropStart, cropLength)
+    #if performCrop:
+    #commandSequence += generateCropTokens(cropStart, cropLength)
 
-            commandSequence += generateOutputTokens(targetFilename, DEFAULT_FILE_SUFFIX, q)
+    #commandSequence += generateOutputTokens(targetFilename, DEFAULT_FILE_SUFFIX, q)
 
-            click.echo(f"Command: {' '.join(commandSequence)}")
+    #click.echo(f"Command: {' '.join(commandSequence)}")
 
-            executeProcess(commandSequence)
+    #executeProcess(commandSequence)
 
-        if video_encoder == 'vp9':
+    #if video_encoder == 'vp9':
 
-            commandSequence1 = commandTokens + mappingVideoTokens + generateVP9Pass1Tokens(q)
+    #commandSequence1 = commandTokens + mappingVideoTokens + generateVP9Pass1Tokens(q)
 
-            if performCrop:
-                commandSequence1 += generateCropTokens(cropStart, cropLength)
+    #if performCrop:
+    # commandSequence1 += generateCropTokens(cropStart, cropLength)
 
-            commandSequence1 += NULL_TOKENS
+    #commandSequence1 += NULL_TOKENS
 
-            click.echo(f"Command 1: {' '.join(commandSequence1)}")
+    #click.echo(f"Command 1: {' '.join(commandSequence1)}")
 
-            if os.path.exists(TEMP_FILE_NAME):
-                os.remove(TEMP_FILE_NAME)
+    #if os.path.exists(TEMP_FILE_NAME):
+    # os.remove(TEMP_FILE_NAME)
 
-            executeProcess(commandSequence1)
+    #executeProcess(commandSequence1)
 
-            commandSequence2 = commandTokens + mappingTokens
+    #commandSequence2 = commandTokens + mappingTokens
 
-            if denoise:
-                commandSequence2 += generateDenoiseTokens()
+    #if denoise:
+    # commandSequence2 += generateDenoiseTokens()
 
-            commandSequence2 += generateVP9Pass2Tokens(q) + audioTokens
+    #commandSequence2 += generateVP9Pass2Tokens(q) + audioTokens
 
-            if clear_metadata:
-                commandSequence2 += generateClearTokens(streamDescriptor)
+    #if clear_metadata:
+    # commandSequence2 += generateClearTokens(streamDescriptor)
 
-            if performCrop:
-                commandSequence2 += generateCropTokens(cropStart, cropLength)
+    #if performCrop:
+    # commandSequence2 += generateCropTokens(cropStart, cropLength)
 
-            commandSequence2 += generateOutputTokens(targetFilename, DEFAULT_FILE_SUFFIX, q)
+    #commandSequence2 += generateOutputTokens(targetFilename, DEFAULT_FILE_SUFFIX, q)
 
-            click.echo(f"Command 2: {' '.join(commandSequence2)}")
+    #click.echo(f"Command 2: {' '.join(commandSequence2)}")
 
-            executeProcess(commandSequence2)
+    #executeProcess(commandSequence2)
+
+
+    #click.echo('\nDONE\n')
+    #endTime = time.perf_counter()
+    #click.echo(f"Time elapsed {endTime - startTime}")
 
-    click.echo('\nDONE\n')
+    app = ModesApp(ctx.obj)
+    app.run()
 
-    endTime = time.perf_counter()
-    click.echo(f"Time elapsed {endTime - startTime}")
+    click.echo(f"app result: {app.getContext()}")
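
For reference, a minimal sketch of how the reworked convert() now drives the TUI. This is a hypothetical standalone driver, not part of the patch; it assumes the patched bin/ffx.py is importable as a module named ffx and that textual is installed.

    # Hypothetical driver mirroring the tail of convert(): build a shared context
    # dict, hand it to ModesApp, let the screens write into it via getContext(),
    # and read the result back once the TUI exits.
    from ffx import ModesApp          # assumption: bin/ffx.py is on the import path

    context = {'bitrates': {}}        # stand-in for click's ctx.obj
    app = ModesApp(context)
    app.run()                         # blocks until the user quits the app
    # DashboardScreen adds context['dashboard'] = 'dashboard' when it is created
    # by switch_mode("dashboard"), so the dict printed here reflects that write.
    print(f"app result: {app.getContext()}")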