From 009656a61d566e2a344e55f3dd718e8fba28748d Mon Sep 17 00:00:00 2001 From: tassaron Date: Thu, 5 May 2022 19:33:37 -0400 Subject: remove Python threads from video export process --- src/video_thread.py | 161 ++++++++++++++++++---------------------------------- 1 file changed, 55 insertions(+), 106 deletions(-) (limited to 'src/video_thread.py') diff --git a/src/video_thread.py b/src/video_thread.py index 894a870..8a19e2b 100644 --- a/src/video_thread.py +++ b/src/video_thread.py @@ -13,8 +13,6 @@ import numpy import subprocess as sp import sys import os -from queue import Queue, PriorityQueue -from threading import Thread, Event import time import signal import logging @@ -46,7 +44,6 @@ class Worker(QtCore.QObject): parent.createVideo.connect(self.createVideo) self.previewEnabled = type(parent.core).previewEnabled - #self.parent = parent self.components = components self.outputFile = outputFile self.inputFile = inputFile @@ -55,9 +52,8 @@ class Worker(QtCore.QObject): self.sampleSize = 1470 # 44100 / 30 = 1470 self.canceled = False self.error = False - self.stopped = False - def renderNode(self): + def renderFrame(self, audioI): ''' Grabs audio data indices at frames to export, from compositeQueue. Sends it to the components' frameRender methods in layer order @@ -68,52 +64,39 @@ class Worker(QtCore.QObject): self.closePipe() self.cancelExport() self.error = True - msg = 'A render node failed critically.' + msg = 'A call to renderFrame in the video thread failed critically.' 
log.critical(msg) comp._error.emit(msg, str(e)) - while not self.stopped: - audioI = self.compositeQueue.get() - bgI = int(audioI / self.sampleSize) - frame = None - for layerNo, comp in enumerate(reversed((self.components))): - try: - if layerNo in self.staticComponents: - if self.staticComponents[layerNo] is None: - # this layer was merged into a following layer - continue - # static component - if frame is None: # bottom-most layer - frame = self.staticComponents[layerNo] - else: - frame = Image.alpha_composite( - frame, self.staticComponents[layerNo] - ) - + bgI = int(audioI / self.sampleSize) + frame = None + for layerNo, comp in enumerate(reversed((self.components))): + if self.canceled: + break + try: + if layerNo in self.staticComponents: + if self.staticComponents[layerNo] is None: + # this layer was merged into a following layer + continue + # static component + if frame is None: # bottom-most layer + frame = self.staticComponents[layerNo] else: - # animated component - if frame is None: # bottom-most layer - frame = comp.frameRender(bgI) - else: - frame = Image.alpha_composite( - frame, comp.frameRender(bgI) - ) - except Exception as e: - err() - - self.renderQueue.put([audioI, frame]) - self.compositeQueue.task_done() - - def renderDispatch(self): - ''' - Places audio data indices in the compositeQueue, to be used - by a renderNode later. 
All indices are multiples of self.sampleSize - sampleSize * frameNo = audioI, AKA audio data starting at frameNo - ''' - log.debug('Dispatching Frames for Compositing...') - - for audioI in range(0, self.audioArrayLen, self.sampleSize): - self.compositeQueue.put(audioI) + frame = Image.alpha_composite( + frame, self.staticComponents[layerNo] + ) + + else: + # animated component + if frame is None: # bottom-most layer + frame = comp.frameRender(bgI) + else: + frame = Image.alpha_composite( + frame, comp.frameRender(bgI) + ) + except Exception as e: + err() + return frame def showPreview(self, frame): ''' @@ -138,12 +121,9 @@ class Worker(QtCore.QObject): self.width = int(self.settings.value('outputWidth')) self.height = int(self.settings.value('outputHeight')) - self.compositeQueue = Queue() - self.compositeQueue.maxsize = 20 - self.renderQueue = PriorityQueue() - self.renderQueue.maxsize = 20 - + # set Core.canceled to False and call .reset() on each component self.reset() + # initialize progress bar progressBarValue = 0 self.progressBarUpdate.emit(progressBarValue) @@ -285,65 +265,36 @@ class Worker(QtCore.QObject): # START CREATING THE VIDEO # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ - # Make 2 or 3 renderNodes in new threads to create the frames - self.renderThreads = [] - try: - numCpus = len(os.sched_getaffinity(0)) - except Exception: - numCpus = os.cpu_count() - - for i in range(2 if numCpus <= 2 else 3): - self.renderThreads.append( - Thread(target=self.renderNode, name="Render Thread")) - self.renderThreads[i].daemon = True - self.renderThreads[i].start() - - self.dispatchThread = Thread( - target=self.renderDispatch, name="Render Dispatch Thread") - self.dispatchThread.daemon = True - self.dispatchThread.start() - # Last time preview was drawn self.lastPreview = time.time() # Begin piping into ffmpeg! 
- frameBuffer = { - # audioI: bytes ready to be piped - } progressBarValue = 0 self.progressBarUpdate.emit(progressBarValue) self.progressBarSetText.emit("Exporting video...") - if not self.canceled: - for audioI in range( - 0, self.audioArrayLen, self.sampleSize): - while True: - if audioI in frameBuffer or self.canceled: - # if frame's in buffer, pipe it to ffmpeg - break - # else fetch the next frame & add to the buffer - audioI_, frame = self.renderQueue.get() - frameBuffer[audioI_] = frame - self.renderQueue.task_done() - if self.canceled: - break - - # Update live preview - if self.previewEnabled and time.time() - self.lastPreview > 0.5: - self.showPreview(frameBuffer[audioI]) - - try: - self.out_pipe.stdin.write(frameBuffer[audioI].tobytes()) - except Exception: - break - - # increase progress bar value - completion = (audioI / self.audioArrayLen) * 100 - if progressBarValue + 1 <= completion: - progressBarValue = numpy.floor(completion).astype(int) - self.progressBarUpdate.emit(progressBarValue) - self.progressBarSetText.emit( - "Exporting video: %s%%" % str(int(progressBarValue)) - ) + for audioI in range(0, self.audioArrayLen, self.sampleSize): + if self.canceled: + break + # fetch the next frame & add to the FFmpeg pipe + frame = self.renderFrame(audioI) + + # Update live preview + if self.previewEnabled and time.time() - self.lastPreview > 0.5: + self.showPreview(frame) + + try: + self.out_pipe.stdin.write(frame.tobytes()) + except Exception: + break + + # increase progress bar value + completion = (audioI / self.audioArrayLen) * 100 + if progressBarValue + 1 <= completion: + progressBarValue = numpy.floor(completion).astype(int) + self.progressBarUpdate.emit(progressBarValue) + self.progressBarSetText.emit( + "Exporting video: %s%%" % str(int(progressBarValue)) + ) numpy.seterr(all='print') @@ -371,7 +322,6 @@ class Worker(QtCore.QObject): self.error = False self.canceled = False - self.stopped = True self.encoding.emit(False) self.videoCreated.emit() 
@@ -401,7 +351,6 @@ class Worker(QtCore.QObject): def cancel(self): self.canceled = True - self.stopped = True self.core.cancel() for comp in self.components: -- cgit v1.2.3 From 47e63842bcf8ee65c2bb2caa8d4f5252f8e42204 Mon Sep 17 00:00:00 2001 From: tassaron Date: Thu, 5 May 2022 20:18:08 -0400 Subject: show every frame of preview during export --- src/video_thread.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'src/video_thread.py') diff --git a/src/video_thread.py b/src/video_thread.py index 8a19e2b..81e517f 100644 --- a/src/video_thread.py +++ b/src/video_thread.py @@ -108,7 +108,6 @@ class Worker(QtCore.QObject): # or else Qt will garbage-collect it on the C++ side self.latestPreview = ImageQt(frame) self.imageCreated.emit(QtGui.QImage(self.latestPreview)) - self.lastPreview = time.time() @pyqtSlot() def createVideo(self): @@ -265,9 +264,6 @@ class Worker(QtCore.QObject): # START CREATING THE VIDEO # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ - # Last time preview was drawn - self.lastPreview = time.time() - # Begin piping into ffmpeg! 
progressBarValue = 0 self.progressBarUpdate.emit(progressBarValue) @@ -279,7 +275,7 @@ class Worker(QtCore.QObject): frame = self.renderFrame(audioI) # Update live preview - if self.previewEnabled and time.time() - self.lastPreview > 0.5: + if self.previewEnabled: self.showPreview(frame) try: -- cgit v1.2.3 From 3041fcac0ed74bef039ef7ccade786ff969f5154 Mon Sep 17 00:00:00 2001 From: tassaron Date: Thu, 5 May 2022 20:29:13 -0400 Subject: move "determine audio duration" into its own method --- src/video_thread.py | 48 +++++++++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 21 deletions(-) (limited to 'src/video_thread.py') diff --git a/src/video_thread.py b/src/video_thread.py index 81e517f..850f999 100644 --- a/src/video_thread.py +++ b/src/video_thread.py @@ -53,6 +53,29 @@ class Worker(QtCore.QObject): self.canceled = False self.error = False + def determineAudioLength(self): + '''Returns longest audio length of loaded components, or False if failure occurs''' + if any([ + True if 'pcm' in comp.properties() else False + for comp in self.components + ]): + self.progressBarSetText.emit("Loading audio file...") + audioFileTraits = readAudioFile( + self.inputFile, self + ) + if audioFileTraits is None: + self.cancelExport() + return False + self.completeAudioArray, duration = audioFileTraits + self.audioArrayLen = len(self.completeAudioArray) + else: + duration = getAudioDuration(self.inputFile) + self.completeAudioArray = [] + self.audioArrayLen = int( + ((duration * self.hertz) + + self.hertz) - self.sampleSize) + return duration + def renderFrame(self, audioI): ''' Grabs audio data indices at frames to export, from compositeQueue. 
@@ -130,25 +153,9 @@ class Worker(QtCore.QObject): # READ AUDIO, INITIALIZE COMPONENTS, OPEN A PIPE TO FFMPEG # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ log.debug("Determining length of audio...") - if any([ - True if 'pcm' in comp.properties() else False - for comp in self.components - ]): - self.progressBarSetText.emit("Loading audio file...") - audioFileTraits = readAudioFile( - self.inputFile, self - ) - if audioFileTraits is None: - self.cancelExport() - return - self.completeAudioArray, duration = audioFileTraits - self.audioArrayLen = len(self.completeAudioArray) - else: - duration = getAudioDuration(self.inputFile) - self.completeAudioArray = [] - self.audioArrayLen = int( - ((duration * self.hertz) + - self.hertz) - self.sampleSize) + duration = self.determineAudioLength() + if not duration: + return self.progressBarUpdate.emit(0) self.progressBarSetText.emit("Starting components...") @@ -263,10 +270,9 @@ class Worker(QtCore.QObject): # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ # START CREATING THE VIDEO # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ - - # Begin piping into ffmpeg! progressBarValue = 0 self.progressBarUpdate.emit(progressBarValue) + # Begin piping into ffmpeg! 
self.progressBarSetText.emit("Exporting video...") for audioI in range(0, self.audioArrayLen, self.sampleSize): if self.canceled: -- cgit v1.2.3 From 5cc3738ec71815f48bc498ee94c88ba04ae4606d Mon Sep 17 00:00:00 2001 From: tassaron Date: Thu, 5 May 2022 20:53:52 -0400 Subject: move more createVideo work into its own methods rename renderFrame to frameRender for consistency with components --- src/video_thread.py | 215 ++++++++++++++++++++++++++++------------------------ 1 file changed, 116 insertions(+), 99 deletions(-) (limited to 'src/video_thread.py') diff --git a/src/video_thread.py b/src/video_thread.py index 850f999..47afe35 100644 --- a/src/video_thread.py +++ b/src/video_thread.py @@ -53,6 +53,25 @@ class Worker(QtCore.QObject): self.canceled = False self.error = False + def createFfmpegCommand(self, duration): + try: + ffmpegCommand = createFfmpegCommand( + self.inputFile, self.outputFile, self.components, duration + ) + except sp.CalledProcessError as e: + #FIXME video_thread should own this error signal, not components + self.components[0]._error.emit("Ffmpeg could not be found. 
Is it installed?", str(e)) + self.error = True + return + + if not ffmpegCommand: + #FIXME video_thread should own this error signal, not components + self.components[0]._error.emit("The FFmpeg command could not be generated.", "") + log.critical("Cancelling render process due to failure while generating the ffmpeg command.") + self.failExport() + return + return ffmpegCommand + def determineAudioLength(self): '''Returns longest audio length of loaded components, or False if failure occurs''' if any([ @@ -76,7 +95,90 @@ class Worker(QtCore.QObject): self.hertz) - self.sampleSize) return duration - def renderFrame(self, audioI): + def preFrameRender(self): + self.staticComponents = {} + + # Call preFrameRender on each component + canceledByComponent = False + initText = ", ".join([ + "%s) %s" % (num, str(component)) + for num, component in enumerate(reversed(self.components)) + ]) + print('Loaded Components:', initText) + log.info('Calling preFrameRender for %s', initText) + for compNo, comp in enumerate(reversed(self.components)): + try: + comp.preFrameRender( + audioFile=self.inputFile, + completeAudioArray=self.completeAudioArray, + audioArrayLen=self.audioArrayLen, + sampleSize=self.sampleSize, + progressBarUpdate=self.progressBarUpdate, + progressBarSetText=self.progressBarSetText + ) + except ComponentError: + log.warning( + '#%s %s encountered an error in its preFrameRender method', + compNo, + comp + ) + + compProps = comp.properties() + if 'error' in compProps or comp._lockedError is not None: + self.cancel() + self.canceled = True + canceledByComponent = True + compError = comp.error() \ + if type(comp.error()) is tuple else (comp.error(), '') + errMsg = ( + "Component #%s (%s) encountered an error!" 
% ( + str(compNo), comp.name + ) + if comp.error() is None else + 'Export cancelled by component #%s (%s): %s' % ( + str(compNo), + comp.name, + compError[0] + ) + ) + log.error(errMsg) + comp._error.emit(errMsg, compError[1]) + break + if 'static' in compProps: + log.info('Saving static frame from #%s %s', compNo, comp) + self.staticComponents[compNo] = \ + comp.frameRender(0).copy() + + # Check if any errors occurred + log.debug("Checking if a component wishes to cancel the export...") + if self.canceled: + if canceledByComponent: + log.error( + 'Export cancelled by component #%s (%s): %s', + compNo, + comp.name, + 'No message.' if comp.error() is None else ( + comp.error() if type(comp.error()) is str + else comp.error()[0] + ) + ) + self.cancelExport() + + # Merge static frames that can be merged to reduce workload + def mergeConsecutiveStaticComponentFrames(self): + log.info("Merging consecutive static component frames") + for compNo in range(len(self.components)): + if compNo not in self.staticComponents \ + or compNo + 1 not in self.staticComponents: + continue + self.staticComponents[compNo + 1] = Image.alpha_composite( + self.staticComponents.pop(compNo), + self.staticComponents[compNo + 1] + ) + self.staticComponents[compNo] = None + mergeConsecutiveStaticComponentFrames(self) + + def frameRender(self, audioI): ''' Grabs audio data indices at frames to export, from compositeQueue. 
Sends it to the components' frameRender methods in layer order @@ -143,128 +245,43 @@ class Worker(QtCore.QObject): self.width = int(self.settings.value('outputWidth')) self.height = int(self.settings.value('outputHeight')) - # set Core.canceled to False and call .reset() on each component + # Set core.Core.canceled to False and call .reset() on each component self.reset() - # initialize progress bar + # Initialize progress bar to 0 progressBarValue = 0 self.progressBarUpdate.emit(progressBarValue) - # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ - # READ AUDIO, INITIALIZE COMPONENTS, OPEN A PIPE TO FFMPEG - # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ + # Determine longest length of audio which will be the final video's duration log.debug("Determining length of audio...") duration = self.determineAudioLength() if not duration: return + # Call preFrameRender on each component to perform initialization self.progressBarUpdate.emit(0) self.progressBarSetText.emit("Starting components...") - canceledByComponent = False - initText = ", ".join([ - "%s) %s" % (num, str(component)) - for num, component in enumerate(reversed(self.components)) - ]) - print('Loaded Components:', initText) - log.info('Calling preFrameRender for %s', initText) - self.staticComponents = {} - for compNo, comp in enumerate(reversed(self.components)): - try: - comp.preFrameRender( - audioFile=self.inputFile, - completeAudioArray=self.completeAudioArray, - audioArrayLen=self.audioArrayLen, - sampleSize=self.sampleSize, - progressBarUpdate=self.progressBarUpdate, - progressBarSetText=self.progressBarSetText - ) - except ComponentError: - log.warning( - '#%s %s encountered an error in its preFrameRender method', - compNo, - comp - ) - - compProps = comp.properties() - if 'error' in compProps or comp._lockedError is not None: - self.cancel() - self.canceled = True - canceledByComponent = True - compError = comp.error() \ - if type(comp.error()) is tuple 
else (comp.error(), '') - errMsg = ( - "Component #%s (%s) encountered an error!" % ( - str(compNo), comp.name - ) - if comp.error() is None else - 'Export cancelled by component #%s (%s): %s' % ( - str(compNo), - comp.name, - compError[0] - ) - ) - log.error(errMsg) - comp._error.emit(errMsg, compError[1]) - break - if 'static' in compProps: - log.info('Saving static frame from #%s %s', compNo, comp) - self.staticComponents[compNo] = \ - comp.frameRender(0).copy() - - log.debug("Checking if a component wishes to cancel the export...") + self.preFrameRender() if self.canceled: - if canceledByComponent: - log.error( - 'Export cancelled by component #%s (%s): %s', - compNo, - comp.name, - 'No message.' if comp.error() is None else ( - comp.error() if type(comp.error()) is str - else comp.error()[0] - ) - ) - self.cancelExport() return - log.info("Merging consecutive static component frames") - for compNo in range(len(self.components)): - if compNo not in self.staticComponents \ - or compNo + 1 not in self.staticComponents: - continue - self.staticComponents[compNo + 1] = Image.alpha_composite( - self.staticComponents.pop(compNo), - self.staticComponents[compNo + 1] - ) - self.staticComponents[compNo] = None - - try: - ffmpegCommand = createFfmpegCommand( - self.inputFile, self.outputFile, self.components, duration - ) - except sp.CalledProcessError as e: - #FIXME video_thread should own this error signal, not components - self.components[0]._error.emit("Ffmpeg could not be found. 
Is it installed?", str(e)) - self.error = True + # Create FFmpeg command + ffmpegCommand = self.createFfmpegCommand(duration) + if not ffmpegCommand: return - cmd = " ".join(ffmpegCommand) print('###### FFMPEG COMMAND ######\n%s' % cmd) print('############################') - if not cmd: - #FIXME video_thread should own this error signal, not components - self.components[0]._error.emit("The ffmpeg command could not be generated.", "") - log.critical("Cancelling render process due to failure while generating the ffmpeg command.") - self.failExport() - return - - log.info('Opening pipe to ffmpeg') log.info(cmd) + + # Open pipe to FFmpeg + log.info('Opening pipe to FFmpeg') try: self.out_pipe = openPipe( ffmpegCommand, stdin=sp.PIPE, stdout=sys.stdout, stderr=sys.stdout ) except sp.CalledProcessError: - log.critical('Ffmpeg pipe couldn\'t be created!', exc_info=True) + log.critical("Out_Pipe to FFmpeg couldn't be created!", exc_info=True) raise # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ @@ -278,7 +295,7 @@ class Worker(QtCore.QObject): if self.canceled: break # fetch the next frame & add to the FFmpeg pipe - frame = self.renderFrame(audioI) + frame = self.frameRender(audioI) # Update live preview if self.previewEnabled: -- cgit v1.2.3 From 8f169eef453a7c19ed6b785aeae5e1c0780cc00f Mon Sep 17 00:00:00 2001 From: tassaron Date: Thu, 5 May 2022 21:14:10 -0400 Subject: docstrings --- src/video_thread.py | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) (limited to 'src/video_thread.py') diff --git a/src/video_thread.py b/src/video_thread.py index 47afe35..5123f9a 100644 --- a/src/video_thread.py +++ b/src/video_thread.py @@ -73,7 +73,9 @@ class Worker(QtCore.QObject): return ffmpegCommand def determineAudioLength(self): - '''Returns longest audio length of loaded components, or False if failure occurs''' + ''' + Returns audio length which determines length of final video, or False if failure occurs + ''' if 
any([ True if 'pcm' in comp.properties() else False for comp in self.components @@ -96,6 +98,10 @@ class Worker(QtCore.QObject): return duration def preFrameRender(self): + ''' + Initializes components that need to pre-compute stuff. + Also prerenders "static" components like text and merges them if possible + ''' self.staticComponents = {} # Call preFrameRender on each component @@ -180,10 +186,8 @@ class Worker(QtCore.QObject): def frameRender(self, audioI): ''' - Grabs audio data indices at frames to export, from compositeQueue. - Sends it to the components' frameRender methods in layer order - to create subframes & composite them into the final frame. - The resulting frames are collected in the renderQueue + Renders a frame composited together from the frames returned by each component + audioI is a multiple of self.sampleSize, which can be divided to determine frameNo ''' def err(): self.closePipe() @@ -225,9 +229,8 @@ class Worker(QtCore.QObject): def showPreview(self, frame): ''' - Receives a final frame that will be piped to FFmpeg, - adds it to the checkerboard and emits a final QImage - to the MainWindow for the live preview + Receives a final frame that will be piped to FFmpeg, + adds it to the MainWindow for the live preview ''' # We must store a reference to this QImage # or else Qt will garbage-collect it on the C++ side @@ -236,6 +239,16 @@ class Worker(QtCore.QObject): @pyqtSlot() def createVideo(self): + ''' + 1. Numpy is set to ignore division errors during this method + 2. Determine length of final video + 3. Call preFrameRender on each component + 4. Create the main FFmpeg command + 5. Open the out_pipe to FFmpeg process + 6. Iterate over the audio data array and call frameRender on the components to get frames + 7. Close the out_pipe + 8. 
Call postFrameRender on each component + ''' log.debug("Video worker received signal to createVideo") log.debug( 'Video thread id: {}'.format(int(QtCore.QThread.currentThreadId()))) @@ -315,6 +328,9 @@ class Worker(QtCore.QObject): "Exporting video: %s%%" % str(int(progressBarValue)) ) + # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ + # Finished creating the video! + # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~ numpy.seterr(all='print') -- cgit v1.2.3