diff options
| author | tassaron | 2017-05-18 19:14:27 -0400 |
|---|---|---|
| committer | tassaron | 2017-05-18 19:14:27 -0400 |
| commit | ff818836dd2221c544afe1fcc17369b17f90b0db (patch) | |
| tree | b654ca68fb946dc91cb30f4cb96ac0b933ca98ab /video_thread.py | |
| parent | 01de150f7401a3f2640999298f9b6f1c0c7a101a (diff) | |
Added the ability to use an MP4 video as the background
This might not be the best way to do it (dumping all the video frames to a temporary location), but it works for clips of a few minutes or less.
Diffstat (limited to 'video_thread.py')
| -rw-r--r-- | video_thread.py | 39 |
1 file changed, 26 insertions, 13 deletions
diff --git a/video_thread.py b/video_thread.py index 1d1d44b..6bad504 100644 --- a/video_thread.py +++ b/video_thread.py @@ -21,18 +21,26 @@ class Worker(QtCore.QObject): @pyqtSlot(str, str, QtGui.QFont, int, int, int, int, str, str) def createVideo(self, backgroundImage, titleText, titleFont, fontSize, alignment, xOffset, yOffset, inputFile, outputFile): # print('worker thread id: {}'.format(QtCore.QThread.currentThreadId())) - - imBackground = self.core.drawBaseImage( - backgroundImage, - titleText, - titleFont, - fontSize, - alignment, - xOffset, - yOffset) + def getBackgroundAtIndex(i): + return self.core.drawBaseImage( + backgroundFrames[i], + titleText, + titleFont, + fontSize, + alignment, + xOffset, + yOffset) + + backgroundFrames = self.core.parseBaseImage(backgroundImage) + if len(backgroundFrames) < 2: + # the base image is not a video so we can draw it now + imBackground = getBackgroundAtIndex(0) + else: + # base images will be drawn while drawing the audio bars + imBackground = None self.progressBarUpdate.emit(0) - + completeAudioArray = self.core.readAudioFile(inputFile) # test if user has libfdk_aac @@ -64,7 +72,7 @@ class Worker(QtCore.QObject): ffmpegCommand.append('-2') ffmpegCommand.append(outputFile) - + out_pipe = sp.Popen(ffmpegCommand, stdin=sp.PIPE,stdout=sys.stdout, stderr=sys.stdout) @@ -75,7 +83,7 @@ class Worker(QtCore.QObject): sampleSize = 1470 numpy.seterr(divide='ignore') - + bgI = 0 for i in range(0, len(completeAudioArray), sampleSize): # create video for output lastSpectrum = self.core.transformData( @@ -85,7 +93,12 @@ class Worker(QtCore.QObject): smoothConstantDown, smoothConstantUp, lastSpectrum) - im = self.core.drawBars(lastSpectrum, imBackground) + if imBackground != None: + im = self.core.drawBars(lastSpectrum, imBackground) + else: + im = self.core.drawBars(lastSpectrum, getBackgroundAtIndex(bgI)) + if bgI < len(backgroundFrames)-1: + bgI += 1 # write to out_pipe try: |
