aboutsummaryrefslogtreecommitdiff
path: root/video_thread.py
diff options
context:
space:
mode:
authormartin2017-05-22 10:47:55 +0200
committerGitHub2017-05-22 10:47:55 +0200
commite507cf0b6c22a9be880c22dd78723c007de8b73c (patch)
tree2541d0570c43237f8faff97922f687d9d1d06bc4 /video_thread.py
parent01de150f7401a3f2640999298f9b6f1c0c7a101a (diff)
parente77199219521ab819730574c17a819c7e2bfe84d (diff)
Merge pull request #13 from tassaron2/master
Added the ability to use an mp4 as the background image
Diffstat (limited to 'video_thread.py')
-rw-r--r--video_thread.py50
1 file changed, 34 insertions, 16 deletions
diff --git a/video_thread.py b/video_thread.py
index 1d1d44b..bd832be 100644
--- a/video_thread.py
+++ b/video_thread.py
@@ -11,6 +11,7 @@ class Worker(QtCore.QObject):
videoCreated = pyqtSignal()
progressBarUpdate = pyqtSignal(int)
+ progressBarSetText = pyqtSignal(str)
def __init__(self, parent=None):
QtCore.QObject.__init__(self)
@@ -21,18 +22,29 @@ class Worker(QtCore.QObject):
@pyqtSlot(str, str, QtGui.QFont, int, int, int, int, str, str)
def createVideo(self, backgroundImage, titleText, titleFont, fontSize, alignment, xOffset, yOffset, inputFile, outputFile):
# print('worker thread id: {}'.format(QtCore.QThread.currentThreadId()))
-
- imBackground = self.core.drawBaseImage(
- backgroundImage,
- titleText,
- titleFont,
- fontSize,
- alignment,
- xOffset,
- yOffset)
-
- self.progressBarUpdate.emit(0)
-
+ def getBackgroundAtIndex(i):
+ return self.core.drawBaseImage(
+ backgroundFrames[i],
+ titleText,
+ titleFont,
+ fontSize,
+ alignment,
+ xOffset,
+ yOffset)
+
+ progressBarValue = 0
+ self.progressBarUpdate.emit(progressBarValue)
+ self.progressBarSetText.emit('Loading background image…')
+
+ backgroundFrames = self.core.parseBaseImage(backgroundImage)
+ if len(backgroundFrames) < 2:
+ # the base image is not a video so we can draw it now
+ imBackground = getBackgroundAtIndex(0)
+ else:
+ # base images will be drawn while drawing the audio bars
+ imBackground = None
+
+ self.progressBarSetText.emit('Loading audio file…')
completeAudioArray = self.core.readAudioFile(inputFile)
# test if user has libfdk_aac
@@ -71,11 +83,10 @@ class Worker(QtCore.QObject):
smoothConstantDown = 0.08
smoothConstantUp = 0.8
lastSpectrum = None
- progressBarValue = 0
sampleSize = 1470
-
+
numpy.seterr(divide='ignore')
-
+ bgI = 0
for i in range(0, len(completeAudioArray), sampleSize):
# create video for output
lastSpectrum = self.core.transformData(
@@ -85,7 +96,12 @@ class Worker(QtCore.QObject):
smoothConstantDown,
smoothConstantUp,
lastSpectrum)
- im = self.core.drawBars(lastSpectrum, imBackground)
+ if imBackground != None:
+ im = self.core.drawBars(lastSpectrum, imBackground)
+ else:
+ im = self.core.drawBars(lastSpectrum, getBackgroundAtIndex(bgI))
+ if bgI < len(backgroundFrames)-1:
+ bgI += 1
# write to out_pipe
try:
@@ -97,6 +113,7 @@ class Worker(QtCore.QObject):
if progressBarValue + 1 <= (i / len(completeAudioArray)) * 100:
progressBarValue = numpy.floor((i / len(completeAudioArray)) * 100)
self.progressBarUpdate.emit(progressBarValue)
+ self.progressBarSetText.emit('%s%%' % str(int(progressBarValue)))
numpy.seterr(all='print')
@@ -108,4 +125,5 @@ class Worker(QtCore.QObject):
out_pipe.wait()
print("Video file created")
self.progressBarUpdate.emit(100)
+ self.progressBarSetText.emit('100%')
self.videoCreated.emit()