about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
authortassaron2017-07-09 14:31:19 -0400
committertassaron2017-07-09 14:31:19 -0400
commitf6fbc8d2423ac5ae683a7613b53648db3e02e323 (patch)
tree218c5a056349f9eb1cc0b7952a09b254f1962b95 /src
parent94d4acc1f4f4abe4029e8f9c050932b67cae8cec (diff)
a basic Sound component for mixing sounds
to be greatly expanded...
Diffstat (limited to 'src')
-rw-r--r--src/component.py20
-rw-r--r--src/components/image.py3
-rw-r--r--src/components/sound.py74
-rw-r--r--src/components/sound.ui122
-rw-r--r--src/core.py5
-rw-r--r--src/frame.py2
-rw-r--r--src/mainwindow.py2
-rw-r--r--src/preview_thread.py9
-rw-r--r--src/video_thread.py169
9 files changed, 325 insertions, 81 deletions
diff --git a/src/component.py b/src/component.py
index 648a6d6..306072c 100644
--- a/src/component.py
+++ b/src/component.py
@@ -48,14 +48,18 @@ class Component(QtCore.QObject):
if presetName is not None else presetDict['preset']
def preFrameRender(self, **kwargs):
- ''' Triggered only before a video is exported (video_thread.py)
+ '''
+ Triggered only before a video is exported (video_thread.py)
self.worker = the video thread worker
self.completeAudioArray = a list of audio samples
self.sampleSize = number of audio samples per video frame
self.progressBarUpdate = signal to set progress bar number
self.progressBarSetText = signal to set progress bar text
- Use the latter two signals to update the MainWindow if needed
+ Use the latter two signals to update the MainWindow if needed
for a long initialization procedure (i.e., for a visualizer)
+
+ Return a list of properties to signify if your component is
+ non-animated ('static') or returns sound ('audio').
'''
for var, value in kwargs.items():
exec('self.%s = value' % var)
@@ -135,8 +139,8 @@ class Component(QtCore.QObject):
return page
def update(self):
- super().update()
self.parent.drawPreview()
+ super().update()
def previewRender(self, previewWorker):
width = int(previewWorker.core.settings.value('outputWidth'))
@@ -153,9 +157,17 @@ class Component(QtCore.QObject):
image = BlankFrame(width, height)
return image
+ def audio(self):
+ '''
+ Return audio to mix into master as a string (path to audio file),
+ or an object that returns raw audio data [future feature].
+ '''
+
@classmethod
def names(cls):
- # Alternative names for renaming a component between project files
+ '''
+ Alternative names for renaming a component between project files.
+ '''
return []
'''
diff --git a/src/components/image.py b/src/components/image.py
index 6edd893..55fa6dd 100644
--- a/src/components/image.py
+++ b/src/components/image.py
@@ -42,7 +42,6 @@ class Component(Component):
super().update()
def previewRender(self, previewWorker):
- self.imageFormats = previewWorker.core.imageFormats
width = int(previewWorker.core.settings.value('outputWidth'))
height = int(previewWorker.core.settings.value('outputHeight'))
return self.drawFrame(width, height)
@@ -110,7 +109,7 @@ class Component(Component):
imgDir = self.settings.value("componentDir", os.path.expanduser("~"))
filename, _ = QtWidgets.QFileDialog.getOpenFileName(
self.page, "Choose Image", imgDir,
- "Image Files (%s)" % " ".join(self.imageFormats))
+ "Image Files (%s)" % " ".join(self.core.imageFormats))
if filename:
self.settings.setValue("componentDir", os.path.dirname(filename))
self.page.lineEdit_image.setText(filename)
diff --git a/src/components/sound.py b/src/components/sound.py
new file mode 100644
index 0000000..d3589b3
--- /dev/null
+++ b/src/components/sound.py
@@ -0,0 +1,74 @@
+from PyQt5 import QtGui, QtCore, QtWidgets
+import os
+
+from component import Component
+from frame import BlankFrame
+
+
+class Component(Component):
+ '''Sound'''
+
+ modified = QtCore.pyqtSignal(int, dict)
+
+ def widget(self, parent):
+ self.parent = parent
+ self.settings = parent.settings
+ page = self.loadUi('sound.ui')
+
+ page.lineEdit_sound.textChanged.connect(self.update)
+ page.pushButton_sound.clicked.connect(self.pickSound)
+
+ self.page = page
+ return page
+
+ def update(self):
+ self.sound = self.page.lineEdit_sound.text()
+ super().update()
+
+ def previewRender(self, previewWorker):
+ width = int(previewWorker.core.settings.value('outputWidth'))
+ height = int(previewWorker.core.settings.value('outputHeight'))
+ return self.frameRender(self.compPos, 0)
+
+ def preFrameRender(self, **kwargs):
+ # super().preFrameRender(**kwargs)
+ return ['static', 'audio']
+
+ def audio(self):
+ return self.sound
+
+ def pickSound(self):
+ sndDir = self.settings.value("componentDir", os.path.expanduser("~"))
+ filename, _ = QtWidgets.QFileDialog.getOpenFileName(
+ self.page, "Choose Sound", sndDir,
+ "Audio Files (%s)" % " ".join(self.core.audioFormats))
+ if filename:
+ self.settings.setValue("componentDir", os.path.dirname(filename))
+ self.page.lineEdit_sound.setText(filename)
+ self.update()
+
+ def frameRender(self, layerNo, frameNo):
+ width = int(self.core.settings.value('outputWidth'))
+ height = int(self.core.settings.value('outputHeight'))
+ return BlankFrame(width, height)
+
+ def loadPreset(self, pr, presetName=None):
+ super().loadPreset(pr, presetName)
+ self.page.lineEdit_sound.setText(pr['sound'])
+
+ def savePreset(self):
+ return {
+ 'preset': self.currentPreset,
+ 'sound': self.sound,
+ }
+
+ def commandHelp(self):
+ print('Path to audio file:\n path=/filepath/to/sound.ogg')
+
+ def command(self, arg):
+ if not arg.startswith('preset=') and '=' in arg:
+ key, arg = arg.split('=', 1)
+ if key == 'path':
+ self.page.lineEdit_sound.setText(arg)
+ return
+ super().command(arg)
diff --git a/src/components/sound.ui b/src/components/sound.ui
new file mode 100644
index 0000000..5fc00c1
--- /dev/null
+++ b/src/components/sound.ui
@@ -0,0 +1,122 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Form</class>
+ <widget class="QWidget" name="Form">
+ <property name="geometry">
+ <rect>
+ <x>0</x>
+ <y>0</y>
+ <width>586</width>
+ <height>197</height>
+ </rect>
+ </property>
+ <property name="windowTitle">
+ <string>Form</string>
+ </property>
+ <layout class="QVBoxLayout" name="verticalLayout_2">
+ <item>
+ <layout class="QVBoxLayout" name="verticalLayout">
+ <property name="leftMargin">
+ <number>4</number>
+ </property>
+ <item>
+ <layout class="QHBoxLayout" name="horizontalLayout_8">
+ <item>
+ <widget class="QLabel" name="label_textColor">
+ <property name="sizePolicy">
+ <sizepolicy hsizetype="Fixed" vsizetype="Preferred">
+ <horstretch>0</horstretch>
+ <verstretch>0</verstretch>
+ </sizepolicy>
+ </property>
+ <property name="minimumSize">
+ <size>
+ <width>31</width>
+ <height>0</height>
+ </size>
+ </property>
+ <property name="text">
+ <string>Audio File</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QLineEdit" name="lineEdit_sound">
+ <property name="minimumSize">
+ <size>
+ <width>1</width>
+ <height>0</height>
+ </size>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QPushButton" name="pushButton_sound">
+ <property name="sizePolicy">
+ <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+ <horstretch>0</horstretch>
+ <verstretch>0</verstretch>
+ </sizepolicy>
+ </property>
+ <property name="minimumSize">
+ <size>
+ <width>1</width>
+ <height>0</height>
+ </size>
+ </property>
+ <property name="maximumSize">
+ <size>
+ <width>32</width>
+ <height>32</height>
+ </size>
+ </property>
+ <property name="text">
+ <string>...</string>
+ </property>
+ <property name="MaximumSize" stdset="0">
+ <size>
+ <width>32</width>
+ <height>32</height>
+ </size>
+ </property>
+ </widget>
+ </item>
+ </layout>
+ </item>
+ </layout>
+ </item>
+ <item>
+ <layout class="QHBoxLayout" name="horizontalLayout_2">
+ <item>
+ <spacer name="horizontalSpacer_2">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>20</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ </layout>
+ </item>
+ <item>
+ <spacer name="verticalSpacer">
+ <property name="orientation">
+ <enum>Qt::Vertical</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>20</width>
+ <height>40</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ </layout>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/src/core.py b/src/core.py
index 9792e88..db430d1 100644
--- a/src/core.py
+++ b/src/core.py
@@ -485,7 +485,8 @@ class Core:
'-ac', '1', # mono (set to '2' for stereo)
'-']
in_pipe = toolkit.openPipe(
- command, stdout=sp.PIPE, stderr=sp.DEVNULL, bufsize=10**8)
+ command, stdout=sp.PIPE, stderr=sp.DEVNULL, bufsize=10**8
+ )
completeAudioArray = numpy.empty(0, dtype="int16")
@@ -495,7 +496,7 @@ class Core:
if self.canceled:
break
# read 2 seconds of audio
- progress = progress + 4
+ progress += 4
raw_audio = in_pipe.stdout.read(88200*4)
if len(raw_audio) == 0:
break
diff --git a/src/frame.py b/src/frame.py
index 57d33b0..c066cdb 100644
--- a/src/frame.py
+++ b/src/frame.py
@@ -14,7 +14,7 @@ class FramePainter(QtGui.QPainter):
'''
def __init__(self, width, height):
image = BlankFrame(width, height)
- self.image = ImageQt(image)
+ self.image = QtGui.QImage(ImageQt(image))
super().__init__(self.image)
def setPen(self, RgbTuple):
diff --git a/src/mainwindow.py b/src/mainwindow.py
index 165b5bd..3cd45d6 100644
--- a/src/mainwindow.py
+++ b/src/mainwindow.py
@@ -557,9 +557,11 @@ class MainWindow(QtWidgets.QMainWindow):
self.window.progressLabel.setHidden(True)
self.drawPreview(True)
+ @QtCore.pyqtSlot(int)
def progressBarUpdated(self, value):
self.window.progressBar_createVideo.setValue(value)
+ @QtCore.pyqtSlot(str)
def progressBarSetText(self, value):
if sys.platform == 'darwin':
self.window.progressLabel.setText(value)
diff --git a/src/preview_thread.py b/src/preview_thread.py
index 95a26ec..a72845b 100644
--- a/src/preview_thread.py
+++ b/src/preview_thread.py
@@ -69,10 +69,13 @@ class Worker(QtCore.QObject):
str(component),
detail=str(e),
icon='Warning',
- parent=None # mainwindow is in a different thread
+ parent=None # MainWindow is in a different thread
+ )
+ self.imageCreated.emit(
+ QtGui.QImage(ImageQt(
+ FloodFrame(width, height, (0, 0, 0, 0))
+ ))
)
- from frame import BlankFrame
- self.imageCreated.emit(ImageQt(BlankFrame))
self.error.emit()
break
else:
diff --git a/src/video_thread.py b/src/video_thread.py
index e7f1ac7..bd94be3 100644
--- a/src/video_thread.py
+++ b/src/video_thread.py
@@ -19,7 +19,7 @@ import time
import signal
import core
-from toolkit import openPipe
+from toolkit import openPipe, checkOutput
from frame import FloodFrame
@@ -102,32 +102,71 @@ class Worker(QtCore.QObject):
audioI, frame = self.previewQueue.get()
if time.time() - self.lastPreview >= 0.06 or audioI == 0:
image = Image.alpha_composite(background.copy(), frame)
- self.imageCreated.emit(ImageQt(image))
+ self.imageCreated.emit(QtGui.QImage(ImageQt(image)))
self.lastPreview = time.time()
self.previewQueue.task_done()
@pyqtSlot(str, str, list)
def createVideo(self, inputFile, outputFile, components):
+ numpy.seterr(divide='ignore')
self.encoding.emit(True)
self.components = components
self.outputFile = outputFile
-
- self.reset()
-
+ self.extraAudio = []
self.width = int(self.core.settings.value('outputWidth'))
self.height = int(self.core.settings.value('outputHeight'))
+
+ self.compositeQueue = Queue()
+ self.compositeQueue.maxsize = 20
+ self.renderQueue = PriorityQueue()
+ self.renderQueue.maxsize = 20
+ self.previewQueue = PriorityQueue()
+
+ self.reset()
progressBarValue = 0
self.progressBarUpdate.emit(progressBarValue)
- self.progressBarSetText.emit('Loading audio file...')
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ # READ AUDIO AND INITIALIZE COMPONENTS
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
+
+ self.progressBarSetText.emit("Loading audio file...")
self.completeAudioArray = self.core.readAudioFile(inputFile, self)
- # test if user has libfdk_aac
- encoders = sp.check_output(
- self.core.FFMPEG_BIN + " -encoders -hide_banner",
- shell=True)
+ self.progressBarUpdate.emit(0)
+ self.progressBarSetText.emit("Starting components...")
+ print('Loaded Components:', ", ".join([
+ "%s) %s" % (num, str(component))
+ for num, component in enumerate(reversed(self.components))
+ ]))
+ self.staticComponents = {}
+ numComps = len(self.components)
+ for compNo, comp in enumerate(self.components):
+ properties = None
+ properties = comp.preFrameRender(
+ worker=self,
+ completeAudioArray=self.completeAudioArray,
+ sampleSize=self.sampleSize,
+ progressBarUpdate=self.progressBarUpdate,
+ progressBarSetText=self.progressBarSetText
+ )
+
+ if properties:
+ if 'static' in properties:
+ self.staticComponents[compNo] = \
+ comp.frameRender(compNo, 0).copy()
+ if 'audio' in properties:
+ self.extraAudio.append(comp.audio())
+
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ # DEDUCE ENCODERS
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ # test if user has libfdk_aac
+ encoders = checkOutput(
+ "%s -encoders -hide_banner" % self.core.FFMPEG_BIN, shell=True
+ )
encoders = encoders.decode("utf-8")
acodec = self.core.settings.value('outputAudioCodec')
@@ -157,72 +196,66 @@ class Worker(QtCore.QObject):
aencoder = encoder
break
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ # CREATE PIPE TO FFMPEG
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
+
ffmpegCommand = [
self.core.FFMPEG_BIN,
'-thread_queue_size', '512',
'-y', # overwrite the output file if it already exists.
+
+ # INPUT VIDEO
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-s', str(self.width)+'x'+str(self.height), # size of one frame
'-pix_fmt', 'rgba',
-
- # frames per second
'-r', self.core.settings.value('outputFrameRate'),
- '-i', '-', # The input comes from a pipe
- '-an',
- '-i', inputFile,
+ '-i', '-', # the video input comes from a pipe
+ '-an', # the video input has no sound
+
+ # INPUT SOUND
+ '-i', inputFile
+ ]
+
+ if self.extraAudio:
+ for extraInputFile in self.extraAudio:
+ ffmpegCommand.extend([
+ '-i', extraInputFile
+ ])
+ ffmpegCommand.extend([
+ '-filter_complex',
+ 'amix=inputs=%s:duration=longest:dropout_transition=3' % str(
+ len(self.extraAudio) + 1
+ )
+ ])
+
+ ffmpegCommand.extend([
+ # OUTPUT
'-vcodec', vencoder,
- '-acodec', aencoder, # output audio codec
+ '-acodec', aencoder,
'-b:v', vbitrate,
'-b:a', abitrate,
'-pix_fmt', self.core.settings.value('outputVideoFormat'),
'-preset', self.core.settings.value('outputPreset'),
'-f', container
- ]
+ ])
+ print(ffmpegCommand)
if acodec == 'aac':
ffmpegCommand.append('-strict')
ffmpegCommand.append('-2')
ffmpegCommand.append(outputFile)
-
- # ### Now start creating video for output ###
- numpy.seterr(divide='ignore')
-
- # Call preFrameRender on all components
- print('Loaded Components:', ", ".join([
- "%s) %s" % (num, str(component))
- for num, component in enumerate(reversed(self.components))
- ]))
- self.staticComponents = {}
- numComps = len(self.components)
- for compNo, comp in enumerate(self.components):
- pStr = "Starting components..."
- self.progressBarSetText.emit(pStr)
- properties = None
- properties = comp.preFrameRender(
- worker=self,
- completeAudioArray=self.completeAudioArray,
- sampleSize=self.sampleSize,
- progressBarUpdate=self.progressBarUpdate,
- progressBarSetText=self.progressBarSetText
- )
-
- if properties and 'static' in properties:
- self.staticComponents[compNo] = \
- comp.frameRender(compNo, 0).copy()
- self.progressBarUpdate.emit(100)
-
- # Create ffmpeg pipe and queues for frames
self.out_pipe = openPipe(
- ffmpegCommand, stdin=sp.PIPE, stdout=sys.stdout, stderr=sys.stdout)
- self.compositeQueue = Queue()
- self.compositeQueue.maxsize = 20
- self.renderQueue = PriorityQueue()
- self.renderQueue.maxsize = 20
- self.previewQueue = PriorityQueue()
+ ffmpegCommand, stdin=sp.PIPE, stdout=sys.stdout, stderr=sys.stdout
+ )
+
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ # START CREATING THE VIDEO
+ # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
- # Threads to render frames and send them back here for piping out
+ # Make three renderNodes in new threads to create the frames
self.renderThreads = []
for i in range(3):
self.renderThreads.append(
@@ -235,16 +268,17 @@ class Worker(QtCore.QObject):
self.dispatchThread.daemon = True
self.dispatchThread.start()
+ self.lastPreview = 0.0
self.previewDispatch = Thread(
target=self.previewDispatch, name="Render Dispatch Thread")
self.previewDispatch.daemon = True
self.previewDispatch.start()
+ # Begin piping into ffmpeg!
frameBuffer = {}
- self.lastPreview = 0.0
- self.progressBarUpdate.emit(0)
- pStr = "Exporting video..."
- self.progressBarSetText.emit(pStr)
+ progressBarValue = 0
+ self.progressBarUpdate.emit(progressBarValue)
+ self.progressBarSetText.emit("Exporting video...")
if not self.canceled:
for audioI in range(
0, len(self.completeAudioArray), self.sampleSize):
@@ -253,29 +287,26 @@ class Worker(QtCore.QObject):
# if frame's in buffer, pipe it to ffmpeg
break
# else fetch the next frame & add to the buffer
- data = self.renderQueue.get()
- frameBuffer[data[0]] = data[1]
+ audioI_, frame = self.renderQueue.get()
+ frameBuffer[audioI_] = frame
self.renderQueue.task_done()
if self.canceled:
break
try:
self.out_pipe.stdin.write(frameBuffer[audioI].tobytes())
- self.previewQueue.put([audioI, frameBuffer[audioI]])
- del frameBuffer[audioI]
+ self.previewQueue.put([audioI, frameBuffer.pop(audioI)])
except:
break
# increase progress bar value
- if progressBarValue + 1 <= (
- audioI / len(self.completeAudioArray)
- ) * 100:
- progressBarValue = numpy.floor(
- (i / len(self.completeAudioArray)) * 100)
+ completion = (audioI / len(self.completeAudioArray)) * 100
+ if progressBarValue + 1 <= completion:
+ progressBarValue = numpy.floor(completion)
self.progressBarUpdate.emit(progressBarValue)
- pStr = "Exporting video: " + str(int(progressBarValue)) \
- + "%"
- self.progressBarSetText.emit(pStr)
+ self.progressBarSetText.emit(
+ "Exporting video: %s%%" % str(int(progressBarValue))
+ )
numpy.seterr(all='print')