diff options
| author | Brianna | 2017-07-27 22:47:40 -0400 |
|---|---|---|
| committer | GitHub | 2017-07-27 22:47:40 -0400 |
| commit | ae2af28808816d67e964b63bee1b5dbc18073672 (patch) | |
| tree | 93818b30c43761288e0414e620e27871ac32cc8d /src/toolkit | |
| parent | c799305eff66432d1d42ff3a8d1b7fd24448e1b6 (diff) | |
| parent | 6ecb6df23628de65c9efd8cac4810fdf74238c3d (diff) | |
Merge pull request #49 from djfun/toolkit
Code reorganization, more readable component code, better error messages
Diffstat (limited to 'src/toolkit')
| -rw-r--r-- | src/toolkit/__init__.py | 1 |
| -rw-r--r-- | src/toolkit/common.py | 117 |
| -rw-r--r-- | src/toolkit/ffmpeg.py | 313 |
| -rw-r--r-- | src/toolkit/frame.py | 82 |

4 files changed, 513 insertions(+), 0 deletions(-)
# =====================================================================
# Reconstructed from a cgit diff dump.  The commit adds four new files;
# each file's content follows, separated by marker comments.
# =====================================================================

# --------------------- src/toolkit/__init__.py -----------------------

from toolkit.common import *


# ---------------------- src/toolkit/common.py ------------------------
'''
    Common functions
'''
from PyQt5 import QtWidgets
import string
import os
import sys
import subprocess
import functools
from collections import OrderedDict


def badName(name):
    '''Returns whether a name contains non-alphanumeric chars'''
    # any() accepts a generator directly; no intermediate list needed
    return any(letter in string.punctuation for letter in name)


def alphabetizeDict(dictionary):
    '''Alphabetizes a dict into OrderedDict '''
    return OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))


def presetToString(dictionary):
    '''Returns string repr of a preset'''
    return repr(alphabetizeDict(dictionary))


def presetFromString(string):
    '''Turns a string repr of OrderedDict into a regular dict'''
    # SECURITY NOTE(review): eval() on preset text will execute arbitrary
    # code if a preset file is tampered with.  Consider ast.literal_eval
    # once presets are guaranteed to contain only literals.
    return dict(eval(string))


def appendUppercase(lst):
    '''Appends an uppercased copy of each existing entry to lst.

    Mutates the list in place and returns it for convenience.
    '''
    # Materialize the comprehension first so we never iterate a list
    # that is being extended (the original zip(lst, range(len(lst)))
    # trick achieved the same bound far less readably).
    lst.extend([form.upper() for form in lst])
    return lst


def hideCmdWin(func):
    ''' Stops CMD window from appearing on Windows.
        Adapted from here: http://code.activestate.com/recipes/409002/
    '''
    @functools.wraps(func)  # preserve wrapped function's name/docstring
    def decorator(commandList, **kwargs):
        if sys.platform == 'win32':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            kwargs['startupinfo'] = startupinfo
        return func(commandList, **kwargs)
    return decorator


@hideCmdWin
def checkOutput(commandList, **kwargs):
    '''subprocess.check_output without a console window on Windows.'''
    return subprocess.check_output(commandList, **kwargs)


@hideCmdWin
def openPipe(commandList, **kwargs):
    '''subprocess.Popen without a console window on Windows.'''
    return subprocess.Popen(commandList, **kwargs)


def disableWhenEncoding(func):
    '''Decorator: make a method a no-op while self.encoding is truthy.'''
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        if self.encoding:
            return
        return func(self, *args, **kwargs)
    return decorator


def disableWhenOpeningProject(func):
    '''Decorator: make a method a no-op while a project is being opened.'''
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        if self.core.openingProject:
            return
        return func(self, *args, **kwargs)
    return decorator


def pickColor():
    '''
        Use color picker to get color input from the user,
        and return this as an RGB string and QPushButton stylesheet.
        In a subclass apply stylesheet to any color selection widgets
    '''
    dialog = QtWidgets.QColorDialog()
    dialog.setOption(QtWidgets.QColorDialog.ShowAlphaChannel, True)
    color = dialog.getColor()
    if color.isValid():
        RGBstring = '%s,%s,%s' % (
            str(color.red()), str(color.green()), str(color.blue()))
        btnStyle = "QPushButton{background-color: %s; outline: none;}" \
            % color.name()
        return RGBstring, btnStyle
    else:
        return None, None


def rgbFromString(string):
    '''Turns an RGB string like "255, 255, 255" into a tuple'''
    try:
        tup = tuple([int(i) for i in string.split(',')])
        if len(tup) != 3:
            raise ValueError
        for i in tup:
            if i > 255 or i < 0:
                raise ValueError
        return tup
    except (ValueError, AttributeError):
        # Catch only the failures we expect (bad int / non-string input);
        # the former bare `except:` also swallowed KeyboardInterrupt etc.
        return (255, 255, 255)


def formatTraceback(tb=None):
    '''Format a traceback (default: the currently-handled exception).'''
    import traceback
    if tb is None:
        import sys
        tb = sys.exc_info()[2]
    return 'Traceback:\n%s' % "\n".join(traceback.format_tb(tb))


# ---------------------- src/toolkit/ffmpeg.py ------------------------
'''
    Tools for using ffmpeg
'''
import numpy
import sys
import os
import subprocess

import core
from toolkit.common import checkOutput, openPipe


def findFfmpeg():
    '''Locate the ffmpeg (or avconv) binary to use for this platform.'''
    if getattr(sys, 'frozen', False):
        # The application is frozen
        if sys.platform == "win32":
            return os.path.join(core.Core.wd, 'ffmpeg.exe')
        else:
            return os.path.join(core.Core.wd, 'ffmpeg')
    else:
        if sys.platform == "win32":
            return "ffmpeg"
        else:
            try:
                with open(os.devnull, "w") as f:
                    checkOutput(
                        ['ffmpeg', '-version'], stderr=f
                    )
                return "ffmpeg"
            except subprocess.CalledProcessError:
                # No working ffmpeg on PATH; fall back to avconv
                return "avconv"


def createFfmpegCommand(inputFile, outputFile, components, duration=-1):
    '''
        Constructs the major ffmpeg command used to export the video.
        Returns the command as an argument list suitable for Popen.
        Raises ValueError if no usable container/encoder is configured.
    '''
    if duration == -1:
        duration = getAudioDuration(inputFile)
    safeDuration = "{0:.3f}".format(duration - 0.05)  # used by filters
    duration = "{0:.3f}".format(duration + 0.1)  # used by input sources
    Core = core.Core

    # Test if user has libfdk_aac
    # NOTE(review): shell=True is safe only while FFMPEG_BIN is an
    # internal value; prefer a list argument if it ever becomes user input
    encoders = checkOutput(
        "%s -encoders -hide_banner" % Core.FFMPEG_BIN, shell=True
    )
    encoders = encoders.decode("utf-8")

    options = Core.encoderOptions
    containerName = Core.settings.value('outputContainer')
    vcodec = Core.settings.value('outputVideoCodec')
    vbitrate = str(Core.settings.value('outputVideoBitrate'))+'k'
    acodec = Core.settings.value('outputAudioCodec')
    abitrate = str(Core.settings.value('outputAudioBitrate'))+'k'

    # Resolve the container and the first available encoder for each
    # codec; fail loudly instead of with an unbound-name NameError.
    for cont in options['containers']:
        if cont['name'] == containerName:
            container = cont['container']
            break
    else:
        raise ValueError('Unknown container: %s' % containerName)

    vencoders = options['video-codecs'][vcodec]
    aencoders = options['audio-codecs'][acodec]

    for encoder in vencoders:
        if encoder in encoders:
            vencoder = encoder
            break
    else:
        raise ValueError('No video encoder available for %s' % vcodec)

    for encoder in aencoders:
        if encoder in encoders:
            aencoder = encoder
            break
    else:
        raise ValueError('No audio encoder available for %s' % acodec)

    ffmpegCommand = [
        Core.FFMPEG_BIN,
        '-thread_queue_size', '512',
        '-y',  # overwrite the output file if it already exists.

        # INPUT VIDEO
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-s', '%sx%s' % (
            Core.settings.value('outputWidth'),
            Core.settings.value('outputHeight'),
        ),
        '-pix_fmt', 'rgba',
        '-r', Core.settings.value('outputFrameRate'),
        '-t', duration,
        '-i', '-',  # the video input comes from a pipe
        '-an',  # the video input has no sound

        # INPUT SOUND
        '-t', duration,
        '-i', inputFile
    ]

    # Add extra audio inputs and any needed avfilters
    # NOTE: Global filters are currently hard-coded here for debugging use
    globalFilters = 0  # increase to add global filters
    extraAudio = [
        comp.audio for comp in components
        if 'audio' in comp.properties()
    ]
    if extraAudio or globalFilters > 0:
        # Add -i options for extra input files
        extraFilters = {}
        for streamNo, params in enumerate(reversed(extraAudio)):
            extraInputFile, params = params
            ffmpegCommand.extend([
                '-t', safeDuration,
                # Tell ffmpeg about shorter clips (seemingly not needed)
                #   streamDuration = getAudioDuration(extraInputFile)
                #   if streamDuration and streamDuration > float(safeDuration)
                #   else "{0:.3f}".format(streamDuration),
                '-i', extraInputFile
            ])
            # Construct dataset of extra filters we'll need to add later
            for ffmpegFilter in params:
                if streamNo + 2 not in extraFilters:
                    extraFilters[streamNo + 2] = []
                extraFilters[streamNo + 2].append((
                    ffmpegFilter, params[ffmpegFilter]
                ))

        # Start creating avfilters! Popen-style, so don't use semicolons;
        extraFilterCommand = []

        if globalFilters <= 0:
            # Dictionary of last-used tmp labels for a given stream number
            tmpInputs = {streamNo: -1 for streamNo in extraFilters}
        else:
            # Insert blank entries for global filters into extraFilters
            # so the per-stream filters know what input to source later
            for streamNo in range(len(extraAudio), 0, -1):
                if streamNo + 1 not in extraFilters:
                    extraFilters[streamNo + 1] = []
            # Also filter the primary audio track
            extraFilters[1] = []
            tmpInputs = {
                streamNo: globalFilters - 1
                for streamNo in extraFilters
            }

            # Add the global filters!
            # NOTE: list length must = globalFilters, currently hardcoded
            if tmpInputs:
                extraFilterCommand.extend([
                    '[%s:a] ashowinfo [%stmp0]' % (
                        str(streamNo),
                        str(streamNo)
                    )
                    for streamNo in tmpInputs
                ])

        # Now add the per-stream filters!
        for streamNo, paramList in extraFilters.items():
            for param in paramList:
                source = '[%s:a]' % str(streamNo) \
                    if tmpInputs[streamNo] == -1 else \
                    '[%stmp%s]' % (
                        str(streamNo), str(tmpInputs[streamNo])
                    )
                tmpInputs[streamNo] = tmpInputs[streamNo] + 1
                extraFilterCommand.append(
                    '%s %s%s [%stmp%s]' % (
                        source, param[0], param[1], str(streamNo),
                        str(tmpInputs[streamNo])
                    )
                )

        # Join all the filters together and combine into 1 stream
        extraFilterCommand = "; ".join(extraFilterCommand) + '; ' \
            if tmpInputs else ''
        ffmpegCommand.extend([
            '-filter_complex',
            extraFilterCommand +
            '%s amix=inputs=%s:duration=first [a]'
            % (
                "".join([
                    '[%stmp%s]' % (str(i), tmpInputs[i])
                    if i in extraFilters else '[%s:a]' % str(i)
                    for i in range(1, len(extraAudio) + 2)
                ]),
                str(len(extraAudio) + 1)
            ),
        ])

        # Only map audio from the filters, and video from the pipe
        ffmpegCommand.extend([
            '-map', '0:v',
            '-map', '[a]',
        ])

    ffmpegCommand.extend([
        # OUTPUT
        '-vcodec', vencoder,
        '-acodec', aencoder,
        '-b:v', vbitrate,
        '-b:a', abitrate,
        '-pix_fmt', Core.settings.value('outputVideoFormat'),
        '-preset', Core.settings.value('outputPreset'),
        '-f', container
    ])

    if acodec == 'aac':
        # Native aac encoder still requires -strict -2 on older ffmpeg
        ffmpegCommand.append('-strict')
        ffmpegCommand.append('-2')

    ffmpegCommand.append(outputFile)
    return ffmpegCommand


def testAudioStream(filename):
    '''Test if an audio stream definitely exists'''
    audioTestCommand = [
        core.Core.FFMPEG_BIN,
        '-i', filename,
        '-vn', '-f', 'null', '-'
    ]
    try:
        checkOutput(audioTestCommand, stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        return False
    else:
        return True


def getAudioDuration(filename):
    '''Try to get duration of audio file as float, or False if not possible'''
    command = [core.Core.FFMPEG_BIN, '-i', filename]

    try:
        fileInfo = checkOutput(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        # ffmpeg exits non-zero when given no output file, but the
        # banner we need to parse is still in its output
        fileInfo = ex.output

    info = fileInfo.decode("utf-8").split('\n')
    for line in info:
        if 'Duration' in line:
            d = line.split(',')[0]
            d = d.split(' ')[3]
            d = d.split(':')
            duration = float(d[0])*3600 + float(d[1])*60 + float(d[2])
            break
    else:
        # String not found in output
        return False
    return duration


def readAudioFile(filename, videoWorker):
    '''
        Creates the completeAudioArray given to components
        and used to draw the classic visualizer.
        Returns (int16 numpy array, duration) or None on failure/cancel.
    '''
    duration = getAudioDuration(filename)
    if not duration:
        print('Audio file doesn\'t exist or unreadable.')
        return

    command = [
        core.Core.FFMPEG_BIN,
        '-i', filename,
        '-f', 's16le',
        '-acodec', 'pcm_s16le',
        '-ar', '44100',  # ouput will have 44100 Hz
        '-ac', '1',  # mono (set to '2' for stereo)
        '-']
    in_pipe = openPipe(
        command,
        stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, bufsize=10**8
    )

    # Collect chunks and concatenate once at the end: repeated
    # numpy.append() reallocated the whole array every iteration.
    chunks = []

    progress = 0
    lastPercent = None
    while True:
        if core.Core.canceled:
            return
        # read 2 seconds of audio
        progress += 4
        raw_audio = in_pipe.stdout.read(88200*4)
        if len(raw_audio) == 0:
            break
        # frombuffer replaces the deprecated numpy.fromstring
        chunks.append(numpy.frombuffer(raw_audio, dtype="int16"))

        percent = int(100*(progress/duration))
        if percent >= 100:
            percent = 100

        if lastPercent != percent:
            string = 'Loading audio file: '+str(percent)+'%'
            videoWorker.progressBarSetText.emit(string)
            videoWorker.progressBarUpdate.emit(percent)

        lastPercent = percent

    in_pipe.kill()
    in_pipe.wait()

    # add 0s the end (one second of silence of padding)
    chunks.append(numpy.zeros(44100, dtype="int16"))
    completeAudioArray = numpy.concatenate(chunks)

    return (completeAudioArray, duration)


# ----------------------- src/toolkit/frame.py ------------------------
'''
    Common tools for drawing compatible frames in a Component's frameRender()
'''
from PyQt5 import QtGui
from PIL import Image
from PIL.ImageQt import ImageQt
import sys
import os

import core


class FramePainter(QtGui.QPainter):
    '''
        A QPainter for a blank frame, which can be converted into a
        Pillow image with finalize()
    '''
    def __init__(self, width, height):
        image = BlankFrame(width, height)
        self.image = QtGui.QImage(ImageQt(image))
        super().__init__(self.image)

    def setPen(self, RgbTuple):
        # Wrap in PaintColor so byte order matches the hardware
        super().setPen(PaintColor(*RgbTuple))

    def finalize(self):
        '''End painting and return the frame as a Pillow RGBA image.'''
        self.end()
        imBytes = self.image.bits().asstring(self.image.byteCount())

        return Image.frombytes(
            'RGBA', (self.image.width(), self.image.height()), imBytes
        )


class PaintColor(QtGui.QColor):
    '''Reverse the painter colour if the hardware stores RGB values backward'''
    def __init__(self, r, g, b, a=255):
        if sys.byteorder == 'big':
            super().__init__(r, g, b, a)
        else:
            super().__init__(b, g, r, a)


def defaultSize(framefunc):
    '''Makes width/height arguments optional'''
    @functools.wraps(framefunc)
    def decorator(*args):
        if len(args) < 2:
            newArgs = list(args)
            if len(args) == 0 or len(args) == 1:
                height = int(core.Core.settings.value("outputHeight"))
                newArgs.append(height)
            if len(args) == 0:
                width = int(core.Core.settings.value("outputWidth"))
                newArgs.insert(0, width)
            args = tuple(newArgs)
        return framefunc(*args)
    return decorator


def FloodFrame(width, height, RgbaTuple):
    '''A new RGBA frame filled entirely with one colour.'''
    return Image.new("RGBA", (width, height), RgbaTuple)


@defaultSize
def BlankFrame(width, height):
    '''The base frame used by each component to start drawing.'''
    return FloodFrame(width, height, (0, 0, 0, 0))


@defaultSize
def Checkerboard(width, height):
    '''
        A checkerboard to represent transparency to the user.
        TODO: Would be cool to generate this image with numpy instead.
    '''
    # Paste onto a fixed 1080p canvas, then resize to the output size
    image = FloodFrame(1920, 1080, (0, 0, 0, 0))
    image.paste(Image.open(
        os.path.join(core.Core.wd, "background.png")),
        (0, 0)
    )
    image = image.resize((width, height))
    return image
