aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--components/text.py7
-rw-r--r--video_thread.py42
2 files changed, 24 insertions, 25 deletions
diff --git a/components/text.py b/components/text.py
index c9359f2..c0bb61f 100644
--- a/components/text.py
+++ b/components/text.py
@@ -95,19 +95,16 @@ class Component(__base__.Component):
im = Image.new("RGBA", (width, height),(0,0,0,0))
image = ImageQt(im)
- image1 = QtGui.QImage(image)
- painter = QPainter(image1)
+ painter = QPainter(image)
self.titleFont.setPixelSize(self.fontSize)
painter.setFont(self.titleFont)
painter.setPen(QColor(*self.textColor))
-
- fm = QtGui.QFontMetrics(self.titleFont)
painter.drawText(self.xPosition, self.yPosition, self.title)
painter.end()
buffer = QtCore.QBuffer()
buffer.open(QtCore.QIODevice.ReadWrite)
- image1.save(buffer, "PNG")
+ image.save(buffer, "PNG")
strio = io.BytesIO()
strio.write(buffer.data())
diff --git a/video_thread.py b/video_thread.py
index c37741d..6972be4 100644
--- a/video_thread.py
+++ b/video_thread.py
@@ -71,7 +71,7 @@ class Worker(QtCore.QObject):
if time.time() - self.lastPreview >= 0.05 or i[0] == 0:
self._image = ImageQt(i[1])
self.imageCreated.emit(QtGui.QImage(self._image))
- lastPreview = time.time()
+ self.lastPreview = time.time()
self.previewQueue.task_done()
@@ -139,48 +139,50 @@ class Worker(QtCore.QObject):
# create video for output
numpy.seterr(divide='ignore')
+ # initialize components
+ print('loaded components:',
+ ["%s%s" % (num, str(component)) for num, component in enumerate(components)])
+ self.staticComponents = {}
+ for compNo, comp in enumerate(components):
+ properties = None
+ properties = comp.preFrameRender(
+ worker=self,
+ completeAudioArray=self.completeAudioArray,
+ sampleSize=self.sampleSize
+ )
+
+ if properties and 'static' in properties:
+ self.staticComponents[compNo] = comp.frameRender(compNo, 0)
+
self.compositeQueue = Queue()
self.compositeQueue.maxsize = 20
self.renderQueue = PriorityQueue()
self.renderQueue.maxsize = 20
self.previewQueue = PriorityQueue()
+ # create threads to render frames and send them back here for piping out
for i in range(2):
- t = Thread(target=self.renderNode)
+ t = Thread(target=self.renderNode, name="Render Thread")
t.daemon = True
t.start()
- self.dispatchThread = Thread(target=self.renderDispatch)
+ self.dispatchThread = Thread(target=self.renderDispatch, name="Render Dispatch Thread")
self.dispatchThread.daemon = True
self.dispatchThread.start()
- self.previewDispatch = Thread(target=self.previewDispatch)
+ self.previewDispatch = Thread(target=self.previewDispatch, name="Preview Dispatch Thread")
self.previewDispatch.daemon = True
self.previewDispatch.start()
frameBuffer = {}
self.lastPreview = 0.0
- # initialize components
- print('loaded components:',
- ["%s%s" % (num, str(component)) for num, component in enumerate(components)])
- self.staticComponents = {}
- for compNo, comp in enumerate(components):
- properties = None
- properties = comp.preFrameRender(
- worker=self,
- completeAudioArray=self.completeAudioArray,
- sampleSize=self.sampleSize
- )
-
- if properties and 'static' in properties:
- self.staticComponents[compNo] = comp.frameRender(compNo, 0)
-
for i in range(0, len(self.completeAudioArray), self.sampleSize):
-
while True:
if i in frameBuffer:
+ # if frame's in buffer, pipe it to ffmpeg
break
+ # else fetch the next frame & add to the buffer
data = self.renderQueue.get()
frameBuffer[data[0]] = data[1]
self.renderQueue.task_done()