aboutsummaryrefslogtreecommitdiff
path: root/core.py
blob: 996bd52467f64f4488cde6f6a65411c8900c77c2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
import sys, io, os
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtGui import QPainter, QColor
from os.path import expanduser
import subprocess as sp
import numpy
from PIL import Image, ImageDraw, ImageFont
from PIL.ImageQt import ImageQt

class Core():
  """Backend for rendering an audio-visualizer video.

  Caches the rendered background frame and locates the ffmpeg/avconv
  binary used to decode audio. GUI-toolkit objects (PyQt4) and PIL are
  used for image work; numpy for the spectrum math.
  """

  def __init__(self):
    # Cache key for the background: drawBaseImage() only re-reads the
    # image file when this path changes.
    self.lastBackgroundImage = ""
    self._image = None

    self.FFMPEG_BIN = self.findFfmpeg()

  def findFfmpeg(self):
    """Return the name of an available ffmpeg-compatible binary.

    On Windows a bundled ffmpeg.exe is assumed. Elsewhere 'ffmpeg' is
    probed with '-version'; on failure 'avconv' (libav) is returned as
    a fallback.
    """
    if sys.platform == "win32":
      return "ffmpeg.exe"
    try:
      with open(os.devnull, "w") as devnull:
        sp.check_call(['ffmpeg', '-version'], stdout=devnull, stderr=devnull)
      return "ffmpeg"
    except (OSError, sp.CalledProcessError):
      # ffmpeg missing or broken; was a bare `except:` which would also
      # swallow KeyboardInterrupt/SystemExit.
      return "avconv"

  def drawBaseImage(self, backgroundImage, titleText, titleFont):
    """Return a 1280x720 PIL image: the background plus the title text.

    The (resized) background is cached between calls via self._image /
    self.lastBackgroundImage; only the text overlay is redrawn each
    call. An empty backgroundImage path means a solid black frame.
    """
    if self._image is None or self.lastBackgroundImage != backgroundImage:
      self.lastBackgroundImage = backgroundImage

      if backgroundImage == "":
        im = Image.new("RGB", (1280, 720), "black")
      else:
        im = Image.open(backgroundImage)

      # resize if necessary
      if im.size != (1280, 720):
        im = im.resize((1280, 720), Image.ANTIALIAS)

      self._image = ImageQt(im)

    self._image1 = QtGui.QImage(self._image)
    painter = QPainter(self._image1)
    font = titleFont
    font.setPointSizeF(35)
    painter.setFont(font)
    painter.setPen(QColor(255, 255, 255))

    painter.drawText(70, 375, titleText)
    painter.end()

    # Round-trip QImage -> PNG bytes -> PIL Image, since ImageQt only
    # converts in the other direction.
    buffer = QtCore.QBuffer()
    buffer.open(QtCore.QIODevice.ReadWrite)
    self._image1.save(buffer, "PNG")

    strio = io.BytesIO()
    strio.write(buffer.data())
    buffer.close()
    strio.seek(0)
    return Image.open(strio)

  def drawBars(self, spectrum, image):
    """Return a 1280x720 frame: *image* with mirrored spectrum bars.

    63 bars are drawn in the top half from spectrum[0], spectrum[4],
    spectrum[8], ... and mirrored vertically onto the bottom half.
    """
    imTop = Image.new("RGBA", (1280, 360))
    draw = ImageDraw.Draw(imTop)
    for j in range(0, 63):
      # Wide translucent bar behind a narrow solid one.
      draw.rectangle((10 + j * 20, 325, 10 + j * 20 + 20, 325 - spectrum[j * 4] * 1 - 10), fill=(255, 255, 255, 50))
      draw.rectangle((15 + j * 20, 320, 15 + j * 20 + 10, 320 - spectrum[j * 4] * 1), fill="white")

    imBottom = imTop.transpose(Image.FLIP_TOP_BOTTOM)

    im = Image.new("RGB", (1280, 720), "black")

    im.paste(image, (0, 0))
    im.paste(imTop, (0, 0), mask=imTop)
    im.paste(imBottom, (0, 360), mask=imBottom)

    return im

  def readAudioFile(self, filename):
    """Decode *filename* to a mono 44100 Hz int16 numpy array via ffmpeg.

    One second (44100 samples) of trailing silence is appended so later
    frames can safely read past the end of the audio.
    """
    command = [ self.FFMPEG_BIN,
          '-i', filename,
          '-f', 's16le',
          '-acodec', 'pcm_s16le',
          '-ar', '44100', # output will have 44100 Hz
          '-ac', '1', # mono (set to '2' for stereo)
          '-']
    in_pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.DEVNULL, bufsize=10**8)

    # Collect chunks and concatenate once at the end; appending to a
    # growing numpy array inside the loop is quadratic.
    chunks = []
    while True:
      # read 4 seconds of audio (44100 samples/s * 2 bytes * 4)
      raw_audio = in_pipe.stdout.read(88200*4)
      if len(raw_audio) == 0:
        break
      # frombuffer replaces the deprecated numpy.fromstring
      chunks.append(numpy.frombuffer(raw_audio, dtype="int16"))

    in_pipe.kill()
    in_pipe.wait()

    if chunks:
      completeAudioArray = numpy.concatenate(chunks)
    else:
      completeAudioArray = numpy.empty(0, dtype="int16")

    # add one second of 0s at the end
    completeAudioArrayCopy = numpy.zeros(len(completeAudioArray) + 44100, dtype="int16")
    completeAudioArrayCopy[:len(completeAudioArray)] = completeAudioArray
    return completeAudioArrayCopy

  def transformData(self, i, completeAudioArray, sampleSize, smoothConstantDown, smoothConstantUp, lastSpectrum):
    """Return the smoothed dB magnitude spectrum of one audio window.

    A Hanning-windowed slice of *sampleSize* samples starting at *i* is
    zero-padded to 2048 samples and FFT'd. The result is exponentially
    smoothed against *lastSpectrum* (mutated in place when not None)
    with separate attack/decay constants, and returned (length 1023).
    """
    if len(completeAudioArray) < (i + sampleSize):
      # Clamp the final window to whatever audio remains.
      sampleSize = len(completeAudioArray) - i

    window = numpy.hanning(sampleSize)
    data = completeAudioArray[i:i+sampleSize] * window
    paddedSampleSize = 2048
    paddedData = numpy.pad(data, (0, paddedSampleSize - sampleSize), 'constant')
    spectrum = numpy.fft.fft(paddedData)

    # Integer division is required: a float slice index (the original
    # used `/`) raises TypeError on Python 3.
    y = abs(spectrum[0:paddedSampleSize//2 - 1])

    # filter the noise away
    # y[y<80] = 0

    y = 20 * numpy.log10(y)
    y[numpy.isinf(y)] = 0  # log10(0) -> -inf; flatten to 0

    if lastSpectrum is not None:
      # Separate smoothing constants for falling vs rising bins.
      lastSpectrum[y < lastSpectrum] = y[y < lastSpectrum] * smoothConstantDown + lastSpectrum[y < lastSpectrum] * (1 - smoothConstantDown)
      lastSpectrum[y >= lastSpectrum] = y[y >= lastSpectrum] * smoothConstantUp + lastSpectrum[y >= lastSpectrum] * (1 - smoothConstantUp)
    else:
      lastSpectrum = y

    return lastSpectrum