How to use Qt and simpleaudio together

Date: 2021-08-01 20:34:21

Tags: python audio pyqt pyqt5 python-multithreading

I am trying to play audio signals with simpleaudio in a GUI application in which the user should react to the content of the chunk being played and press a button. The moment the user presses the button, I want to switch to the next track. All of this is done in Python 3.x with PyQt5, using Qt's signals and slots. Even though my GUI does not freeze, I do not understand why I cannot read the button actions during (or between) the audio chunks being played; instead, all the actions are read only after all the tracks have finished.

My code looks like this. First the module that handles the tracks and chunks:

import simpleaudio as sa
import numpy as np

class MusicReactor:
    def __init__(self):
        self.timeProTone = ...
        self.deltaT = ...
        self.maxVolSkal = ...
        self.minVolSkal = ...
        self.frequencySample = ...
        self.currentTestedEar = ...
        
        
    def test_function(self, frequency):
        # array of time values
        times = np.arange(0, self.timeProTone, self.deltaT)
        # generator: yield one audio chunk and its current volume per time step
        for time in times:
            # get the volume and set the volume to the starting sequence
            currentVolume = (self.maxVolSkal-self.minVolSkal)/self.timeProTone * time + self.minVolSkal
            self.setVolumeScalar(currentVolume)
            # create the tone chunk as a numpy array
            audio = createTone(frequency, self.deltaT, self.frequencySample, self.currentTestedEar)
            yield audio, currentVolume


def createTone(frequency, duration, frequencySampled, currentTestedEar = TestEar.Both):

    # Generate array with seconds*sample_rate steps, ranging between 0 and seconds
    tt = np.linspace((0, 0), (duration, duration), int(duration * frequencySampled), False)

    #populate the other ear with zeros
    if currentTestedEar is not TestEar.Both:
        tt[:, 1-currentTestedEar.value] = 0  # this strategy only works if the note is a sinusoid: sin(0) = 0

    # Generate a sine wave at the requested frequency
    note = np.sin(frequency * tt * 2 * np.pi)

    # Normalize so that the highest value fills the 16-bit range
    audio = note * (2 ** 15 - 1) / np.max(np.abs(note))

    # Convert to 16-bit data
    audio = audio.astype(np.int16)
    return audio

def playTone(audio, frequencySample, num_channels=1, bytes_per_sample=2):
    # Start playback
    play_obj = sa.play_buffer(audio, num_channels, bytes_per_sample, frequencySample)
    # Wait for playback to finish (this call blocks the thread that runs it)
    play_obj.wait_done()

def generateRndFreq(minF,maxF):
    freq = np.random.uniform(low=minF, high=maxF)
    return freq

Now the GUI class and its corresponding worker class:

class HearingTest_ui(QWidget):
    # Send info through signals to subthreads
    sig_int_sender = pyqtSignal(int)
    hearingObjSender = pyqtSignal(Hearing.MusicReactor)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        uic.loadUi("testForm.ui", self)

        
        self.Both_rB.toggled.connect(self.onTogle_earTested)
        self.Links_rB.toggled.connect(self.onTogle_earTested)
        self.Recht_rB.toggled.connect(self.onTogle_earTested)


        # Method 2 Test
        self.ML_startButton.clicked.connect(self.runMethod2Test)
        self.setMaxMLProgressBar()
        self.ml_nTests = self.ML_spinBox.value()
        self.ML_spinBox.valueChanged.connect(self.setNTests)
        self.ML_spinBox.valueChanged.connect(self.setMaxMLProgressBar)

        # Hearing Test Object
        self.HT = Hearing.MusicReactor()


    def runMethod2Test(self):
        # Preprocessing
        self.HT.choose_ear(self.testedEarTuple) # reads the toggled radio button to assign a channel for the music chunk
        # thread and worker configuration
        # Step 2: Create a QThread object
        self.ml_thread = QThread(parent=self)
        # Step 3: Create a worker object
        self.ml_worker = ML_Worker(self.ml_nTests)
        # Step 4: Move worker to the thread
        self.ml_worker.moveToThread(self.ml_thread)
        # Step 5: Connect signals and slots
        #self.ml_thread.started.connect(partial(self.ml_worker.actualLongTaskFromHearingTest, self.HT))
        self.hearingObjSender.connect(self.ml_worker.actualLongTaskFromHearingTest)
        self.ml_worker.progress.connect(self.updateProgressbar)
        self.ML_spinBox.valueChanged.connect(self.ml_worker.set_maxTests)
        self.sig_int_sender.connect(self.ml_worker.set_maxTests)
        self.ML_yesButton.clicked.connect(self.ml_worker.change_Flag)
        self.ml_worker.request_playchunk.connect(self.ml_worker.sendAudio2queue)
        self.ml_worker.finished.connect(self.ml_thread.quit)
        self.ml_worker.finished.connect(self.ml_worker.deleteLater)
        self.ml_thread.finished.connect(self.ml_thread.deleteLater)
        # Final resets
        self.ml_worker.changeButtonStatus.connect(self.ML_startButton.setEnabled)

        # start thread
        print("clicked runMethodOfLimits")
        self.ml_thread.start()

        self.hearingObjSender.emit(self.HT)



class ML_Worker(QObject):
    finished = pyqtSignal()
    progress = pyqtSignal(int)
    retrieve = pyqtSignal()
    changeButtonStatus = pyqtSignal(bool)
    request_playchunk = pyqtSignal(np.ndarray, int, Hearing.MusicReactor)

    def __init__(self,nTest):
        super().__init__()
        self.__abort = False
        self.nTests = nTest
        self.MoLFlag = False

    def abort(self):
        self.__abort = True

    @pyqtSlot(int)
    def set_maxTests(self, val):
        print(type(val))
        logging.info(f"set_maxTests.... {val}")
        self.nTests = val

    @pyqtSlot()
    def change_Flag(self):
        print("clicked")
        self.MoLFlag = True

    # definition of the long-running task
    @pyqtSlot(Hearing.MusicReactor)
    def actualLongTaskFromHearingTest(self, HTObj):
        self.changeButtonStatus.emit(False)
        self.progress.emit(0)
        self.retrieve.emit()
        print(self.nTests)
        start = 0
        for i in range(self.nTests):
            self.MoLFlag = False
            j = i + 1
            print("start", i)
            # create the frequency for the test
            chunk_freq = Hearing.generateRndFreq(0, 10000)
            #create chunks as generator
            for chunk, volume in HTObj.test_function(chunk_freq):
                # play chunk of the audio
                self.request_playchunk.emit(chunk, 2, HTObj) # this is my current method, by using the signals and slots
                # Hearing.playTone(chunk, HTObj.frequencySample, num_channels=2)  # previously I tried something like this, which resulted in the same behavior
                print(volume)

                if self.MoLFlag:
                    print(self.MoLFlag)
                    break

        self.progress.emit(j)
        self.changeButtonStatus.emit(True)
        self.finished.emit()

    @pyqtSlot(np.ndarray, int, Hearing.MusicReactor)
    def sendAudio2queue(self, chunk, channels, HTObj):
        Hearing.playTone(chunk, HTObj.frequencySample, num_channels=channels)

I would be grateful if someone could take a look; I really want to understand why this happens. I believe it is related to the thread's event queue, and maybe I need to open a new thread responsible for the music while the other handles the GUI reactions, but I still do not understand why clicking "ML_yesButton" does not break the loop (which uses the generator).
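To make the symptom easier to reproduce outside my project, here is a stripped-down sketch (the names Worker, long_task and set_flag are invented for illustration, they are not from my real code). The button's slot lives on the worker object, so the click arrives as a queued call in the worker thread, and it only seems to take effect after the long slot has returned, which matches what I see in my application:

import sys
import time

from PyQt5.QtCore import QObject, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication, QPushButton

class Worker(QObject):
    finished = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.flag = False

    @pyqtSlot()
    def long_task(self):
        # stands in for actualLongTaskFromHearingTest: while this loop runs,
        # the worker thread's event loop cannot dispatch any queued calls
        for i in range(5):
            time.sleep(1)  # stands in for playing one chunk
            print("chunk", i, "flag =", self.flag)
            if self.flag:  # never True while the loop is still running
                break
        self.finished.emit()

    @pyqtSlot()
    def set_flag(self):
        # queued call from the GUI thread; it is only dispatched after
        # long_task has returned control to the worker's event loop
        print("set_flag finally executed")
        self.flag = True

app = QApplication(sys.argv)
thread = QThread()
worker = Worker()
worker.moveToThread(thread)
thread.started.connect(worker.long_task)
worker.finished.connect(thread.quit)

button = QPushButton("Yes")
button.clicked.connect(worker.set_flag)  # cross-thread, hence a queued connection
button.show()

thread.start()
sys.exit(app.exec_())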

1 Answer:

Answer 0 (score: 0)

There is no need to use threads in this case. The wait_done() method blocks the thread it is executed in, so that the application does not terminate before the audio has finished playing.
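As a quick illustration of the difference (the one-second 440 Hz buffer below is made up for the example, it is not taken from the question), wait_done() blocks the calling thread until playback ends, whereas is_playing() returns immediately and can therefore be polled without freezing the event loop:

import numpy as np
import simpleaudio as sa

fs = 16000
t = np.linspace(0, 1, fs, False)
audio = (np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)

# blocking: wait_done() does not return until playback has finished, so the
# thread that calls it cannot process Qt events in the meantime
play_obj = sa.play_buffer(audio, 1, 2, fs)
play_obj.wait_done()

# non-blocking: is_playing() just reports the current state, so it can be
# polled periodically (e.g. from a QTimer) while the event loop keeps running
play_obj = sa.play_buffer(audio, 1, 2, fs)
while play_obj.is_playing():
    pass  # in the Qt version below, control returns to the event loop here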

Instead, a QTimer can be used to check whether the audio has finished playing:

import simpleaudio as sa
import numpy as np

from PyQt5.QtCore import pyqtSignal, QObject, Qt, QTimer
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget


class AudioManager(QObject):
    started = pyqtSignal()
    finished = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self._play_obj = None

        self._timer = QTimer(interval=10)
        self._timer.timeout.connect(self._handle_timeout)

    def start(self, audio_data, num_channels, bytes_per_sample, sample_rate):
        self._play_obj = sa.play_buffer(
            audio_data, num_channels, bytes_per_sample, sample_rate
        )
        self._timer.start()
        self.started.emit()

    def stop(self):
        if self._play_obj is None:
            return
        self._play_obj.stop()
        self._play_obj = None
        # stop polling once playback has ended
        self._timer.stop()
        self.finished.emit()

    def _handle_timeout(self):
        # poll the playback state; once the buffer has been consumed,
        # release it and emit finished()
        if self._play_obj is None:
            return
        if not self.running():
            self.stop()

    def running(self):
        return self._play_obj is not None and self._play_obj.is_playing()


def create_tone(duration, fs, f):
    tt = np.linspace((0, 0), (duration, duration), int(duration * fs), False)
    note = np.sin(f * tt * 2 * np.pi)
    # normalize so the peak amplitude fills the 16-bit range
    audio = note * (2 ** 15 - 1) / np.max(np.abs(note))
    audio = audio.astype(np.int16)
    return audio


class Widget(QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)

        self.audio_manager = AudioManager()
        self.audio_manager.started.connect(self.handle_started)
        self.audio_manager.finished.connect(self.handle_finished)

        self.button = QPushButton("Start", checkable=True)
        self.button.toggled.connect(self.handle_toggled)

        lay = QVBoxLayout(self)
        lay.addWidget(self.button, alignment=Qt.AlignCenter)

    def handle_toggled(self, state):
        if state:
            # generate and play the tone at the same sample rate, high enough
            # for the random frequencies of up to 10 kHz
            fs = 44100
            frequency = np.random.uniform(low=0, high=10000)
            tone = create_tone(60, fs, frequency)
            self.audio_manager.start(tone, 1, 2, fs)

        else:
            self.audio_manager.stop()
            self.button.setText("Start")

    def handle_started(self):
        self.button.setChecked(True)
        self.button.setText("Stop")

    def handle_finished(self):
        self.button.setChecked(False)
        self.button.setText("Start")


def main():
    import sys

    app = QApplication(sys.argv)

    widget = Widget()
    widget.resize(640, 480)
    widget.show()

    sys.exit(app.exec_())


if __name__ == "__main__":
    main()