# run with: python3 only_test.py
import pyaudio
import numpy as np
import wave
import time
import math
#from pydub import AudioSegment
#from pydub.playback import play
#from scipy.signal import iirfilter
from scipy import signal
RATE = 48000
CHUNK = 4096
WIDTH = 2
volume = 0.0
duration = 1.0
#SHORT_NORMALIZE = (1.0/32768.0)
#INPUT_BLOCK_TIME = 1
#INPUT_BLOCK_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME)
while True:
    # use a Blackman window
    window = np.blackman(CHUNK)
    # open the audio streams
    p = pyaudio.PyAudio()
    player = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=RATE,
                    output=True,
                    frames_per_buffer=CHUNK)
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    #errorcount = 0
    for i in range(int(20*RATE/CHUNK)):
        sound = stream.read(CHUNK)
        #imp_ff = signal.filtfilt(b, a, sound)
        # playback microphone sound
        #player.write(np.frombuffer(sound, dtype=np.int16), CHUNK)
        # generate samples at the detected frequency
        #samples = (np.sin(2*np.pi*np.arange(RATE*duration)*freq/RATE)).astype(np.int16)
        # phase-inverted samples
        #inverse_samples = -samples
        # play back the inverted tone
        #player.write(np.frombuffer(volume*inverse_samples, dtype=np.int16), CHUNK)
        # unpack the data and multiply by the Blackman window
        indata = np.array(wave.struct.unpack("%dh" % (len(sound) // WIDTH),
                                             sound)) * window
        # take the FFT and square each value
        fftData = abs(np.fft.rfft(indata))**2
        #ifftData = abs(np.fft.irfft(indata))**2
        # find the bin with the maximum magnitude (skip the DC bin)
        which = fftData[1:].argmax() + 1
        # use quadratic interpolation around the max to refine the peak
        if which != len(fftData) - 1:
            y0, y1, y2 = np.log(fftData[which-1:which+2:])
            x1 = (y2 - y0) * .5 / (2*y1 - y2 - y0)
            # convert the interpolated bin index to a frequency and output it
            freq = (which + x1) * RATE / CHUNK
            print("the freq is %d hz." % (freq))
        else:
            freq = which * RATE / CHUNK
            print("the freq is %d hz." % (freq))
        # play back the mic sound (raw int16 bytes)
        player.write(sound, CHUNK)
        if freq < 65:
            freq = 0
        # generate samples, note conversion to array
        #samples = (np.sin(2*np.pi*np.arange(RATE*duration)*freq/RATE)).astype(np.int16)
        # invert phase of samples
        #result_samples = -samples
        # play back the inverted mic sound
        #player.write(np.frombuffer(result_samples, dtype=np.int16), CHUNK)
    stream.stop_stream()
    stream.close()
    p.terminate()
We are currently processing the microphone input in real time. The goal is to detect the dominant frequency and then remove that frequency's sine-wave component from the output with a notch filter (band-stop filter). I don't know how to write a notch filter (band-stop filter) in code. Is there any code or library that can help?
Answer 0 (score: 0)
Since you are already using scipy.signal, you can use scipy.signal.iirnotch. You may also want to read some background on IIR filters, for example the quality factor.
Use it as follows:
b, a = signal.iirnotch( w0, Q )
w0 is the normalized frequency.
Q is the quality factor, which characterizes the notch filter's -3 dB bandwidth relative to its center frequency.
The function returns the numerator b and denominator a polynomials of the IIR filter.
Example:
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

fs = 200.0  # Sample frequency (Hz)
f0 = 60.0   # Frequency to be removed from signal (Hz)
Q = 30.0    # Quality factor
w0 = f0 / (fs / 2)  # Normalized frequency
b, a = signal.iirnotch(w0, Q)
# Look at the frequency response
w, h = signal.freqz(b, a)
freq = w * fs / (2 * np.pi)
plt.plot(freq, 20 * np.log10(abs(h)))
plt.show()
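To use this in the question's streaming setup, each chunk read from the microphone has to be filtered block by block while carrying the filter state across blocks. Below is a minimal sketch, not from the original post: it assumes the RATE and CHUNK values from the question's code, a hypothetical 60 Hz target frequency, and uses scipy.signal.lfilter with an explicit state vector instead of filtfilt.

import numpy as np
from scipy import signal

RATE = 48000       # sample rate used in the question's code
CHUNK = 4096       # block size used in the question's code
notch_freq = 60.0  # hypothetical frequency to remove (Hz)
Q = 30.0           # quality factor

# design the notch once, outside the audio loop (fs handles normalization)
b, a = signal.iirnotch(notch_freq, Q, fs=RATE)
# filter state, carried from one chunk to the next so the filtering
# stays continuous across block boundaries
zi = np.zeros(max(len(a), len(b)) - 1)

def notch_chunk(raw_bytes, zi):
    """Notch-filter one CHUNK of int16 mic data; returns (bytes, new state)."""
    samples = np.frombuffer(raw_bytes, dtype=np.int16).astype(np.float64)
    filtered, zi = signal.lfilter(b, a, samples, zi=zi)
    return filtered.astype(np.int16).tobytes(), zi

# inside the question's read loop this would replace the direct playback:
#   sound = stream.read(CHUNK)
#   out, zi = notch_chunk(sound, zi)
#   player.write(out, CHUNK)

filtfilt is zero-phase but needs the whole signal at once, so for block-by-block real-time processing lfilter with a carried state is the usual choice.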
Answer 1 (score: 0)
Although Arude's solution is good, I simplified it. Also, the term w0 in the scipy docs can be confusing: it says w0 must be normalized, but as long as you pass in the sample rate, that is done for you.
I've wrapped this simplification into the code below and added a complete working example with a synthetic signal, from which I remove a 60 Hz hum:
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
# Create/view notch filter
samp_freq = 1000 # Sample frequency (Hz)
notch_freq = 60.0 # Frequency to be removed from signal (Hz)
quality_factor = 30.0 # Quality factor
b_notch, a_notch = signal.iirnotch(notch_freq, quality_factor, samp_freq)
freq, h = signal.freqz(b_notch, a_notch, fs = samp_freq)
plt.figure('filter')
plt.plot( freq, 20*np.log10(abs(h)))
# Create/view signal that is a mixture of two frequencies
f1 = 17
f2 = 60
t = np.linspace(0.0, 1, 1_000)
y_pure = np.sin(f1 * 2.0*np.pi*t) + np.sin(f2 * 2.0*np.pi*t)
plt.figure('result')
plt.subplot(211)
plt.plot(t, y_pure, color = 'r')
# apply notch filter to signal
y_notched = signal.filtfilt(b_notch, a_notch, y_pure)
# plot notch-filtered version of signal
plt.subplot(212)
plt.plot(t, y_notched, color = 'r')
plt.show()
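As a quick numerical check of what the second figure shows, the magnitude of each component can be compared before and after filtering. A short sketch continuing from the variables above (it assumes the 1-second, 1000-sample signal defined there, so FFT bin k lands near k Hz):

spectrum_pure = np.abs(np.fft.rfft(y_pure))
spectrum_notched = np.abs(np.fft.rfft(y_notched))
# the 17 Hz component should be essentially unchanged,
# while the 60 Hz component should be strongly attenuated
print("17 Hz bin: %.1f -> %.1f" % (spectrum_pure[17], spectrum_notched[17]))
print("60 Hz bin: %.1f -> %.1f" % (spectrum_pure[60], spectrum_notched[60]))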