好吧,这段代码有效:
# Relay a remote MP3 stream straight to the shout connection:
# pull 4 KiB chunks over HTTP and sync after every send (libshout's
# sync paces sending to the stream's bitrate).
with requests.get("https://impradio.bytemasters.gr/8002/LIVE", stream=True) as peradio:
    for chunk in peradio.iter_content(chunk_size=4096):
        self.s.send(chunk)
        self.s.sync()
但下面这段代码没有效果:
# Monitor the mixed slice on the local audio device, then fan its raw
# PCM out to every connected client; back off 10 ms when nobody is
# connected so the loop does not spin.
self.stream.write(final_slice.raw_data)
active_connection_found = False
for connection in self.connections:
    if connection["status"] == "connected":
        connection["connection"].send(final_slice.raw_data)
        connection["connection"].sync()
        active_connection_found = True
if not active_connection_found:
    time.sleep(0.01)
没有报错,但我在浏览器里听不到任何音乐(页面一直处于加载状态)。
因此,我尝试先把 final_slice 写入临时文件,再把 f.read() 读出的字节发送到 Icecast 服务器。这样可以工作,但是声音延迟很大(每个 final_slice 之间有明显间隔),而且程序变慢很多。
# Encode the mixed slice to MP3 in memory, then broadcast the encoded
# bytes to every connected client; back off 10 ms when nobody is
# connected so the loop does not spin.
active_connection_found = False
mp3_buffer = io.BytesIO()
final_slice.export(mp3_buffer, format="mp3")
send_data = mp3_buffer.getvalue()
for connection in self.connections:
    if connection["status"] == "connected":
        connection["connection"].send(send_data)
        connection["connection"].sync()
        active_connection_found = True
if not active_connection_found:
    time.sleep(0.01)
上面的代码可以工作,但质量很低(延迟明显)。
我能做什么?
编辑:我尝试使用的比特率为 128Kbps。
pyaudio 服务质量设置为:
# QoS / audio-format settings
self.bit_rate = 128 * 1024  # 128 kb/sec — note: never used anywhere in the code
self.packet_time = 93  # slice duration in msec per loop iteration
self.packet_size = int(16384 / 4)  # frames per PyAudio buffer
# self.new_sample_rate = 32768  # earlier experiment, kept for reference
self.new_sample_rate = 44100
self.format = pyaudio.paInt16
self.channels = 2
Edit2:我尝试直接发送 final_slice._data,但结果相同。
以下代码位于 QThread 的 run 方法中:
# NOTE(review): this paste is HTML-mangled — every "<...>" comparison was
# stripped by extraction (e.g. "(self.packet_time)20:" has lost the "<"
# operator and its right-hand operand), so the code below is NOT valid
# Python as shown. It is kept byte-identical; confirm against the original
# source before editing.
#
# run(): QThread main loop. Resolves the configured input/output device
# indices, opens one duplex PyAudio stream, then loops forever: builds a
# pydub AudioSegment slice for deck 1, deck 2, the music-clip deck and the
# microphone (speakers deck), applies per-deck pan / high-pass / low-pass /
# volume / normalize, emits per-deck volume-strength and duration Qt
# signals, overlays the non-empty slices into final_slice, applies the
# general deck's settings, optionally records raw frames, then MP3-encodes
# final_slice in memory and sends it to every connected shout client
# (sleeping 10 ms when no client is connected).
def run(self): for output_device in self.output_devices: if(self.primary_output_device_name==output_device[2]): self.output_device_index = output_device[1] for input_device in self.input_devices: if(self.input_device_name==input_device[2]): self.input_device_index = input_device[1] self.stream = self.p.open(format=pyaudio.paInt16,channels=2,rate=self.new_sample_rate,input=True,input_device_index=self.input_device_index,output=True,output_device_index=self.output_device_index,frames_per_buffer=self.packet_size) self.stream.start_stream() while(True): deck_1_slice = AudioSegment.empty() if self.deck_1["play-status"]=="playing": if self.deck_1["current-duration-milliseconds"]>self.deck_1["total-duration-milliseconds"]: self.deck_1["play-status"] = "stopped" self.deck_1_status_signal.emit("stopped") self.deck_1["chunk-number"] = 0 self.deck_1["current-duration-milliseconds"] = 0 if self.deck_1["play-status"]=="playing": if self.deck_1["player-list-item"]["type"] != "retransmitions": if(self.deck_1["normalize"]==False or self.deck_1["normalize"]==0): if((self.deck_1["chunk-number"]+1)*(self.packet_time)20: deck_1_slice = effects.high_pass_filter(deck_1_slice, self.deck_1["low-frequency"]) if self.deck_1["high-frequency"]>20000: deck_1_slice = effects.low_pass_filter(deck_1_slice, self.deck_1["high-frequency"]) if(self.deck_1["volume"]==0): self.db_volume = -200 else: self.db_volume = 20*math.log10(self.deck_1["volume"]/100) deck_1_slice = deck_1_slice+self.db_volume else: self.deck_1["chunk-number"] = 0 deck_1_slice = AudioSegment.empty() elif self.deck_1["play-status"] == "stopped": self.deck_1["chunk-number"] = 0 self.deck_1["current-duration-milliseconds"] = 0 deck_1_slice = AudioSegment.empty() #deck_1_volume_strength average_data_value = deck_1_slice.max normalized_value = abs(average_data_value)/deck_1_slice.max_possible_amplitude if normalized_value>1: normalized_value = 1 self.deck_1_volume_signal.emit(normalized_value) #deck 2 deck_2_slice = 
AudioSegment.empty() if self.deck_2["play-status"]=="playing": if self.deck_2["current-duration-milliseconds"]>self.deck_2["total-duration-milliseconds"]: self.deck_2["play-status"] = "stopped" self.deck_2["chunk-number"] = 0 self.deck_2["current-duration-milliseconds"] = 0 self.deck_2_status_signal.emit("stopped") if self.deck_2["play-status"]=="playing": if self.deck_2["player-list-item"]["type"] != "retransmitions": if(self.deck_2["normalize"]==False or self.deck_2["normalize"]==0): if((self.deck_2["chunk-number"]+1)*(self.packet_time)20: deck_2_slice = effects.high_pass_filter(deck_2_slice, self.deck_2["low-frequency"]) if self.deck_2["high-frequency"]>20000: deck_2_slice = effects.low_pass_filter(deck_2_slice, self.deck_2["high-frequency"]) if(self.deck_2["volume"]==0): self.db_volume = -200 else: self.db_volume = 20*math.log10(self.deck_2["volume"]/100) deck_2_slice = deck_2_slice+self.db_volume else: self.deck_2["chunk-number"] = 0 deck_2_slice = AudioSegment.empty() elif self.deck_2["play-status"] == "stopped": self.deck_2["chunk-number"] = 0 self.deck_2["current-duration-milliseconds"] = 0 deck_2_slice = AudioSegment.empty() #deck_2_volume_strength average_data_value = deck_2_slice.max normalized_value = abs(average_data_value)/deck_2_slice.max_possible_amplitude if normalized_value>1: normalized_value = 1 self.deck_2_volume_signal.emit(normalized_value) #music clip deck music_clip_deck_slice = AudioSegment.empty() if self.music_clip_deck["play-status"]=="playing": if self.music_clip_deck["current-duration-milliseconds"]>self.music_clip_deck["total-duration-milliseconds"]: self.music_clip_deck["play-status"] = "stopped" self.music_clip_deck["chunk-number"] = 0 self.music_clip_deck["current-duration-milliseconds"] = 0 self.music_clip_deck_status_signal.emit("stopped") if self.music_clip_deck["play-status"]=="playing": if(self.music_clip_deck["normalize"]==False or self.music_clip_deck["normalize"]==0): 
if((self.music_clip_deck["chunk-number"]+1)*(self.packet_time)20: music_clip_deck_slice = effects.high_pass_filter(music_clip_deck_slice, self.music_clip_deck["low-frequency"]) if self.music_clip_deck["high-frequency"]>20000: music_clip_deck_slice = effects.low_pass_filter(music_clip_deck_slice, self.music_clip_deck["high-frequency"]) if(self.music_clip_deck["volume"]==0): self.db_volume = -200 else: self.db_volume = 20*math.log10(self.music_clip_deck["volume"]/100) music_clip_deck_slice = music_clip_deck_slice+self.db_volume else: self.music_clip_deck["chunk-number"] = 0 music_clip_deck_slice = AudioSegment.empty() elif self.deck_2["play-status"] == "stopped": self.music_clip_deck["chunk-number"] = 0 self.music_clip_deck["current-duration-milliseconds"] = 0 music_clip_deck_slice = AudioSegment.empty() #music_clip_volume_strength average_data_value = music_clip_deck_slice.max normalized_value = abs(average_data_value)/music_clip_deck_slice.max_possible_amplitude if normalized_value>1: normalized_value = 1 self.music_clip_deck_volume_signal.emit(normalized_value) #speackers deck if self.speackers_deck["play-status"]=="playing": microphone_data = self.stream.read(self.packet_size) microphone_slice = AudioSegment(microphone_data, sample_width=2, frame_rate=self.new_sample_rate, channels=2) #microphone_slice = AudioSegment.from_mono_audiosegments(microphone_slice, microphone_slice) if self.speackers_deck["pan"]!=0: microphone_slice = microphone_slice.pan(self.speackers_deck["pan"]) if self.speackers_deck["low-frequency"]>20: microphone_slice = effects.high_pass_filter(microphone_slice, self.speackers_deck["low-frequency"]) if self.speackers_deck["high-frequency"]>20000: microphone_slice = effects.low_pass_filter(microphone_slice, self.speackers_deck["high-frequency"]) if(self.speackers_deck["volume"]==0): self.db_volume = -200 else: self.db_volume = 20*math.log10(self.speackers_deck["volume"]/100) microphone_slice = microphone_slice+self.db_volume #normalize 
if(self.speackers_deck["normalize"]==True or self.speackers_deck["normalize"]==1): microphone_slice = self.normalize_method(microphone_slice,0.1) else: self.speackers_deck["chunk-number"] = 0 self.speackers_deck["current-duration-milliseconds"] = 0 microphone_slice = AudioSegment.empty() #microphone_volume_strength average_data_value = microphone_slice.max normalized_value = abs(average_data_value)/microphone_slice.max_possible_amplitude if normalized_value>1: normalized_value = 1 self.speackers_deck_volume_signal.emit(normalized_value) if(len(deck_1_slice)==0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)==0 and len(microphone_slice)==0): final_slice = self.silent_segment else: if(len(deck_1_slice)==0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)==0 and len(microphone_slice)!=0): final_slice = microphone_slice elif(len(deck_1_slice)==0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)==0): final_slice = music_clip_deck_slice elif(len(deck_1_slice)==0 and len(deck_2_slice)!=0 and len(music_clip_deck_slice)==0 and len(microphone_slice)==0): final_slice = deck_2_slice elif(len(deck_1_slice)!=0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)==0 and len(microphone_slice)==0): final_slice = deck_1_slice elif(len(deck_1_slice)!=0 and len(deck_2_slice)!=0 and len(music_clip_deck_slice)==0 and len(microphone_slice)==0): final_slice = deck_1_slice.overlay(deck_2_slice) elif(len(deck_1_slice)!=0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)==0): final_slice = deck_1_slice.overlay(music_clip_deck_slice) elif(len(deck_1_slice)!=0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)==0 and len(microphone_slice)!=0): final_slice = deck_1_slice.overlay(microphone_slice) elif(len(deck_1_slice)==0 and len(deck_2_slice)!=0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)==0): final_slice = deck_2_slice.overlay(music_clip_deck_slice) elif(len(deck_1_slice)==0 and 
len(deck_2_slice)!=0 and len(music_clip_deck_slice)==0 and len(microphone_slice)!=0): final_slice = deck_2_slice.overlay(microphone_slice) elif(len(deck_1_slice)==0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)!=0): final_slice = music_clip_deck_slice.overlay(microphone_slice) elif(len(deck_1_slice)!=0 and len(deck_2_slice)!=0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)==0): final_slice = deck_1_slice.overlay(deck_2_slice).overlay(music_clip_deck_slice) elif(len(deck_1_slice)!=0 and len(deck_2_slice)!=0 and len(music_clip_deck_slice)==0 and len(microphone_slice)!=0): final_slice = deck_1_slice.overlay(deck_2_slice).overlay(microphone_slice) elif(len(deck_1_slice)==0 and len(deck_2_slice)!=0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)!=0): final_slice = deck_2_slice.overlay(music_clip_deck_slice).overlay(microphone_slice) elif(len(deck_1_slice)!=0 and len(deck_2_slice)==0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)!=0): final_slice = deck_1_slice.overlay(music_clip_deck_slice).overlay(microphone_slice) elif(len(deck_1_slice)!=0 and len(deck_2_slice)!=0 and len(music_clip_deck_slice)!=0 and len(microphone_slice)!=0): final_slice = deck_1_slice.overlay(deck_2_slice).overlay(music_clip_deck_slice).overlay(microphone_slice) #final slice settings if self.general_deck["pan"]!=0: final_slice = final_slice.pan(self.general_deck["pan"]) if self.general_deck["low-frequency"]>20: final_slice = effects.high_pass_filter(final_slice, self.general_deck["low-frequency"]) if self.general_deck["high-frequency"]>20000: final_slice = effects.low_pass_filter(final_slice, self.general_deck["high-frequency"]) if(self.general_deck["volume"]==0): self.db_volume = -200 else: self.db_volume = 20*math.log10(self.general_deck["volume"]/100) final_slice = final_slice+self.db_volume #normalize if(self.general_deck["normalize"]==True or self.general_deck["normalize"]==1): final_slice = 
self.normalize_method(final_slice,0.1) #final_slice_volume_strength average_data_value = final_slice.max normalized_value = abs(average_data_value)/final_slice.max_possible_amplitude self.general_deck_volume_signal.emit(normalized_value) if self.deck_1["play-status"]=="playing": self.deck_1["chunk-number"] +=1 self.deck_1["current-duration-milliseconds"] += self.packet_time self.deck_1_duration_signal.emit(self.deck_1["current-duration-milliseconds"]) if self.deck_2["play-status"]=="playing": self.deck_2["chunk-number"] +=1 self.deck_2["current-duration-milliseconds"] += self.packet_time self.deck_2_duration_signal.emit(self.deck_2["current-duration-milliseconds"]) if self.music_clip_deck["play-status"]=="playing": self.music_clip_deck["chunk-number"] +=1 self.music_clip_deck["current-duration-milliseconds"] += self.packet_time self.music_clip_deck_duration_signal.emit(self.music_clip_deck["current-duration-milliseconds"]) if self.speackers_deck["play-status"]=="playing": self.speackers_deck["chunk-number"] +=1 self.speackers_deck["current-duration-milliseconds"] += self.packet_time self.speackers_deck_duration_signal.emit(self.speackers_deck["current-duration-milliseconds"]) if self.record_status=="recording": try: self.output_file.writeframesraw(final_slice.raw_data) except: pass #self.stream.write(final_slice.raw_data) active_connection_found = False send_data = io.BytesIO() t1 = time.time() final_slice.export(send_data,format="mp3") send_data = send_data.getvalue() t2 = time.time() print(t2-t1) for connection in self.connections: if connection["status"]=="connected": connection["connection"].send(send_data) connection["connection"].sync() active_connection_found = True if active_connection_found==False: time.sleep(0.01)
我尝试将几秒钟录制为 wav 格式,录制质量很好。
但是当我连接到 Icecast 服务器时,来自应用程序或来自浏览器的声音被中断。 :/
编辑:如果我将 packet_time 更改为 744 毫秒,几乎一切都完美无缺。现在的问题出在麦克风的声音上。
提前致谢,
克里斯·帕帕斯