分享音频检测代码

import sounddevice as sd
import numpy as np
import logging
# Logging configuration: append INFO-level records to detect_mute.log.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s - %(levelname)s - %(message)s",
                    filename='detect_mute.log',
                    filemode='a')
# Tunable detection parameters.
THRESHOLD_DB = -40  # silence threshold in dB; levels below this count as silence
CHUNK_SIZE = 1024  # samples per processed audio block
SAMPLE_RATE = 44100  # sampling rate in Hz
CHANNELS = 1  # number of microphone input channels
def calculate_db(data):
    """Return the RMS level of *data* in decibels (dBFS-style).

    FIX: the pasted source had the function body at column 0 (a syntax
    error); the indentation is restored here, logic unchanged.

    Returns -inf for an all-zero block instead of raising on log10(0).
    """
    rms = np.sqrt(np.mean(np.square(data.astype(float))))
    return 20 * np.log10(rms) if rms > 0 else -np.inf
def audio_callback(indata, frames, time, status):
    """Per-block stream callback: measure the level and report silence.

    FIX: restores the indentation lost in the forum paste; logic and all
    user-facing strings are unchanged.
    """
    db_level = calculate_db(indata)
    is_silent = db_level < THRESHOLD_DB
    log_message = f"当前音量: {db_level:.2f} dB | {'静音中...' if is_silent else '检测到声音'}"
    print(log_message)
    logging.info(log_message)
# Start listening: open an input stream and let the callback do the work.
# FIX: restores the indentation lost in the forum paste; logic unchanged.
try:
    logging.info("开始检测静音(按Ctrl+C停止)...")
    print("开始检测静音(按Ctrl+C停止)...")
    with sd.InputStream(callback=audio_callback,
                        blocksize=CHUNK_SIZE,
                        samplerate=SAMPLE_RATE,
                        channels=CHANNELS):
        while True:
            sd.sleep(1000)  # keep the main thread alive while the stream runs
except KeyboardInterrupt:
    logging.info("检测已停止")
    print("\n检测已停止")
except Exception as e:
    logging.error(f"发生错误: {str(e)}")
    print(f"发生错误: {str(e)}")
OR
import sounddevice as sd
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from collections import deque
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button
# Stream / analysis configuration.
SAMPLE_RATE = 44100  # sampling rate in Hz
CHUNK_SIZE = 1024  # samples per audio block
BUFFER_SECONDS = 2  # length of the waveform display window, seconds
NOISE_THRESHOLD_DB = -40  # initial noise threshold in dB (adjustable via slider)
FREQ = 440  # frequency of the generated test tone, Hz

# Generate one block of the test tone.
# FIX: cast AFTER scaling -- the original cast only sin()'s output to
# float32, so the `0.3 *` product was promoted back to float64.
t = np.arange(CHUNK_SIZE) / SAMPLE_RATE
test_tone = (0.3 * np.sin(2 * np.pi * FREQ * t)).astype('float32')
# Visualisation parameters.
plt.rcParams['toolbar'] = 'None'  # hide the matplotlib toolbar
WAVE_COLOR = '#1f77b4'  # waveform line colour
SPEC_COLOR = 'viridis'  # spectrogram colormap
class AudioVisualizer:
    """Matplotlib dashboard: waveform, 2D/3D spectrograms, a mic-vs-tone
    volume bar chart, a status panel, and interactive controls.

    NOTE(review): several argument lists were eaten by the forum paste
    (empty slots in barh/axes/zip/return); they are reconstructed here
    with plausible values -- confirm against the original post.
    """

    def __init__(self):
        # Rolling data buffers fed by AudioAnalyzer.analyze().
        self.wave_buffer = deque(maxlen=int(SAMPLE_RATE * BUFFER_SECONDS / CHUNK_SIZE))
        self.fft_buffer = deque(maxlen=50)  # FFT-frame history for the spectrograms
        self.vol_diff_history = deque(maxlen=50)
        # Figure / canvas.
        self.fig = plt.figure(figsize=(14, 10), facecolor='#f0f0f0')
        self.fig.canvas.manager.set_window_title('Real-Time Audio Mixer Analyzer')
        # Sub-plot layout.
        self.ax_wave = plt.subplot2grid((3, 3), (0, 0), colspan=3)
        self.ax_spec = plt.subplot2grid((3, 3), (1, 0))
        self.ax_3d = self.fig.add_subplot(1, 3, 2, projection='3d')  # 3D spectrogram
        self.ax_vol = plt.subplot2grid((3, 3), (1, 2))
        self.ax_status = plt.subplot2grid((3, 3), (2, 0), colspan=3)
        # Waveform plot.
        self.wave_line, = self.ax_wave.plot([], [], lw=1, color=WAVE_COLOR)
        self.ax_wave.set_title('Real-time Waveform')
        self.ax_wave.set_ylim(-1, 1)
        self.ax_wave.set_xlim(0, BUFFER_SECONDS)
        # 2D spectrogram image.
        self.spec_plot = self.ax_spec.imshow(np.zeros((64, CHUNK_SIZE // 2)),
                                             cmap=SPEC_COLOR,
                                             aspect='auto',
                                             origin='lower',
                                             extent=(0, SAMPLE_RATE / 2, 0, 64))
        self.ax_spec.set_title('2D Spectrogram')
        self.ax_spec.set_xlabel('Frequency (Hz)')
        self.ax_spec.set_ylabel('Time Frame')
        # 3D spectrogram axes.
        self.ax_3d.set_title('3D Spectrogram')
        self.ax_3d.set_xlabel('Frequency (Hz)')
        self.ax_3d.set_ylabel('Time Frame')
        self.ax_3d.set_zlabel('Magnitude')
        self.ax_3d.grid(True)
        # Volume-comparison bars (initial widths reconstructed as zeros).
        self.vol_bars = self.ax_vol.barh(['Mic', 'Tone'], [0, 0], color=['#2ca02c', '#d62728'])
        self.ax_vol.set_title('Volume Comparison')
        self.ax_vol.set_xlim(0, 80)
        # Status text area.
        self.status_text = self.ax_status.text(0.5, 0.5, '',
                                               ha='center', va='center', fontsize=14)
        self.ax_status.axis('off')
        # Interactive widgets.
        self.add_interactive_controls()
        plt.tight_layout()

    def add_interactive_controls(self):
        """Add the noise-threshold slider and the reset button.

        The axes rectangles were missing from the pasted source; the
        values below are plausible reconstructions -- TODO confirm.
        """
        ax_thresh = plt.axes([0.25, 0.02, 0.50, 0.03], facecolor='lightgoldenrodyellow')
        self.threshold_slider = Slider(ax_thresh, 'Noise Threshold (dB)', -60, 0,
                                       valinit=NOISE_THRESHOLD_DB)
        reset_ax = plt.axes([0.85, 0.02, 0.10, 0.04])
        self.reset_button = Button(reset_ax, 'Reset', color='lightgoldenrodyellow')
        self.threshold_slider.on_changed(self.update_threshold)
        self.reset_button.on_clicked(self.reset_visualization)

    def update_threshold(self, val):
        """Slider callback: update the module-level noise threshold."""
        global NOISE_THRESHOLD_DB
        NOISE_THRESHOLD_DB = val

    def reset_visualization(self, event):
        """Button callback: clear all rolling buffers and reset the slider."""
        self.wave_buffer.clear()
        self.fft_buffer.clear()
        self.vol_diff_history.clear()
        self.threshold_slider.reset()

    def update_plots(self, frame):
        """FuncAnimation callback: refresh every visual element."""
        if self.wave_buffer:
            # Waveform.
            wave_data = np.concatenate(self.wave_buffer)
            time_axis = np.linspace(0, BUFFER_SECONDS, len(wave_data))
            self.wave_line.set_data(time_axis, wave_data)
            # 2D spectrogram.
            spec_data = np.array(self.fft_buffer).T
            if spec_data.size > 0:
                self.spec_plot.set_data(spec_data)
                self.spec_plot.set_clim(vmin=spec_data.min(), vmax=spec_data.max())
            # 3D spectrogram (redrawn from scratch each frame).
            self.ax_3d.clear()
            if len(self.fft_buffer) > 1:
                X, Y = np.meshgrid(np.linspace(0, SAMPLE_RATE / 2, CHUNK_SIZE // 2),
                                   np.arange(len(self.fft_buffer)))
                Z = np.array(self.fft_buffer)
                self.ax_3d.plot_surface(X, Y, Z, cmap='magma', rstride=1, cstride=1, alpha=0.8)
                self.ax_3d.set_zlim(0, np.max(Z))
            # Volume bars (values reconstructed: mic vs tone level in dB).
            for bar, val in zip(self.vol_bars, [analyzer.mic_db, analyzer.tone_db]):
                bar.set_width(val + 60)  # shift dB so the bar width is positive
            self.ax_vol.set_xlim(0, max(analyzer.mic_db, analyzer.tone_db) + 65)
            # Status text.
            status_str = (f"Noise: {'🚩 Detected' if analyzer.noise_state else '✅ Clean'}\n"
                          f"Mix: {'🔊 Active' if analyzer.mix_state else '🔇 Inactive'}\n"
                          f"Vol Diff: {analyzer.volume_diff:.1f} dB")
            self.status_text.set_text(status_str)
        # Artists returned for blitting (leading list reconstructed).
        return [self.wave_line, self.spec_plot] + list(self.vol_bars)
class AudioAnalyzer:
    """Per-block analysis state: noise / mix detection and dB levels."""

    def __init__(self):
        self.noise_state = False  # True while mic level exceeds NOISE_THRESHOLD_DB
        self.mix_state = False  # True while the test tone is prominent in the mix
        self.volume_diff = 0  # mic level minus tone level, in dB
        self.mic_db = 0
        self.tone_db = 0

    def analyze(self, mic_data, mixed_data):
        """Analyse one block of raw mic input and the mixed output signal."""
        # Noise detection: RMS level of the raw microphone block.
        mic_rms = np.sqrt(np.mean(mic_data ** 2))
        self.mic_db = 20 * np.log10(mic_rms) if mic_rms > 0 else -np.inf
        self.noise_state = self.mic_db > NOISE_THRESHOLD_DB
        # Mix detection: is the test-tone bin prominent in the spectrum?
        freqs, magnitudes = self.calculate_fft(mixed_data)
        # FIX: the pasted source had `tone_peak = magnitudes` (a whole
        # array); reconstructed as the magnitude of the bin nearest FREQ.
        tone_peak = magnitudes[np.argmin(np.abs(freqs - FREQ))]
        avg_magnitude = np.mean(magnitudes)
        self.mix_state = tone_peak > 3 * avg_magnitude
        # Volume difference between mic and the generated tone.
        tone_rms = np.sqrt(np.mean(test_tone ** 2))
        self.tone_db = 20 * np.log10(tone_rms) if tone_rms > 0 else -np.inf
        self.volume_diff = self.mic_db - self.tone_db
        # Push data into the visualizer's rolling buffers.
        visualizer.wave_buffer.append(mixed_data)
        visualizer.fft_buffer.append(magnitudes[:CHUNK_SIZE // 2])
        visualizer.vol_diff_history.append(self.volume_diff)

    def calculate_fft(self, data):
        """Return (frequencies, magnitudes) of the real FFT of *data*."""
        fft_data = np.fft.rfft(data)
        freqs = np.fft.rfftfreq(len(data), 1 / SAMPLE_RATE)
        return freqs, np.abs(fft_data)
def audio_callback(indata, outdata, frames, time, status):
    """Duplex stream callback: mix mic input with the test tone, write the
    stereo mix to the output, and feed the analyzer.

    FIX: restores the indentation lost in the forum paste; logic unchanged.
    """
    global analyzer
    # Mix: 70% microphone (first channel) + 30% generated test tone.
    mixed = (indata[:, 0] * 0.7) + (test_tone * 0.3)
    # Duplicate the mono mix into both output channels.
    outdata[:] = np.repeat(mixed.reshape(-1, 1), 2, axis=1)
    # Feed the analysis stage.
    analyzer.analyze(indata[:, 0], mixed)
if __name__ == "__main__":
# 初始化组件
analyzer = AudioAnalyzer()
visualizer = AudioVisualizer()
# 启动动画
ani = FuncAnimation(visualizer.fig,
visualizer.update_plots,
interval=50,
blit=True)
try:
print("Starting real-time audio analysis with visualization...")
with sd.Stream(samplerate=SAMPLE_RATE,
blocksize=CHUNK_SIZE,
channels=2,
callback=audio_callback):
plt.show()# 阻塞主线程显示窗口
except KeyboardInterrupt:
print("\nAnalysis stopped.") 感谢分享!点赞! 以下是两个音频检测代码示例:
示例1:检测静音状态
import sounddevice as sd
import numpy as np
import logging
# Logging configuration: append INFO-level records to detect_mute.log.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s - %(levelname)s - %(message)s",
                    filename='detect_mute.log',
                    filemode='a')
# Tunable detection parameters.
THRESHOLD_DB = -40  # silence threshold in dB; levels below this count as silence
CHUNK_SIZE = 1024  # samples per processed audio block
SAMPLE_RATE = 44100  # sampling rate in Hz
CHANNELS = 1  # number of microphone input channels
def calculate_db(data):
    """Return the RMS level of *data* in decibels (dBFS-style).

    FIX: the pasted source had the function body at column 0 (a syntax
    error); the indentation is restored here, logic unchanged.

    Returns -inf for an all-zero block instead of raising on log10(0).
    """
    rms = np.sqrt(np.mean(np.square(data.astype(float))))
    return 20 * np.log10(rms) if rms > 0 else -np.inf
def audio_callback(indata, frames, time, status):
    """Per-block stream callback: measure the level and report silence.

    FIX: restores the indentation lost in the forum paste; logic and all
    user-facing strings are unchanged.
    """
    db_level = calculate_db(indata)
    is_silent = db_level < THRESHOLD_DB
    log_message = f"当前音量: {db_level:.2f} dB | {'静音中...' if is_silent else '检测到声音'}"
    print(log_message)
    logging.info(log_message)
# Start listening: open an input stream and let the callback do the work.
# FIX: restores the indentation lost in the forum paste; logic unchanged.
try:
    logging.info("开始检测静音(按Ctrl+C停止)...")
    print("开始检测静音(按Ctrl+C停止)...")
    with sd.InputStream(callback=audio_callback,
                        blocksize=CHUNK_SIZE,
                        samplerate=SAMPLE_RATE,
                        channels=CHANNELS):
        while True:
            sd.sleep(1000)  # keep the main thread alive while the stream runs
except KeyboardInterrupt:
    logging.info("检测已停止")
    print("\n检测已停止")
except Exception as e:
    logging.error(f"发生错误: {str(e)}")
    print(f"发生错误: {str(e)}")
示例2:实时音频分析和可视化
import sounddevice as sd
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from collections import deque
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button
# Stream / analysis configuration.
SAMPLE_RATE = 44100  # sampling rate in Hz
CHUNK_SIZE = 1024  # samples per audio block
BUFFER_SECONDS = 2  # length of the waveform display window, seconds
NOISE_THRESHOLD_DB = -40  # initial noise threshold in dB (adjustable via slider)
FREQ = 440  # frequency of the generated test tone, Hz

# Generate one block of the test tone.
# FIX: cast AFTER scaling -- the original cast only sin()'s output to
# float32, so the `0.3 *` product was promoted back to float64.
t = np.arange(CHUNK_SIZE) / SAMPLE_RATE
test_tone = (0.3 * np.sin(2 * np.pi * FREQ * t)).astype('float32')
# Visualisation parameters.
plt.rcParams['toolbar'] = 'None'  # hide the matplotlib toolbar
WAVE_COLOR = '#1f77b4'  # waveform line colour
SPEC_COLOR = 'viridis'  # spectrogram colormap
class AudioVisualizer:
    """Matplotlib dashboard: waveform, 2D/3D spectrograms, a mic-vs-tone
    volume bar chart, a status panel, and interactive controls.

    NOTE(review): several argument lists were eaten by the forum paste
    (empty slots in barh/axes/zip/return); they are reconstructed here
    with plausible values -- confirm against the original post.
    """

    def __init__(self):
        # Rolling data buffers fed by AudioAnalyzer.analyze().
        self.wave_buffer = deque(maxlen=int(SAMPLE_RATE * BUFFER_SECONDS / CHUNK_SIZE))
        self.fft_buffer = deque(maxlen=50)  # FFT-frame history for the spectrograms
        self.vol_diff_history = deque(maxlen=50)
        # Figure / canvas.
        self.fig = plt.figure(figsize=(14, 10), facecolor='#f0f0f0')
        self.fig.canvas.manager.set_window_title('Real-Time Audio Mixer Analyzer')
        # Sub-plot layout.
        self.ax_wave = plt.subplot2grid((3, 3), (0, 0), colspan=3)
        self.ax_spec = plt.subplot2grid((3, 3), (1, 0))
        self.ax_3d = self.fig.add_subplot(1, 3, 2, projection='3d')  # 3D spectrogram
        self.ax_vol = plt.subplot2grid((3, 3), (1, 2))
        self.ax_status = plt.subplot2grid((3, 3), (2, 0), colspan=3)
        # Waveform plot.
        self.wave_line, = self.ax_wave.plot([], [], lw=1, color=WAVE_COLOR)
        self.ax_wave.set_title('Real-time Waveform')
        self.ax_wave.set_ylim(-1, 1)
        self.ax_wave.set_xlim(0, BUFFER_SECONDS)
        # 2D spectrogram image.
        self.spec_plot = self.ax_spec.imshow(np.zeros((64, CHUNK_SIZE // 2)),
                                             cmap=SPEC_COLOR,
                                             aspect='auto',
                                             origin='lower',
                                             extent=(0, SAMPLE_RATE / 2, 0, 64))
        self.ax_spec.set_title('2D Spectrogram')
        self.ax_spec.set_xlabel('Frequency (Hz)')
        self.ax_spec.set_ylabel('Time Frame')
        # 3D spectrogram axes.
        self.ax_3d.set_title('3D Spectrogram')
        self.ax_3d.set_xlabel('Frequency (Hz)')
        self.ax_3d.set_ylabel('Time Frame')
        self.ax_3d.set_zlabel('Magnitude')
        self.ax_3d.grid(True)
        # Volume-comparison bars (initial widths reconstructed as zeros).
        self.vol_bars = self.ax_vol.barh(['Mic', 'Tone'], [0, 0], color=['#2ca02c', '#d62728'])
        self.ax_vol.set_title('Volume Comparison')
        self.ax_vol.set_xlim(0, 80)
        # Status text area.
        self.status_text = self.ax_status.text(0.5, 0.5, '',
                                               ha='center', va='center', fontsize=14)
        self.ax_status.axis('off')
        # Interactive widgets.
        self.add_interactive_controls()
        plt.tight_layout()

    def add_interactive_controls(self):
        """Add the noise-threshold slider and the reset button.

        The axes rectangles were missing from the pasted source; the
        values below are plausible reconstructions -- TODO confirm.
        """
        ax_thresh = plt.axes([0.25, 0.02, 0.50, 0.03], facecolor='lightgoldenrodyellow')
        self.threshold_slider = Slider(ax_thresh, 'Noise Threshold (dB)', -60, 0,
                                       valinit=NOISE_THRESHOLD_DB)
        reset_ax = plt.axes([0.85, 0.02, 0.10, 0.04])
        self.reset_button = Button(reset_ax, 'Reset', color='lightgoldenrodyellow')
        self.threshold_slider.on_changed(self.update_threshold)
        self.reset_button.on_clicked(self.reset_visualization)

    def update_threshold(self, val):
        """Slider callback: update the module-level noise threshold."""
        global NOISE_THRESHOLD_DB
        NOISE_THRESHOLD_DB = val

    def reset_visualization(self, event):
        """Button callback: clear all rolling buffers and reset the slider."""
        self.wave_buffer.clear()
        self.fft_buffer.clear()
        self.vol_diff_history.clear()
        self.threshold_slider.reset()

    def update_plots(self, frame):
        """FuncAnimation callback: refresh every visual element."""
        if self.wave_buffer:
            # Waveform.
            wave_data = np.concatenate(self.wave_buffer)
            time_axis = np.linspace(0, BUFFER_SECONDS, len(wave_data))
            self.wave_line.set_data(time_axis, wave_data)
            # 2D spectrogram.
            spec_data = np.array(self.fft_buffer).T
            if spec_data.size > 0:
                self.spec_plot.set_data(spec_data)
                self.spec_plot.set_clim(vmin=spec_data.min(), vmax=spec_data.max())
            # 3D spectrogram (redrawn from scratch each frame).
            self.ax_3d.clear()
            if len(self.fft_buffer) > 1:
                X, Y = np.meshgrid(np.linspace(0, SAMPLE_RATE / 2, CHUNK_SIZE // 2),
                                   np.arange(len(self.fft_buffer)))
                Z = np.array(self.fft_buffer)
                self.ax_3d.plot_surface(X, Y, Z, cmap='magma', rstride=1, cstride=1, alpha=0.8)
                self.ax_3d.set_zlim(0, np.max(Z))
            # Volume bars (values reconstructed: mic vs tone level in dB).
            for bar, val in zip(self.vol_bars, [analyzer.mic_db, analyzer.tone_db]):
                bar.set_width(val + 60)  # shift dB so the bar width is positive
            self.ax_vol.set_xlim(0, max(analyzer.mic_db, analyzer.tone_db) + 65)
            # Status text.
            status_str = (f"Noise: {'🚩 Detected' if analyzer.noise_state else '✅ Clean'}\n"
                          f"Mix: {'🔊 Active' if analyzer.mix_state else '🔇 Inactive'}\n"
                          f"Vol Diff: {analyzer.volume_diff:.1f} dB")
            self.status_text.set_text(status_str)
        # Artists returned for blitting (leading list reconstructed).
        return [self.wave_line, self.spec_plot] + list(self.vol_bars)
class AudioAnalyzer:
    """Per-block analysis state: noise / mix detection and dB levels."""

    def __init__(self):
        self.noise_state = False  # True while mic level exceeds NOISE_THRESHOLD_DB
        self.mix_state = False  # True while the test tone is prominent in the mix
        self.volume_diff = 0  # mic level minus tone level, in dB
        self.mic_db = 0
        self.tone_db = 0

    def analyze(self, mic_data, mixed_data):
        """Analyse one block of raw mic input and the mixed output signal."""
        # Noise detection: RMS level of the raw microphone block.
        mic_rms = np.sqrt(np.mean(mic_data ** 2))
        self.mic_db = 20 * np.log10(mic_rms) if mic_rms > 0 else -np.inf
        self.noise_state = self.mic_db > NOISE_THRESHOLD_DB
        # Mix detection: is the test-tone bin prominent in the spectrum?
        freqs, magnitudes = self.calculate_fft(mixed_data)
        # FIX: the pasted source had `tone_peak = magnitudes` (a whole
        # array); reconstructed as the magnitude of the bin nearest FREQ.
        tone_peak = magnitudes[np.argmin(np.abs(freqs - FREQ))]
        avg_magnitude = np.mean(magnitudes)
        self.mix_state = tone_peak > 3 * avg_magnitude
        # Volume difference between mic and the generated tone.
        tone_rms = np.sqrt(np.mean(test_tone ** 2))
        self.tone_db = 20 * np.log10(tone_rms) if tone_rms > 0 else -np.inf
        self.volume_diff = self.mic_db - self.tone_db
        # Push data into the visualizer's rolling buffers.
        visualizer.wave_buffer.append(mixed_data)
        # FIX: slice to CHUNK_SIZE // 2 bins so the frame width matches
        # the spectrogram image created by AudioVisualizer (rfft yields
        # CHUNK_SIZE // 2 + 1 bins).
        visualizer.fft_buffer.append(magnitudes[:CHUNK_SIZE // 2])
        visualizer.vol_diff_history.append(self.volume_diff)

    def calculate_fft(self, data):
        """Return (frequencies, magnitudes) of the real FFT of *data*."""
        fft_data = np.fft.rfft(data)
        freqs = np.fft.rfftfreq(len(data), 1 / SAMPLE_RATE)
        return freqs, np.abs(fft_data)
def audio_callback(indata, outdata, frames, time, status):
    """Duplex stream callback: mix mic input with the test tone, write the
    stereo mix to the output, and feed the analyzer.

    FIX: the stream is opened with channels=2, so indata is (frames, 2);
    select the first channel before mixing -- the pasted version mixed the
    whole 2-D array, which broadcasts incorrectly against the 1-D tone and
    doubles the row count after reshape(-1, 1).
    """
    global analyzer
    # Mix: 70% microphone (first channel) + 30% generated test tone.
    mixed = (indata[:, 0] * 0.7) + (test_tone * 0.3)
    # Duplicate the mono mix into both output channels.
    outdata[:] = np.repeat(mixed.reshape(-1, 1), 2, axis=1)
    # Feed the analysis stage.
    analyzer.analyze(indata[:, 0], mixed)
if __name__ == "__main__":
    # Initialise the analysis and visualisation components.
    analyzer = AudioAnalyzer()
    visualizer = AudioVisualizer()
    # Start the animation (drives update_plots every 50 ms).
    ani = FuncAnimation(visualizer.fig,
                        visualizer.update_plots,
                        interval=50,
                        blit=True)
    try:
        print("Starting real-time audio analysis with visualization...")
        with sd.Stream(samplerate=SAMPLE_RATE,
                       blocksize=CHUNK_SIZE,
                       channels=2,
                       callback=audio_callback):
            plt.show()  # blocks the main thread while the window is open
    except KeyboardInterrupt:
        print("\nAnalysis stopped.")
这两个示例分别展示了如何进行音频静音检测和音频实时分析与可视化。如果有任何问题,请随时问我!
以上回复来自 FishC AI 小助理,如未能正确解答您的问题,请继续追问。
页:
[1]